From 47ac3dcaf15167afc354bf6da938942dad2303c3 Mon Sep 17 00:00:00 2001 From: Johngreen Date: Mon, 22 Dec 2025 15:33:24 +0900 Subject: [PATCH 1/4] Add Kubernetes deployment and CI/CD workflow Introduce Kubernetes manifests for backend, frontend, ingress, storage, and namespace setup under k8s/. Add Gitea Actions workflow for automated build and deployment to Kubernetes. Provide deployment and cluster setup guides in docs/ and project root. Update .gitignore to exclude Kubernetes secret files. --- .gitea/workflows/deploy.yml | 327 +++++++++++++++++++++++ .gitignore | 5 + docs/KUBERNETES_DEPLOYMENT_GUIDE.md | 375 +++++++++++++++++++++++++++ k8s/ingress-nginx.yaml | 41 +++ k8s/local-path-provisioner.yaml | 135 ++++++++++ k8s/namespace.yaml | 9 + k8s/vexplor-backend-deployment.yaml | 133 ++++++++++ k8s/vexplor-config.yaml | 32 +++ k8s/vexplor-frontend-deployment.yaml | 92 +++++++ k8s/vexplor-ingress.yaml | 58 +++++ k8s/vexplor-secret.yaml.template | 38 +++ kubernetes-setup-guide.md | 304 ++++++++++++++++++++++ 12 files changed, 1549 insertions(+) create mode 100644 .gitea/workflows/deploy.yml create mode 100644 docs/KUBERNETES_DEPLOYMENT_GUIDE.md create mode 100644 k8s/ingress-nginx.yaml create mode 100644 k8s/local-path-provisioner.yaml create mode 100644 k8s/namespace.yaml create mode 100644 k8s/vexplor-backend-deployment.yaml create mode 100644 k8s/vexplor-config.yaml create mode 100644 k8s/vexplor-frontend-deployment.yaml create mode 100644 k8s/vexplor-ingress.yaml create mode 100644 k8s/vexplor-secret.yaml.template create mode 100644 kubernetes-setup-guide.md diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml new file mode 100644 index 00000000..e307079e --- /dev/null +++ b/.gitea/workflows/deploy.yml @@ -0,0 +1,327 @@ +# Gitea Actions Workflow - vexplor 자동 배포 +# +# 환경 변수: +# - GITEA_DOMAIN: g.wace.me +# - HARBOR_REGISTRY: harbor.wace.me +# - K8S_NAMESPACE: vexplor +# +# 필수 Secrets (Repository Settings > Secrets): +# - HARBOR_USERNAME: Harbor 
사용자명 +# - HARBOR_PASSWORD: Harbor 비밀번호 +# - KUBECONFIG: base64로 인코딩된 Kubernetes config +# +# Application Secrets: +# - k8s/vexplor-secret.yaml 파일에서 관리 + +name: Deploy vexplor + +on: + push: + branches: + - main + - master + paths: + - "backend-node/**" + - "frontend/**" + - "docker/**" + - "k8s/**" + - ".gitea/workflows/deploy.yml" + workflow_dispatch: + +env: + GITEA_DOMAIN: g.wace.me + HARBOR_REGISTRY: localhost:5001 + HARBOR_REGISTRY_K8S: 192.168.1.100:5001 + HARBOR_REGISTRY_EXTERNAL: harbor.wace.me + HARBOR_PROJECT: speefox_vexplor + K8S_NAMESPACE: vexplor + + # Frontend 빌드 환경 변수 + NEXT_PUBLIC_API_URL: "https://api.vexplor.com/api" + NEXT_PUBLIC_ENV: "production" + INTERNAL_API_URL: "http://vexplor-backend-service:3001" + + # Frontend 설정 + FRONTEND_IMAGE_NAME: vexplor-frontend + FRONTEND_DEPLOYMENT_NAME: vexplor-frontend + FRONTEND_CONTAINER_NAME: vexplor-frontend + FRONTEND_BUILD_CONTEXT: frontend + FRONTEND_DOCKERFILE_PATH: docker/deploy/frontend.Dockerfile + + # Backend 설정 + BACKEND_IMAGE_NAME: vexplor-backend + BACKEND_DEPLOYMENT_NAME: vexplor-backend + BACKEND_CONTAINER_NAME: vexplor-backend + BACKEND_BUILD_CONTEXT: backend-node + BACKEND_DOCKERFILE_PATH: docker/deploy/backend.Dockerfile + +jobs: + build-and-deploy: + runs-on: ubuntu-24.04 + + steps: + # 작업 디렉토리 정리 + - name: Clean workspace + run: | + echo "작업 디렉토리 정리..." + cd /workspace + rm -rf source + mkdir -p source + echo "정리 완료" + + # 필수 도구 설치 + - name: Install required tools + run: | + echo "필수 도구 설치 중..." 
+ apt-get update -qq + apt-get install -y git curl ca-certificates gnupg + + # kubectl 설치 + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + mv kubectl /usr/local/bin/ + + # Docker 클라이언트 설치 + install -m 0755 -d /etc/apt/keyrings + curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + chmod a+r /etc/apt/keyrings/docker.asc + + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null + + apt-get update -qq + apt-get install -y docker-ce-cli + + echo "설치 완료:" + git --version + kubectl version --client + docker --version + + export DOCKER_HOST=unix:///var/run/docker.sock + docker version || echo "소켓 연결 대기 중..." + + # 저장소 체크아웃 + - name: Checkout code + run: | + echo "저장소 체크아웃..." + cd /workspace/source + + git clone --depth 1 --branch ${GITHUB_REF_NAME} \ + https://oauth2:${{ github.token }}@${GITEA_DOMAIN}/${GITHUB_REPOSITORY}.git . 
+ + echo "체크아웃 완료" + git log -1 --oneline + + # 빌드 환경 설정 + - name: Set up build environment + run: | + IMAGE_TAG="v$(date +%Y%m%d-%H%M%S)-${GITHUB_SHA::7}" + echo "IMAGE_TAG=${IMAGE_TAG}" >> $GITHUB_ENV + + # Frontend 이미지 + echo "FRONTEND_FULL_IMAGE=${HARBOR_REGISTRY}/${HARBOR_PROJECT}/${FRONTEND_IMAGE_NAME}" >> $GITHUB_ENV + echo "FRONTEND_FULL_IMAGE_K8S=${HARBOR_REGISTRY_K8S}/${HARBOR_PROJECT}/${FRONTEND_IMAGE_NAME}" >> $GITHUB_ENV + + # Backend 이미지 + echo "BACKEND_FULL_IMAGE=${HARBOR_REGISTRY}/${HARBOR_PROJECT}/${BACKEND_IMAGE_NAME}" >> $GITHUB_ENV + echo "BACKEND_FULL_IMAGE_K8S=${HARBOR_REGISTRY_K8S}/${HARBOR_PROJECT}/${BACKEND_IMAGE_NAME}" >> $GITHUB_ENV + + echo "빌드 태그: ${IMAGE_TAG}" + + # Harbor 로그인 + - name: Login to Harbor + env: + HARBOR_USER: ${{ secrets.HARBOR_USERNAME }} + HARBOR_PASS: ${{ secrets.HARBOR_PASSWORD }} + run: | + echo "Harbor 로그인..." + export DOCKER_HOST=unix:///var/run/docker.sock + echo "${HARBOR_PASS}" | docker login ${HARBOR_REGISTRY} \ + --username ${HARBOR_USER} \ + --password-stdin + echo "Harbor 로그인 완료" + + # Backend 빌드 및 푸시 + - name: Build and Push Backend image + run: | + echo "Backend 이미지 빌드 및 푸시..." + export DOCKER_HOST=unix:///var/run/docker.sock + cd /workspace/source + + docker build \ + -t ${BACKEND_FULL_IMAGE}:${IMAGE_TAG} \ + -t ${BACKEND_FULL_IMAGE}:latest \ + -f ${BACKEND_DOCKERFILE_PATH} \ + ${BACKEND_BUILD_CONTEXT} + + docker push ${BACKEND_FULL_IMAGE}:${IMAGE_TAG} + docker push ${BACKEND_FULL_IMAGE}:latest + echo "Backend 푸시 완료" + + # Frontend 빌드 및 푸시 + - name: Build and Push Frontend image + run: | + echo "Frontend 이미지 빌드 및 푸시..." 
+ export DOCKER_HOST=unix:///var/run/docker.sock + cd /workspace/source + + echo "빌드 환경 변수:" + echo " NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}" + echo " NEXT_PUBLIC_ENV=${NEXT_PUBLIC_ENV}" + + docker build \ + -t ${FRONTEND_FULL_IMAGE}:${IMAGE_TAG} \ + -t ${FRONTEND_FULL_IMAGE}:latest \ + -f ${FRONTEND_DOCKERFILE_PATH} \ + --build-arg NEXT_PUBLIC_API_URL="${NEXT_PUBLIC_API_URL}" \ + ${FRONTEND_BUILD_CONTEXT} + + docker push ${FRONTEND_FULL_IMAGE}:${IMAGE_TAG} + docker push ${FRONTEND_FULL_IMAGE}:latest + echo "Frontend 푸시 완료" + + # Kubernetes 설정 + - name: Setup Kubernetes config + env: + KUBECONFIG_CONTENT: ${{ secrets.KUBECONFIG }} + run: | + echo "Kubernetes 설정..." + + if [ -z "${KUBECONFIG_CONTENT}" ]; then + echo "KUBECONFIG secret이 설정되지 않았습니다!" + exit 1 + fi + + mkdir -p ~/.kube + echo "${KUBECONFIG_CONTENT}" | base64 -d > ~/.kube/config + chmod 600 ~/.kube/config + + if [ ! -s ~/.kube/config ]; then + echo "kubeconfig 파일이 비어있습니다" + exit 1 + fi + + echo "kubeconfig 파일 생성 완료" + kubectl cluster-info > /dev/null 2>&1 && echo "Kubernetes 클러스터 연결 성공" + + # Kubernetes 배포 + - name: Deploy to Kubernetes + run: | + echo "Kubernetes 배포 시작..." + + cd /workspace/source + + # 네임스페이스 생성 (없을 때만) + echo "네임스페이스 확인..." + kubectl apply -f k8s/namespace.yaml + + # ConfigMap 적용 + echo "ConfigMap 적용..." + kubectl apply -f k8s/vexplor-config.yaml -n ${K8S_NAMESPACE} + + # Secret 적용 (존재하는 경우에만) + if [ -f k8s/vexplor-secret.yaml ]; then + echo "Secret 적용..." + kubectl apply -f k8s/vexplor-secret.yaml -n ${K8S_NAMESPACE} + fi + + # Harbor Registry Secret 생성 (없을 때만) + echo "Harbor Registry Secret 확인..." + if ! kubectl get secret harbor-registry -n ${K8S_NAMESPACE} > /dev/null 2>&1; then + echo "Harbor Registry Secret 생성 중..." 
+ kubectl create secret docker-registry harbor-registry \ + --docker-server=${HARBOR_REGISTRY_K8S} \ + --docker-username=${{ secrets.HARBOR_USERNAME }} \ + --docker-password=${{ secrets.HARBOR_PASSWORD }} \ + -n ${K8S_NAMESPACE} + echo "Harbor Registry Secret 생성 완료" + else + echo "Harbor Registry Secret 이미 존재" + fi + + # Backend 배포 + echo "Backend 배포..." + kubectl apply -f k8s/vexplor-backend-deployment.yaml -n ${K8S_NAMESPACE} + + if kubectl get deployment ${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} > /dev/null 2>&1; then + echo "Backend 이미지 업데이트..." + kubectl set image deployment/${BACKEND_DEPLOYMENT_NAME} \ + ${BACKEND_CONTAINER_NAME}=${BACKEND_FULL_IMAGE_K8S}:latest \ + -n ${K8S_NAMESPACE} + kubectl rollout restart deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + fi + + echo "Backend Rolling Update 진행 중..." + kubectl rollout status deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} --timeout=5m + echo "Backend 배포 완료" + + # Frontend 배포 + echo "Frontend 배포..." + kubectl apply -f k8s/vexplor-frontend-deployment.yaml -n ${K8S_NAMESPACE} + + if kubectl get deployment ${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} > /dev/null 2>&1; then + echo "Frontend 이미지 업데이트..." + kubectl set image deployment/${FRONTEND_DEPLOYMENT_NAME} \ + ${FRONTEND_CONTAINER_NAME}=${FRONTEND_FULL_IMAGE_K8S}:latest \ + -n ${K8S_NAMESPACE} + kubectl rollout restart deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + fi + + echo "Frontend Rolling Update 진행 중..." + kubectl rollout status deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} --timeout=5m + echo "Frontend 배포 완료" + + # Ingress 배포 + echo "Ingress 배포..." + kubectl apply -f k8s/vexplor-ingress.yaml -n ${K8S_NAMESPACE} + + echo "전체 배포 완료!" + + # 배포 검증 + - name: Verify deployment + run: | + echo "배포 검증..." 
+ echo "" + echo "Backend 상태:" + kubectl get deployment ${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + kubectl get pods -l app=${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + echo "" + echo "Frontend 상태:" + kubectl get deployment ${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + kubectl get pods -l app=${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + echo "" + echo "Services:" + kubectl get svc -n ${K8S_NAMESPACE} + echo "" + echo "Ingress:" + kubectl get ingress -n ${K8S_NAMESPACE} + echo "" + echo "검증 완료" + + # 배포 요약 + - name: Deployment summary + if: success() + run: | + echo "==========================================" + echo "배포가 성공적으로 완료되었습니다!" + echo "==========================================" + echo "빌드 버전: ${IMAGE_TAG}" + echo "Frontend: https://v1.vexplor.com" + echo "Backend API: https://api.vexplor.com" + echo "==========================================" + + # 실패 시 롤백 + - name: Rollback on failure + if: failure() + run: | + echo "배포 실패! 이전 버전으로 롤백..." + kubectl rollout undo deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} || true + kubectl rollout undo deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} || true + + # Harbor 로그아웃 + - name: Logout from Harbor + if: always() + run: | + export DOCKER_HOST=unix:///var/run/docker.sock + docker logout ${HARBOR_REGISTRY} || true + diff --git a/.gitignore b/.gitignore index a771d2c9..5b2b1f56 100644 --- a/.gitignore +++ b/.gitignore @@ -225,6 +225,11 @@ secrets.yml api-keys.json tokens.json +# Kubernetes Secrets (절대 커밋하지 않음!) +k8s/vexplor-secret.yaml +k8s/*-secret.yaml +!k8s/*-secret.yaml.template + # 데이터베이스 덤프 파일 *.sql *.dump diff --git a/docs/KUBERNETES_DEPLOYMENT_GUIDE.md b/docs/KUBERNETES_DEPLOYMENT_GUIDE.md new file mode 100644 index 00000000..f5c99cbf --- /dev/null +++ b/docs/KUBERNETES_DEPLOYMENT_GUIDE.md @@ -0,0 +1,375 @@ +# vexplor 쿠버네티스 자동 배포 가이드 + +## 개요 + +이 문서는 vexplor 프로젝트를 Gitea Actions를 통해 쿠버네티스 클러스터에 자동 배포하는 방법을 설명합니다. 
+ +**작성일**: 2024년 12월 22일 + +--- + +## 아키텍처 + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Gitea Repository │ +│ g.wace.me/chpark/vexplor │ +└─────────────────────┬───────────────────────────────────────────┘ + │ push to main + ▼ +┌─────────────────────────────────────────────────────────────────┐ +│ Gitea Actions Runner │ +│ 1. Checkout code │ +│ 2. Build Docker images (frontend, backend) │ +│ 3. Push to Harbor Registry │ +│ 4. Deploy to Kubernetes │ +└─────────────────────┬───────────────────────────────────────────┘ + │ + ┌──────────┴──────────┐ + ▼ ▼ +┌──────────────────┐ ┌──────────────────┐ +│ Harbor Registry │ │ Kubernetes (K8s) │ +│ harbor.wace.me │ │ 112.168.212.142 │ +└──────────────────┘ └──────────────────┘ + │ + ┌────────────────┼────────────────┐ + ▼ ▼ ▼ + ┌──────────┐ ┌──────────┐ ┌──────────┐ + │ Frontend │ │ Backend │ │ Ingress │ + │ :3000 │ │ :3001 │ │ Nginx │ + └──────────┘ └──────────┘ └──────────┘ + │ │ │ + └────────────────┴────────────────┘ + │ + ▼ + ┌─────────────────────┐ + │ External Access │ + │ v1.vexplor.com │ + │ api.vexplor.com │ + └─────────────────────┘ +``` + +--- + +## 사전 요구사항 + +### 1. 쿠버네티스 클러스터 + +```bash +# 서버 정보 +IP: 112.168.212.142 +SSH: ssh -p 22 wace@112.168.212.142 +K8s 버전: v1.28.15 +``` + +### 2. Harbor 레지스트리 접근 권한 + +Harbor에 `vexplor` 프로젝트가 생성되어 있어야 합니다. + +### 3. 
Gitea Repository Secrets + +Gitea 저장소에 다음 Secrets를 설정해야 합니다: + +| Secret 이름 | 설명 | +|------------|------| +| `HARBOR_USERNAME` | Harbor 사용자명 | +| `HARBOR_PASSWORD` | Harbor 비밀번호 | +| `KUBECONFIG` | base64 인코딩된 Kubernetes config | + +--- + +## 초기 설정 + +### 1단계: 쿠버네티스 클러스터 접속 + +```bash +ssh -p 22 wace@112.168.212.142 +``` + +### 2단계: Nginx Ingress Controller 설치 + +```bash +# Nginx Ingress Controller 설치 (baremetal용) +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.5/deploy/static/provider/baremetal/deploy.yaml + +# 설치 확인 +kubectl get pods -n ingress-nginx +kubectl get svc -n ingress-nginx +``` + +### 3단계: Local Path Provisioner 설치 (PVC용) + +```bash +# Local Path Provisioner 설치 +kubectl apply -f k8s/local-path-provisioner.yaml + +# 설치 확인 +kubectl get pods -n local-path-storage +kubectl get storageclass +``` + +### 4단계: Cert-Manager 설치 (SSL 인증서용) + +```bash +# Cert-Manager 설치 +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.yaml + +# 설치 확인 +kubectl get pods -n cert-manager + +# ClusterIssuer 생성 (Let's Encrypt) +cat < Secrets > Actions 메뉴로 이동 +3. 다음 Secrets 추가: + +#### HARBOR_USERNAME +Harbor 로그인 사용자명 + +#### HARBOR_PASSWORD +Harbor 로그인 비밀번호 + +#### KUBECONFIG +```bash +# 쿠버네티스 서버에서 실행 +cat ~/.kube/config | base64 -w 0 +``` +출력된 값을 KUBECONFIG secret으로 등록 + +--- + +## 배포 트리거 + +### 자동 배포 (Push) + +다음 경로의 파일이 변경되어 `main` 브랜치에 push되면 자동으로 배포됩니다: + +- `backend-node/**` +- `frontend/**` +- `docker/**` +- `k8s/**` +- `.gitea/workflows/deploy.yml` + +### 수동 배포 + +1. Gitea 저장소 > Actions 탭으로 이동 +2. "Deploy vexplor" 워크플로우 선택 +3. 
"Run workflow" 버튼 클릭 + +--- + +## 파일 구조 + +``` +vexplor/ +├── .gitea/ +│ └── workflows/ +│ └── deploy.yml # Gitea Actions 워크플로우 +├── docker/ +│ └── deploy/ +│ ├── backend.Dockerfile # 백엔드 배포용 Dockerfile +│ └── frontend.Dockerfile # 프론트엔드 배포용 Dockerfile +├── k8s/ +│ ├── namespace.yaml # 네임스페이스 정의 +│ ├── vexplor-config.yaml # ConfigMap +│ ├── vexplor-secret.yaml.template # Secret 템플릿 +│ ├── vexplor-backend-deployment.yaml # 백엔드 Deployment/Service/PVC +│ ├── vexplor-frontend-deployment.yaml # 프론트엔드 Deployment/Service +│ ├── vexplor-ingress.yaml # Ingress 설정 +│ ├── local-path-provisioner.yaml # 스토리지 프로비저너 +│ └── ingress-nginx.yaml # Ingress 컨트롤러 패치 +└── docs/ + └── KUBERNETES_DEPLOYMENT_GUIDE.md # 이 문서 +``` + +--- + +## 운영 명령어 + +### 상태 확인 + +```bash +# 전체 리소스 확인 +kubectl get all -n vexplor + +# Pod 상태 확인 +kubectl get pods -n vexplor -o wide + +# 로그 확인 +kubectl logs -f deployment/vexplor-backend -n vexplor +kubectl logs -f deployment/vexplor-frontend -n vexplor + +# Pod 상세 정보 +kubectl describe pod -n vexplor +``` + +### 수동 배포/롤백 + +```bash +# 이미지 업데이트 +kubectl set image deployment/vexplor-backend \ + vexplor-backend=harbor.wace.me/vexplor/vexplor-backend:v20241222-120000-abc1234 \ + -n vexplor + +# 롤아웃 상태 확인 +kubectl rollout status deployment/vexplor-backend -n vexplor + +# 롤백 +kubectl rollout undo deployment/vexplor-backend -n vexplor +kubectl rollout undo deployment/vexplor-frontend -n vexplor + +# 히스토리 확인 +kubectl rollout history deployment/vexplor-backend -n vexplor +``` + +### 스케일링 + +```bash +# 레플리카 수 조정 +kubectl scale deployment/vexplor-backend --replicas=3 -n vexplor +kubectl scale deployment/vexplor-frontend --replicas=3 -n vexplor +``` + +### Pod 재시작 + +```bash +# Deployment 재시작 (롤링 업데이트) +kubectl rollout restart deployment/vexplor-backend -n vexplor +kubectl rollout restart deployment/vexplor-frontend -n vexplor +``` + +--- + +## 문제 해결 + +### Pod이 Pending 상태일 때 + +```bash +# Pod 이벤트 확인 +kubectl describe pod -n vexplor + +# 노드 리소스 확인 +kubectl describe node 
+kubectl top nodes +``` + +### ImagePullBackOff 오류 + +```bash +# Harbor Secret 확인 +kubectl get secret harbor-registry -n vexplor -o yaml + +# Secret 재생성 +kubectl delete secret harbor-registry -n vexplor +kubectl create secret docker-registry harbor-registry \ + --docker-server=192.168.1.100:5001 \ + --docker-username= \ + --docker-password= \ + -n vexplor +``` + +### Ingress가 작동하지 않을 때 + +```bash +# Ingress 상태 확인 +kubectl get ingress -n vexplor +kubectl describe ingress vexplor-ingress -n vexplor + +# Ingress Controller 로그 +kubectl logs -f deployment/ingress-nginx-controller -n ingress-nginx +``` + +### SSL 인증서 문제 + +```bash +# Certificate 상태 확인 +kubectl get certificate -n vexplor +kubectl describe certificate vexplor-tls -n vexplor + +# Cert-Manager 로그 +kubectl logs -f deployment/cert-manager -n cert-manager +``` + +--- + +## 네트워크 설정 + +### 방화벽 포트 개방 + +쿠버네티스 서버에서 다음 포트가 개방되어야 합니다: + +| 포트 | 용도 | +|-----|------| +| 30080 | HTTP (Ingress NodePort) | +| 30443 | HTTPS (Ingress NodePort) | +| 6443 | Kubernetes API | + +### DNS 설정 + +다음 도메인이 쿠버네티스 서버 IP를 가리키도록 설정: + +- `v1.vexplor.com` → 112.168.212.142 +- `api.vexplor.com` → 112.168.212.142 + +--- + +## 환경 변수 + +### Backend 환경 변수 + +| 변수 | 설명 | 소스 | +|-----|------|-----| +| `NODE_ENV` | 환경 (production) | ConfigMap | +| `PORT` | 서버 포트 (3001) | ConfigMap | +| `DATABASE_URL` | PostgreSQL 연결 문자열 | Secret | +| `JWT_SECRET` | JWT 서명 키 | Secret | +| `JWT_EXPIRES_IN` | JWT 만료 시간 | ConfigMap | +| `CORS_ORIGIN` | CORS 허용 도메인 | ConfigMap | + +### Frontend 환경 변수 + +| 변수 | 설명 | 소스 | +|-----|------|-----| +| `NODE_ENV` | 환경 (production) | ConfigMap | +| `NEXT_PUBLIC_API_URL` | 클라이언트 API URL | ConfigMap | +| `SERVER_API_URL` | SSR용 내부 API URL | Deployment | + +--- + +## 참고 자료 + +- [Kubernetes 공식 문서](https://kubernetes.io/docs/) +- [Gitea Actions 문서](https://docs.gitea.com/usage/actions/overview) +- [Nginx Ingress Controller](https://kubernetes.github.io/ingress-nginx/) +- [Cert-Manager](https://cert-manager.io/docs/) +- [Harbor 
Registry](https://goharbor.io/docs/) + diff --git a/k8s/ingress-nginx.yaml b/k8s/ingress-nginx.yaml new file mode 100644 index 00000000..dfb551cd --- /dev/null +++ b/k8s/ingress-nginx.yaml @@ -0,0 +1,41 @@ +# Nginx Ingress Controller 설치 +# 단일 노드 클러스터용 설정 (NodePort 사용) +# +# 설치 명령어: +# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.9.5/deploy/static/provider/baremetal/deploy.yaml +# +# 또는 이 파일로 커스텀 설치: +# kubectl apply -f k8s/ingress-nginx.yaml + +# NodePort를 80, 443으로 고정하는 패치용 설정 +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller +spec: + type: NodePort + externalTrafficPolicy: Local + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + nodePort: 30080 + - name: https + port: 443 + protocol: TCP + targetPort: https + nodePort: 30443 + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + diff --git a/k8s/local-path-provisioner.yaml b/k8s/local-path-provisioner.yaml new file mode 100644 index 00000000..7965bc26 --- /dev/null +++ b/k8s/local-path-provisioner.yaml @@ -0,0 +1,135 @@ +# Local Path Provisioner - 단일 노드 클러스터용 스토리지 +# Rancher의 Local Path Provisioner 사용 +# 참고: https://github.com/rancher/local-path-provisioner + +apiVersion: v1 +kind: Namespace +metadata: + name: local-path-storage + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: local-path-provisioner-role +rules: + - apiGroups: [""] + resources: ["nodes", "persistentvolumeclaims", "configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: 
["endpoints", "persistentvolumes", "pods"] + verbs: ["*"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: local-path-provisioner-bind +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: local-path-provisioner-role +subjects: + - kind: ServiceAccount + name: local-path-provisioner-service-account + namespace: local-path-storage + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-path-provisioner + namespace: local-path-storage +spec: + replicas: 1 + selector: + matchLabels: + app: local-path-provisioner + template: + metadata: + labels: + app: local-path-provisioner + spec: + serviceAccountName: local-path-provisioner-service-account + containers: + - name: local-path-provisioner + image: rancher/local-path-provisioner:v0.0.26 + imagePullPolicy: IfNotPresent + command: + - local-path-provisioner + - --debug + - start + - --config + - /etc/config/config.json + volumeMounts: + - name: config-volume + mountPath: /etc/config/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumes: + - name: config-volume + configMap: + name: local-path-config + +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: local-path + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: rancher.io/local-path +volumeBindingMode: WaitForFirstConsumer +reclaimPolicy: Delete + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-path-config + namespace: local-path-storage +data: + config.json: |- + { + "nodePathMap": [ + { + "node": "DEFAULT_PATH_FOR_NON_LISTED_NODES", + "paths": ["/opt/local-path-provisioner"] + } + ] + } + setup: |- + #!/bin/sh + set -eu + mkdir -m 0777 -p "$VOL_DIR" + teardown: |- + #!/bin/sh + set -eu + rm -rf "$VOL_DIR" 
+ helperPod.yaml: |- + apiVersion: v1 + kind: Pod + metadata: + name: helper-pod + spec: + containers: + - name: helper-pod + image: busybox:latest + imagePullPolicy: IfNotPresent + diff --git a/k8s/namespace.yaml b/k8s/namespace.yaml new file mode 100644 index 00000000..aa37dc26 --- /dev/null +++ b/k8s/namespace.yaml @@ -0,0 +1,9 @@ +# vexplor 네임스페이스 +apiVersion: v1 +kind: Namespace +metadata: + name: vexplor + labels: + name: vexplor + project: vexplor + diff --git a/k8s/vexplor-backend-deployment.yaml b/k8s/vexplor-backend-deployment.yaml new file mode 100644 index 00000000..37778fe7 --- /dev/null +++ b/k8s/vexplor-backend-deployment.yaml @@ -0,0 +1,133 @@ +# vexplor Backend Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vexplor-backend + namespace: vexplor + labels: + app: vexplor-backend + component: backend +spec: + replicas: 2 + selector: + matchLabels: + app: vexplor-backend + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: vexplor-backend + component: backend + spec: + imagePullSecrets: + - name: harbor-registry + containers: + - name: vexplor-backend + image: harbor.wace.me/vexplor/vexplor-backend:latest + imagePullPolicy: Always + ports: + - containerPort: 3001 + protocol: TCP + envFrom: + - configMapRef: + name: vexplor-config + - secretRef: + name: vexplor-secret + env: + - name: PORT + value: "3001" + - name: HOST + value: "0.0.0.0" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "500m" + livenessProbe: + httpGet: + path: /api/health + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /api/health + port: 3001 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + volumeMounts: + - name: uploads + mountPath: /app/uploads + - name: data + mountPath: /app/data + - name: logs + mountPath: /app/logs + 
volumes: + - name: uploads + persistentVolumeClaim: + claimName: vexplor-backend-uploads-pvc + - name: data + persistentVolumeClaim: + claimName: vexplor-backend-data-pvc + - name: logs + emptyDir: {} + +--- +# Backend Service +apiVersion: v1 +kind: Service +metadata: + name: vexplor-backend-service + namespace: vexplor + labels: + app: vexplor-backend +spec: + type: ClusterIP + selector: + app: vexplor-backend + ports: + - name: http + port: 3001 + targetPort: 3001 + protocol: TCP + +--- +# Backend PVC - Uploads +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vexplor-backend-uploads-pvc + namespace: vexplor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: local-path + +--- +# Backend PVC - Data +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vexplor-backend-data-pvc + namespace: vexplor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi + storageClassName: local-path + diff --git a/k8s/vexplor-config.yaml b/k8s/vexplor-config.yaml new file mode 100644 index 00000000..39af783d --- /dev/null +++ b/k8s/vexplor-config.yaml @@ -0,0 +1,32 @@ +# vexplor ConfigMap - 환경 설정 +apiVersion: v1 +kind: ConfigMap +metadata: + name: vexplor-config + namespace: vexplor + labels: + app: vexplor +data: + # 공통 설정 + NODE_ENV: "production" + TZ: "Asia/Seoul" + + # Backend 설정 + BACKEND_PORT: "3001" + BACKEND_HOST: "0.0.0.0" + JWT_EXPIRES_IN: "24h" + LOG_LEVEL: "info" + CORS_CREDENTIALS: "true" + + # Frontend 설정 + FRONTEND_PORT: "3000" + FRONTEND_HOSTNAME: "0.0.0.0" + NEXT_TELEMETRY_DISABLED: "1" + + # 내부 서비스 URL (클러스터 내부 통신) + INTERNAL_BACKEND_URL: "http://vexplor-backend-service:3001" + + # 외부 URL (클라이언트 접근용) + NEXT_PUBLIC_API_URL: "https://api.vexplor.com/api" + CORS_ORIGIN: "https://v1.vexplor.com,https://api.vexplor.com" + diff --git a/k8s/vexplor-frontend-deployment.yaml b/k8s/vexplor-frontend-deployment.yaml new file mode 100644 index 00000000..013b8348 --- /dev/null +++ 
b/k8s/vexplor-frontend-deployment.yaml @@ -0,0 +1,92 @@ +# vexplor Frontend Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vexplor-frontend + namespace: vexplor + labels: + app: vexplor-frontend + component: frontend +spec: + replicas: 2 + selector: + matchLabels: + app: vexplor-frontend + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: vexplor-frontend + component: frontend + spec: + imagePullSecrets: + - name: harbor-registry + containers: + - name: vexplor-frontend + image: harbor.wace.me/vexplor/vexplor-frontend:latest + imagePullPolicy: Always + ports: + - containerPort: 3000 + protocol: TCP + envFrom: + - configMapRef: + name: vexplor-config + env: + - name: PORT + value: "3000" + - name: HOSTNAME + value: "0.0.0.0" + - name: NODE_ENV + value: "production" + - name: NEXT_PUBLIC_API_URL + value: "https://api.vexplor.com/api" + # 서버사이드 렌더링시 내부 백엔드 호출용 + - name: SERVER_API_URL + value: "http://vexplor-backend-service:3001" + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "500m" + livenessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + httpGet: + path: / + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + +--- +# Frontend Service +apiVersion: v1 +kind: Service +metadata: + name: vexplor-frontend-service + namespace: vexplor + labels: + app: vexplor-frontend +spec: + type: ClusterIP + selector: + app: vexplor-frontend + ports: + - name: http + port: 3000 + targetPort: 3000 + protocol: TCP + diff --git a/k8s/vexplor-ingress.yaml b/k8s/vexplor-ingress.yaml new file mode 100644 index 00000000..df3e0d38 --- /dev/null +++ b/k8s/vexplor-ingress.yaml @@ -0,0 +1,58 @@ +# vexplor Ingress - Nginx Ingress Controller 기반 +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: 
vexplor-ingress + namespace: vexplor + labels: + app: vexplor + annotations: + # Nginx Ingress Controller 설정 + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/proxy-body-size: "100m" + nginx.ingress.kubernetes.io/proxy-read-timeout: "300" + nginx.ingress.kubernetes.io/proxy-send-timeout: "300" + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" + + # WebSocket 지원 + nginx.ingress.kubernetes.io/proxy-http-version: "1.1" + nginx.ingress.kubernetes.io/upstream-hash-by: "$remote_addr" + + # SSL Redirect + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + + # Cert-Manager (Let's Encrypt) + cert-manager.io/cluster-issuer: "letsencrypt-prod" +spec: + ingressClassName: nginx + tls: + - hosts: + - v1.vexplor.com + - api.vexplor.com + secretName: vexplor-tls + rules: + # Frontend 도메인 + - host: v1.vexplor.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vexplor-frontend-service + port: + number: 3000 + + # Backend API 도메인 + - host: api.vexplor.com + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: vexplor-backend-service + port: + number: 3001 + diff --git a/k8s/vexplor-secret.yaml.template b/k8s/vexplor-secret.yaml.template new file mode 100644 index 00000000..97e6b4f1 --- /dev/null +++ b/k8s/vexplor-secret.yaml.template @@ -0,0 +1,38 @@ +# vexplor Secret 템플릿 +# 이 파일은 템플릿입니다. 실제 값으로 채운 후 vexplor-secret.yaml로 저장하세요. +# 주의: vexplor-secret.yaml은 .gitignore에 추가되어야 합니다! 
+#
+# Secret 값은 base64로 인코딩해야 합니다:
+# echo -n "your-value" | base64
+# 경고: 실제 자격증명을 이 템플릿에 커밋하지 마세요. 과거 커밋에 노출된 값은 모두 폐기하고 교체(rotate)해야 합니다.
+apiVersion: v1
+kind: Secret
+metadata:
+  name: vexplor-secret
+  namespace: vexplor
+  labels:
+    app: vexplor
+type: Opaque
+data:
+  # 데이터베이스 연결 정보 (base64 인코딩 필요)
+  # echo -n "postgresql://USER:PASSWORD@HOST:PORT/DBNAME" | base64
+  DATABASE_URL: ""
+
+  # JWT 시크릿
+  # echo -n "your-jwt-secret" | base64
+  JWT_SECRET: ""
+
+  # 메일 암호화 키
+  # echo -n "your-encryption-key" | base64
+  ENCRYPTION_KEY: ""
+
+  # API 키들
+  # echo -n "your-kma-api-key" | base64
+  KMA_API_KEY: ""
+
+  # echo -n "your-its-api-key" | base64
+  ITS_API_KEY: ""
+
+  # echo -n "your-expressway-api-key" | base64
+  EXPRESSWAY_API_KEY: ""
+
diff --git a/kubernetes-setup-guide.md b/kubernetes-setup-guide.md
new file mode 100644
index 00000000..3d27b04c
--- /dev/null
+++ b/kubernetes-setup-guide.md
@@ -0,0 +1,304 @@
+# 쿠버네티스 클러스터 구축 가이드
+
+## 📋 개요
+
+이 문서는 Digital Twin 프로젝트의 쿠버네티스 클러스터 구축 과정을 정리한 가이드입니다.
+
+**작성일**: 2024년 12월 22일
+
+---
+
+## 🖥️ 서버 정보
+
+### 기존 서버 (참조용)
+
+| 항목 | 값 |
+|------|-----|
+| IP | 211.115.91.170 |
+| SSH 포트 | 12991 |
+| 사용자 | geonhee |
+| OS | Ubuntu 24.04.3 LTS |
+| K8s 버전 | v1.28.0 |
+| 컨테이너 런타임 | containerd 1.7.28 |
+
+### 새 서버 (구축 완료)
+
+| 항목 | 값 |
+|------|-----|
+| IP | 112.168.212.142 |
+| SSH 포트 | 22 |
+| 사용자 | wace |
+| 호스트명 | waceserver |
+| OS | Ubuntu 24.04.3 LTS |
+| K8s 버전 | v1.28.15 |
+| 컨테이너 런타임 | containerd 1.7.28 |
+| 내부 IP | 10.10.0.74 |
+| CPU | 20코어 |
+| 메모리 | 31GB |
+
+---
+
+## 🔐 SSH 접속 설정
+
+### SSH 키 기반 인증 설정
+
+```bash
+# 1. 로컬에서 SSH 키 확인
+ls -la ~/.ssh/
+
+# 2. 공개키를 서버에 복사
+ssh-copy-id -p 12991 geonhee@211.115.91.170  # 기존 서버
+ssh-copy-id -p 22 wace@112.168.212.142       # 새 서버
+
+# 3. 
비밀번호 없이 접속 테스트 +ssh -p 12991 geonhee@211.115.91.170 +ssh -p 22 wace@112.168.212.142 +``` + +### SSH Config 설정 (선택사항) + +```bash +# ~/.ssh/config 파일에 추가 +Host wace-old + HostName 211.115.91.170 + Port 12991 + User geonhee + +Host wace-new + HostName 112.168.212.142 + Port 22 + User wace +``` + +--- + +## 🚀 쿠버네티스 클러스터 구축 과정 + +### 1단계: Swap 비활성화 + +쿠버네티스는 swap이 활성화되어 있으면 제대로 동작하지 않습니다. + +```bash +# swap 비활성화 +sudo swapoff -a + +# 영구적으로 비활성화 (재부팅 후에도 유지) +sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab + +# 확인 (아무것도 출력되지 않으면 성공) +swapon --show +``` + +### 2단계: containerd 설정 + +```bash +# containerd 기본 설정 생성 +sudo containerd config default | sudo tee /etc/containerd/config.toml + +# SystemdCgroup 활성화 (중요!) +sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml + +# containerd 재시작 +sudo systemctl restart containerd + +# 상태 확인 +sudo systemctl is-active containerd +``` + +### 3단계: kubeadm init (클러스터 초기화) + +```bash +sudo kubeadm init --pod-network-cidr=10.244.0.0/16 +``` + +**출력 결과 (중요 정보)**: +- 클러스터 초기화 성공 +- API 서버: https://10.10.0.74:6443 +- 워커 노드 조인 토큰 생성됨 + +### 4단계: kubectl 설정 + +일반 사용자가 kubectl을 사용할 수 있도록 설정합니다. + +```bash +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + +# 확인 +kubectl cluster-info +``` + +### 5단계: 네트워크 플러그인 설치 (Flannel) + +Pod 간 통신을 위한 네트워크 플러그인을 설치합니다. + +```bash +kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +``` + +### 6단계: 단일 노드 설정 + +마스터 노드에서도 워크로드를 실행할 수 있도록 taint를 제거합니다. 
+ +```bash +kubectl taint nodes --all node-role.kubernetes.io/control-plane- +``` + +--- + +## ✅ 구축 결과 + +### 클러스터 상태 + +```bash +kubectl get nodes -o wide +``` + +| NAME | STATUS | ROLES | VERSION | INTERNAL-IP | OS-IMAGE | CONTAINER-RUNTIME | +|------|--------|-------|---------|-------------|----------|-------------------| +| waceserver | Ready | control-plane | v1.28.15 | 10.10.0.74 | Ubuntu 24.04.3 LTS | containerd://1.7.28 | + +### 시스템 Pod 상태 + +```bash +kubectl get pods -n kube-system +kubectl get pods -n kube-flannel +``` + +| 컴포넌트 | 상태 | +|---------|------| +| etcd | ✅ Running | +| kube-apiserver | ✅ Running | +| kube-controller-manager | ✅ Running | +| kube-scheduler | ✅ Running | +| kube-proxy | ✅ Running | +| coredns (x2) | ✅ Running | +| kube-flannel | ✅ Running | + +--- + +## 📌 워커 노드 추가 (필요 시) + +다른 서버를 워커 노드로 추가하려면: + +```bash +kubeadm join 10.10.0.74:6443 --token 4lfga6.luad9f367uxh0rlq \ + --discovery-token-ca-cert-hash sha256:9bea59b6fd34115c3f893a4b10bacc0a5409192b288564dc055251210081c86e +``` + +**토큰 만료 시 새 토큰 생성**: +```bash +kubeadm token create --print-join-command +``` + +--- + +## 🔧 유용한 명령어 + +### 클러스터 정보 확인 + +```bash +# 노드 상태 +kubectl get nodes -o wide + +# 모든 Pod 상태 +kubectl get pods -A + +# 클러스터 정보 +kubectl cluster-info + +# 컴포넌트 상태 +kubectl get componentstatuses +``` + +### 문제 해결 + +```bash +# kubelet 로그 확인 +sudo journalctl -u kubelet -f + +# containerd 로그 확인 +sudo journalctl -u containerd -f + +# Pod 상세 정보 +kubectl describe pod -n + +# Pod 로그 확인 +kubectl logs -n +``` + +### 클러스터 리셋 (초기화 실패 시) + +```bash +sudo kubeadm reset +sudo rm -rf /etc/cni/net.d +sudo rm -rf $HOME/.kube +sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X +``` + +--- + +## 📂 다음 단계: 자동 배포 설정 + +쿠버네티스 클러스터 구축이 완료되었습니다. 다음 단계로 진행할 사항: + +1. **Ingress Controller 설치** (외부 트래픽 라우팅) ✅ 완료 +2. **Cert-Manager 설치** (SSL 인증서 자동 관리) +3. **Harbor/Registry 연동** (컨테이너 이미지 저장소) +4. **CI/CD 파이프라인 구성** (Gitea Actions) ✅ 완료 +5. 
**Helm 설치** (패키지 관리) +6. **애플리케이션 배포** (Deployment, Service, Ingress) ✅ 완료 + +### Gitea Actions 자동 배포 설정 완료 + +자세한 설정 방법은 [KUBERNETES_DEPLOYMENT_GUIDE.md](docs/KUBERNETES_DEPLOYMENT_GUIDE.md) 참조 + +#### 생성된 파일 목록 + +``` +.gitea/workflows/deploy.yml # Gitea Actions 워크플로우 +k8s/ +├── namespace.yaml # 네임스페이스 정의 +├── vexplor-config.yaml # ConfigMap +├── vexplor-secret.yaml.template # Secret 템플릿 +├── vexplor-backend-deployment.yaml # 백엔드 Deployment/Service/PVC +├── vexplor-frontend-deployment.yaml# 프론트엔드 Deployment/Service +├── vexplor-ingress.yaml # Ingress 설정 +├── local-path-provisioner.yaml # 스토리지 프로비저너 +└── ingress-nginx.yaml # Ingress 컨트롤러 패치 +``` + +#### Gitea Repository Secrets 설정 필요 + +| Secret 이름 | 설명 | +|------------|------| +| `HARBOR_USERNAME` | Harbor 사용자명 | +| `HARBOR_PASSWORD` | Harbor 비밀번호 | +| `KUBECONFIG` | base64 인코딩된 Kubernetes config | + +```bash +# KUBECONFIG 생성 방법 (K8s 서버에서 실행) +cat ~/.kube/config | base64 -w 0 +``` + +--- + +## 📞 참고 정보 + +### 서버 접속 + +```bash +# 새 서버 (쿠버네티스 클러스터) +ssh -p 22 wace@112.168.212.142 + +# 기존 서버 (참조용) +ssh -p 12991 geonhee@211.115.91.170 +``` + +### 관련 문서 + +- [Kubernetes 공식 문서](https://kubernetes.io/docs/) +- [kubeadm 설치 가이드](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) +- [Flannel 네트워크 플러그인](https://github.com/flannel-io/flannel) + From 15265ebfc9eec6c7a2b9fc4efacfab919bed2481 Mon Sep 17 00:00:00 2001 From: Johngreen Date: Mon, 22 Dec 2025 17:39:26 +0900 Subject: [PATCH 2/4] Refactor deploy workflow to use SSH for k8s operations Replaces direct kubectl usage with SSH-based remote execution for Kubernetes deployment steps. Updates secrets and environment variables to use SSH key and connection info, and modifies manifest transfer and deployment verification to operate over SSH. This improves security and flexibility for remote Kubernetes server management. 
--- .gitea/workflows/deploy.yml | 165 ++++++++++++++++++++---------------- 1 file changed, 94 insertions(+), 71 deletions(-) diff --git a/.gitea/workflows/deploy.yml b/.gitea/workflows/deploy.yml index e307079e..ec48126e 100644 --- a/.gitea/workflows/deploy.yml +++ b/.gitea/workflows/deploy.yml @@ -8,7 +8,7 @@ # 필수 Secrets (Repository Settings > Secrets): # - HARBOR_USERNAME: Harbor 사용자명 # - HARBOR_PASSWORD: Harbor 비밀번호 -# - KUBECONFIG: base64로 인코딩된 Kubernetes config +# - K8S_SSH_KEY: base64로 인코딩된 SSH 비밀키 (쿠버네티스 서버 접속용) # # Application Secrets: # - k8s/vexplor-secret.yaml 파일에서 관리 @@ -31,10 +31,15 @@ on: env: GITEA_DOMAIN: g.wace.me HARBOR_REGISTRY: localhost:5001 - HARBOR_REGISTRY_K8S: 192.168.1.100:5001 + HARBOR_REGISTRY_K8S: harbor.wace.me HARBOR_REGISTRY_EXTERNAL: harbor.wace.me HARBOR_PROJECT: speefox_vexplor K8S_NAMESPACE: vexplor + + # 쿠버네티스 서버 SSH 접속 정보 + K8S_SSH_HOST: 112.168.212.142 + K8S_SSH_PORT: 22 + K8S_SSH_USER: wace # Frontend 빌드 환경 변수 NEXT_PUBLIC_API_URL: "https://api.vexplor.com/api" @@ -74,12 +79,7 @@ jobs: run: | echo "필수 도구 설치 중..." apt-get update -qq - apt-get install -y git curl ca-certificates gnupg - - # kubectl 설치 - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - chmod +x kubectl - mv kubectl /usr/local/bin/ + apt-get install -y git curl ca-certificates gnupg openssh-client # Docker 클라이언트 설치 install -m 0755 -d /etc/apt/keyrings @@ -94,7 +94,7 @@ jobs: echo "설치 완료:" git --version - kubectl version --client + ssh -V docker --version export DOCKER_HOST=unix:///var/run/docker.sock @@ -180,100 +180,121 @@ jobs: docker push ${FRONTEND_FULL_IMAGE}:latest echo "Frontend 푸시 완료" - # Kubernetes 설정 - - name: Setup Kubernetes config + # SSH 키 설정 (쿠버네티스 서버 접속용) + - name: Setup SSH Key env: - KUBECONFIG_CONTENT: ${{ secrets.KUBECONFIG }} + SSH_KEY_CONTENT: ${{ secrets.K8S_SSH_KEY }} run: | - echo "Kubernetes 설정..." + echo "SSH 키 설정..." 
- if [ -z "${KUBECONFIG_CONTENT}" ]; then - echo "KUBECONFIG secret이 설정되지 않았습니다!" + if [ -z "${SSH_KEY_CONTENT}" ]; then + echo "K8S_SSH_KEY secret이 설정되지 않았습니다!" exit 1 fi - mkdir -p ~/.kube - echo "${KUBECONFIG_CONTENT}" | base64 -d > ~/.kube/config - chmod 600 ~/.kube/config + mkdir -p ~/.ssh + echo "${SSH_KEY_CONTENT}" | base64 -d > ~/.ssh/id_rsa + chmod 600 ~/.ssh/id_rsa - if [ ! -s ~/.kube/config ]; then - echo "kubeconfig 파일이 비어있습니다" - exit 1 - fi + # known_hosts에 쿠버네티스 서버 추가 + ssh-keyscan -p ${K8S_SSH_PORT} ${K8S_SSH_HOST} >> ~/.ssh/known_hosts 2>/dev/null - echo "kubeconfig 파일 생성 완료" - kubectl cluster-info > /dev/null 2>&1 && echo "Kubernetes 클러스터 연결 성공" + # SSH 연결 테스트 + echo "SSH 연결 테스트..." + ssh -o StrictHostKeyChecking=no -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "echo 'SSH 연결 성공'" + echo "SSH 키 설정 완료" - # Kubernetes 배포 - - name: Deploy to Kubernetes + # k8s 매니페스트 파일을 쿠버네티스 서버로 전송 + - name: Transfer k8s manifests run: | - echo "Kubernetes 배포 시작..." - + echo "k8s 매니페스트 파일 전송..." cd /workspace/source - # 네임스페이스 생성 (없을 때만) + # 쿠버네티스 서버에 디렉토리 생성 + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "mkdir -p ~/vexplor-deploy/k8s" + + # k8s 파일 전송 + scp -P ${K8S_SSH_PORT} -r k8s/* ${K8S_SSH_USER}@${K8S_SSH_HOST}:~/vexplor-deploy/k8s/ + + echo "매니페스트 파일 전송 완료" + + # Kubernetes 배포 (SSH를 통해 원격 실행) + - name: Deploy to Kubernetes + env: + HARBOR_USER: ${{ secrets.HARBOR_USERNAME }} + HARBOR_PASS: ${{ secrets.HARBOR_PASSWORD }} + run: | + echo "Kubernetes 배포 시작 (SSH 원격 실행)..." + + # SSH를 통해 쿠버네티스 서버에서 kubectl 명령 실행 + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} << 'DEPLOY_SCRIPT' + set -e + cd ~/vexplor-deploy + echo "네임스페이스 확인..." kubectl apply -f k8s/namespace.yaml - # ConfigMap 적용 echo "ConfigMap 적용..." - kubectl apply -f k8s/vexplor-config.yaml -n ${K8S_NAMESPACE} + kubectl apply -f k8s/vexplor-config.yaml -n vexplor # Secret 적용 (존재하는 경우에만) if [ -f k8s/vexplor-secret.yaml ]; then echo "Secret 적용..." 
- kubectl apply -f k8s/vexplor-secret.yaml -n ${K8S_NAMESPACE} + kubectl apply -f k8s/vexplor-secret.yaml -n vexplor fi - # Harbor Registry Secret 생성 (없을 때만) + echo "네임스페이스 및 ConfigMap 적용 완료" + DEPLOY_SCRIPT + + # Harbor Registry Secret 생성 (별도로 처리 - 환경변수 사용) echo "Harbor Registry Secret 확인..." - if ! kubectl get secret harbor-registry -n ${K8S_NAMESPACE} > /dev/null 2>&1; then - echo "Harbor Registry Secret 생성 중..." - kubectl create secret docker-registry harbor-registry \ - --docker-server=${HARBOR_REGISTRY_K8S} \ - --docker-username=${{ secrets.HARBOR_USERNAME }} \ - --docker-password=${{ secrets.HARBOR_PASSWORD }} \ - -n ${K8S_NAMESPACE} - echo "Harbor Registry Secret 생성 완료" - else - echo "Harbor Registry Secret 이미 존재" - fi + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "kubectl get secret harbor-registry -n vexplor" > /dev/null 2>&1 || \ + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "kubectl create secret docker-registry harbor-registry \ + --docker-server=${HARBOR_REGISTRY_K8S} \ + --docker-username=${HARBOR_USER} \ + --docker-password='${HARBOR_PASS}' \ + -n vexplor" + echo "Harbor Registry Secret 확인 완료" # Backend 배포 echo "Backend 배포..." - kubectl apply -f k8s/vexplor-backend-deployment.yaml -n ${K8S_NAMESPACE} + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} << BACKEND_DEPLOY + set -e + cd ~/vexplor-deploy + kubectl apply -f k8s/vexplor-backend-deployment.yaml -n vexplor - if kubectl get deployment ${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} > /dev/null 2>&1; then - echo "Backend 이미지 업데이트..." - kubectl set image deployment/${BACKEND_DEPLOYMENT_NAME} \ - ${BACKEND_CONTAINER_NAME}=${BACKEND_FULL_IMAGE_K8S}:latest \ - -n ${K8S_NAMESPACE} - kubectl rollout restart deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} - fi + echo "Backend 이미지 업데이트..." 
+ kubectl set image deployment/${BACKEND_DEPLOYMENT_NAME} \ + ${BACKEND_CONTAINER_NAME}=${HARBOR_REGISTRY_K8S}/${HARBOR_PROJECT}/${BACKEND_IMAGE_NAME}:latest \ + -n vexplor || true + kubectl rollout restart deployment/${BACKEND_DEPLOYMENT_NAME} -n vexplor echo "Backend Rolling Update 진행 중..." - kubectl rollout status deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} --timeout=5m + kubectl rollout status deployment/${BACKEND_DEPLOYMENT_NAME} -n vexplor --timeout=5m echo "Backend 배포 완료" + BACKEND_DEPLOY # Frontend 배포 echo "Frontend 배포..." - kubectl apply -f k8s/vexplor-frontend-deployment.yaml -n ${K8S_NAMESPACE} + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} << FRONTEND_DEPLOY + set -e + cd ~/vexplor-deploy + kubectl apply -f k8s/vexplor-frontend-deployment.yaml -n vexplor - if kubectl get deployment ${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} > /dev/null 2>&1; then - echo "Frontend 이미지 업데이트..." - kubectl set image deployment/${FRONTEND_DEPLOYMENT_NAME} \ - ${FRONTEND_CONTAINER_NAME}=${FRONTEND_FULL_IMAGE_K8S}:latest \ - -n ${K8S_NAMESPACE} - kubectl rollout restart deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} - fi + echo "Frontend 이미지 업데이트..." + kubectl set image deployment/${FRONTEND_DEPLOYMENT_NAME} \ + ${FRONTEND_CONTAINER_NAME}=${HARBOR_REGISTRY_K8S}/${HARBOR_PROJECT}/${FRONTEND_IMAGE_NAME}:latest \ + -n vexplor || true + kubectl rollout restart deployment/${FRONTEND_DEPLOYMENT_NAME} -n vexplor echo "Frontend Rolling Update 진행 중..." - kubectl rollout status deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} --timeout=5m + kubectl rollout status deployment/${FRONTEND_DEPLOYMENT_NAME} -n vexplor --timeout=5m echo "Frontend 배포 완료" + FRONTEND_DEPLOY # Ingress 배포 echo "Ingress 배포..." - kubectl apply -f k8s/vexplor-ingress.yaml -n ${K8S_NAMESPACE} + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "cd ~/vexplor-deploy && kubectl apply -f k8s/vexplor-ingress.yaml -n vexplor" echo "전체 배포 완료!" 
@@ -281,22 +302,24 @@ jobs: - name: Verify deployment run: | echo "배포 검증..." + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} << 'VERIFY_SCRIPT' echo "" echo "Backend 상태:" - kubectl get deployment ${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} - kubectl get pods -l app=${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + kubectl get deployment vexplor-backend -n vexplor + kubectl get pods -l app=vexplor-backend -n vexplor echo "" echo "Frontend 상태:" - kubectl get deployment ${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} - kubectl get pods -l app=${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} + kubectl get deployment vexplor-frontend -n vexplor + kubectl get pods -l app=vexplor-frontend -n vexplor echo "" echo "Services:" - kubectl get svc -n ${K8S_NAMESPACE} + kubectl get svc -n vexplor echo "" echo "Ingress:" - kubectl get ingress -n ${K8S_NAMESPACE} + kubectl get ingress -n vexplor echo "" echo "검증 완료" + VERIFY_SCRIPT # 배포 요약 - name: Deployment summary @@ -315,8 +338,8 @@ jobs: if: failure() run: | echo "배포 실패! 이전 버전으로 롤백..." 
- kubectl rollout undo deployment/${BACKEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} || true - kubectl rollout undo deployment/${FRONTEND_DEPLOYMENT_NAME} -n ${K8S_NAMESPACE} || true + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "kubectl rollout undo deployment/vexplor-backend -n vexplor" || true + ssh -p ${K8S_SSH_PORT} ${K8S_SSH_USER}@${K8S_SSH_HOST} "kubectl rollout undo deployment/vexplor-frontend -n vexplor" || true # Harbor 로그아웃 - name: Logout from Harbor From 44254731145918c9bac47c4295c0eca21951f4fc Mon Sep 17 00:00:00 2001 From: Johngreen Date: Mon, 22 Dec 2025 18:25:35 +0900 Subject: [PATCH 3/4] Update app.ts --- backend-node/src/app.ts | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/backend-node/src/app.ts b/backend-node/src/app.ts index e928f96c..b3d84ecb 100644 --- a/backend-node/src/app.ts +++ b/backend-node/src/app.ts @@ -280,12 +280,20 @@ app.use(errorHandler); const PORT = config.port; const HOST = config.host; -app.listen(PORT, HOST, async () => { +const server = app.listen(PORT, HOST, async () => { logger.info(`🚀 Server is running on ${HOST}:${PORT}`); logger.info(`📊 Environment: ${config.nodeEnv}`); logger.info(`🔗 Health check: http://${HOST}:${PORT}/health`); logger.info(`🌐 External access: http://39.117.244.52:${PORT}/health`); + // 비동기 초기화 작업 (에러가 발생해도 서버는 유지) + initializeServices().catch(err => { + logger.error('❌ 서비스 초기화 중 치명적 에러 발생:', err); + }); +}); + +// 서비스 초기화 함수 분리 +async function initializeServices() { // 데이터베이스 마이그레이션 실행 try { const { @@ -343,6 +351,15 @@ app.listen(PORT, HOST, async () => { } catch (error) { logger.error(`❌ 메일 자동 삭제 스케줄러 시작 실패:`, error); } +} + +// 우아한 종료 처리 +process.on('SIGTERM', () => { + logger.info('SIGTERM signal received: closing HTTP server'); + server.close(() => { + logger.info('HTTP server closed'); + process.exit(0); + }); }); export default app; From f2647415a17b0be3aea9fab18b4ed077b9b53141 Mon Sep 17 00:00:00 2001 From: Johngreen Date: Mon, 22 Dec 2025 
18:39:00 +0900 Subject: [PATCH 4/4] Update vexplor-backend-deployment.yaml --- k8s/vexplor-backend-deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/k8s/vexplor-backend-deployment.yaml b/k8s/vexplor-backend-deployment.yaml index 37778fe7..a9ab534d 100644 --- a/k8s/vexplor-backend-deployment.yaml +++ b/k8s/vexplor-backend-deployment.yaml @@ -51,7 +51,7 @@ spec: cpu: "500m" livenessProbe: httpGet: - path: /api/health + path: /health port: 3001 initialDelaySeconds: 30 periodSeconds: 10 @@ -59,7 +59,7 @@ spec: failureThreshold: 3 readinessProbe: httpGet: - path: /api/health + path: /health port: 3001 initialDelaySeconds: 10 periodSeconds: 5