Merge branch 'hml'

This commit is contained in:
GoHorse Deploy 2026-02-07 13:29:21 +00:00
commit 1aeeb4796f
10 changed files with 245 additions and 356 deletions

View file

@ -1,202 +0,0 @@
# ═══════════════════════════════════════════════════════════════════════════════
# PIPELINE 1: DEV (branch dev)
# ═══════════════════════════════════════════════════════════════════════════════
kind: pipeline
type: kubernetes
name: deploy-backend-dev
namespace: drone
service_account_name: drone-deployer

# Dev deploys are started manually / via API ("custom" event), not on push.
trigger:
  event:
    - custom

steps:
  # Build the backend image and push it to the Harbor registry.
  - name: build-and-push-backend
    image: plugins/docker:latest
    settings:
      privileged: true
      buildkit: true
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobsdev/gohorsejobs-backend
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      mtu: 1200
      context: backend
      dockerfile: backend/Dockerfile
      purge: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Build the backoffice image and push it to the Harbor registry.
  - name: build-and-push-backoffice
    image: plugins/docker:latest
    settings:
      privileged: true
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobs-backoffice-dev/backoffice
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      mtu: 1200
      custom_config:
        max-concurrent-uploads: 1
      context: backoffice
      dockerfile: backoffice/Dockerfile
      purge: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Materialize the Drone secrets as Kubernetes secrets in the app namespace.
  - name: export-envs-to-k8s
    image: bitnami/kubectl:latest
    environment:
      AWS_ACCESS_KEY_ID: { from_secret: AWS_ACCESS_KEY_ID }
      AWS_ENDPOINT: { from_secret: AWS_ENDPOINT }
      AWS_REGION: { from_secret: AWS_REGION }
      AWS_SECRET_ACCESS_KEY: { from_secret: AWS_SECRET_ACCESS_KEY }
      BACKEND_HOST: { from_secret: BACKEND_HOST }
      ENV: { from_secret: ENV }
      CORS_ORIGINS: { from_secret: CORS_ORIGINS }
      S3_BUCKET: { from_secret: S3_BUCKET }
      DATABASE_URL: { from_secret: DATABASE_URL }
      HARBOR_USER: { from_secret: HARBOR_USERNAME }
      HARBOR_PASS: { from_secret: HARBOR_PASSWORD }
      JWT_SECRET: { from_secret: JWT_SECRET }
      COOKIE_SECRET: { from_secret: COOKIE_SECRET }
      COOKIE_DOMAIN: { from_secret: COOKIE_DOMAIN }
      BACKEND_PORT: { from_secret: BACKEND_PORT }
      PASSWORD_PEPPER: { from_secret: PASSWORD_PEPPER }
      NEXT_PUBLIC_API_URL: { from_secret: NEXT_PUBLIC_API_URL }
      NEXT_PUBLIC_BACKOFFICE_URL: { from_secret: NEXT_PUBLIC_BACKOFFICE_URL }
      MTU: { from_secret: MTU }
    commands:
      # 1. Generate the .env file, ensuring it contains the required variables (including the _DEV ones)
      - touch .env.k8s
      # The filter below captures both the global prefixes and the _DEV suffix
      - env | grep -E '(MTU|AWS_|DB_|^ENV\b|CORS_|JWT_|MAX_|PORT|S3_|UPLOAD_|STRIPE_|WEBHOOK_|DATABASE_|CLOUDFLARE_|CPANEL_|HARBOR_|COOKIE_)' > .env.k8s
      # 2. Refresh the environment-variable secret in the target namespace
      - kubectl -n gohorsejobsdev delete secret backend-secrets --ignore-not-found
      - kubectl -n gohorsejobsdev create secret generic backend-secrets --from-env-file=.env.k8s
      # 3. Refresh the Harbor registry authentication secret ($$ is Drone escaping for runtime expansion)
      - kubectl -n gohorsejobsdev delete secret harbor-registry --ignore-not-found
      - kubectl -n gohorsejobsdev create secret docker-registry harbor-registry --docker-server=in.gohorsejobs.com --docker-username=$${HARBOR_USER} --docker-password=$${HARBOR_PASS}

  # Apply the k8s manifests for both apps in the dev namespace.
  - name: deploy-apps
    image: bitnami/kubectl:latest
    commands:
      # Force a refresh by removing the old deployments (optional, but guarantees a clean state)
      - kubectl delete deployment gohorse-backend-dev -n gohorsejobsdev --ignore-not-found
      - kubectl delete deployment gohorse-backoffice-dev -n gohorsejobsdev --ignore-not-found
      # Apply the new configuration
      - kubectl apply -f k8s/dev/backend-deployment-dev.yaml -n gohorsejobsdev
      - kubectl apply -f k8s/dev/backend-service-dev.yaml -n gohorsejobsdev
      - kubectl apply -f k8s/dev/backend-ingress-dev.yaml -n gohorsejobsdev
      - kubectl apply -f k8s/dev/backoffice-deployment-dev.yaml -n gohorsejobsdev
      - kubectl apply -f k8s/dev/backoffice-service-dev.yaml -n gohorsejobsdev
      - kubectl apply -f k8s/dev/backoffice-ingress-dev.yaml -n gohorsejobsdev
---
# ═══════════════════════════════════════════════════════════════════════════════
# PIPELINE 2: HML (branch hml)
# ═══════════════════════════════════════════════════════════════════════════════
kind: pipeline
type: kubernetes
name: deploy-backend-hml
namespace: drone
service_account_name: drone-deployer

# Runs automatically on every push to the hml (homologation/staging) branch.
trigger:
  branch:
    - hml

steps:
  # Build the backend image and push it to the Harbor registry.
  - name: build-and-push-backend
    image: plugins/docker:latest
    settings:
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobshml/gohorsejobs-backend
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      context: backend
      dockerfile: backend/Dockerfile
      insecure: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Build the backoffice image and push it to the Harbor registry.
  - name: build-and-push-backoffice
    image: plugins/docker:latest
    settings:
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobs-backoffice-hml/backoffice
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      context: backoffice
      dockerfile: backoffice/Dockerfile
      insecure: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Recreate the deployments and apply the hml manifests.
  - name: deploy-apps
    image: bitnami/kubectl:latest
    commands:
      - kubectl delete deployment gohorse-backend-hml -n gohorsejobshml --ignore-not-found
      - kubectl delete deployment gohorse-backoffice-hml -n gohorsejobshml --ignore-not-found
      - kubectl apply -f k8s/hml/backend-deployment-hml.yaml -n gohorsejobshml
      - kubectl apply -f k8s/hml/backend-service-hml.yaml -n gohorsejobshml
      - kubectl apply -f k8s/hml/backend-ingress-hml.yaml -n gohorsejobshml
      # NOTE(review): filename breaks the *-deployment-* pattern used by every
      # sibling manifest (expected backoffice-deployment-hml.yaml) — confirm
      # against the repo before renaming.
      - kubectl apply -f k8s/hml/backoffice-hml.yaml -n gohorsejobshml
      - kubectl apply -f k8s/hml/backoffice-service-hml.yaml -n gohorsejobshml
      - kubectl apply -f k8s/hml/backoffice-ingress-hml.yaml -n gohorsejobshml
---
# ═══════════════════════════════════════════════════════════════════════════════
# PIPELINE 3: PRD (branch main)
# ═══════════════════════════════════════════════════════════════════════════════
kind: pipeline
type: kubernetes
name: deploy-backend-prd
namespace: drone
service_account_name: drone-deployer

# Runs automatically on every push to main (production).
trigger:
  branch:
    - main

steps:
  # Build the backend image and push it to the Harbor registry.
  - name: build-and-push-backend
    image: plugins/docker:latest
    settings:
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobs/gohorsejobs-backend
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      context: backend
      dockerfile: backend/Dockerfile
      insecure: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Build the backoffice image and push it to the Harbor registry.
  - name: build-and-push-backoffice
    image: plugins/docker:latest
    settings:
      registry: in.gohorsejobs.com
      repo: in.gohorsejobs.com/gohorsejobs/backoffice
      tags: [ latest, "${DRONE_COMMIT_SHA:0:8}" ]
      context: backoffice
      dockerfile: backoffice/Dockerfile
      insecure: true
      username: { from_secret: HARBOR_USERNAME }
      password: { from_secret: HARBOR_PASSWORD }

  # Recreate the deployments, apply the prd manifests, and wait for rollout.
  - name: deploy-apps
    image: bitnami/kubectl:latest
    commands:
      - kubectl delete deployment gohorse-backend -n gohorsejobs --ignore-not-found
      - kubectl delete deployment gohorse-backoffice -n gohorsejobs --ignore-not-found
      # FIX: removed stray "." from "backend-deployment.-prd.yaml" — the path
      # would not resolve; every sibling manifest uses the <name>-prd.yaml form.
      - kubectl apply -f k8s/prd/backend-deployment-prd.yaml -n gohorsejobs
      - kubectl apply -f k8s/prd/backend-service-prd.yaml -n gohorsejobs
      - kubectl apply -f k8s/prd/backend-ingress-prd.yaml -n gohorsejobs
      - kubectl apply -f k8s/prd/backoffice-deployment-prd.yaml -n gohorsejobs
      - kubectl apply -f k8s/prd/backoffice-service-prd.yaml -n gohorsejobs
      - kubectl apply -f k8s/prd/backoffice-ingress-prd.yaml -n gohorsejobs
      # Block until both rollouts complete so the build fails on a bad deploy.
      - kubectl rollout status deployment/gohorse-backend -n gohorsejobs
      - kubectl rollout status deployment/gohorse-backoffice -n gohorsejobs

View file

@ -1,91 +1,102 @@
name: Deploy Stack (Dev)
name: Deploy Backend and Backoffice Dev
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- 'backend/**'
- 'backoffice/**'
- 'frontend/**'
branches: [dev]
env:
REGISTRY: forgejo-gru.rede5.com.br
NAMESPACE: rede5
REGISTRY: pipe.gohorsejobs.com
IMAGE_NAMESPACE: bohessefm
jobs:
# Job: Deploy no Servidor (Pull das imagens do Forgejo)
deploy-dev:
runs-on: docker
build-and-push:
runs-on: [self-hosted, linux-amd64]
steps:
- name: Checkout code
uses: https://github.com/actions/checkout@v4
with:
fetch-depth: 2
uses: actions/checkout@v4
- name: Check changed files
id: check
- name: Docker Login
run: |
if git diff --name-only HEAD~1 HEAD | grep -q "^backend/"; then
echo "backend=true" >> $GITHUB_OUTPUT
else
echo "backend=false" >> $GITHUB_OUTPUT
fi
if git diff --name-only HEAD~1 HEAD | grep -q "^frontend/"; then
echo "frontend=true" >> $GITHUB_OUTPUT
else
echo "frontend=false" >> $GITHUB_OUTPUT
fi
if git diff --name-only HEAD~1 HEAD | grep -q "^backoffice/"; then
echo "backoffice=true" >> $GITHUB_OUTPUT
else
echo "backoffice=false" >> $GITHUB_OUTPUT
fi
if git diff --name-only HEAD~1 HEAD | grep -q "^seeder-api/"; then
echo "seeder=true" >> $GITHUB_OUTPUT
else
echo "seeder=false" >> $GITHUB_OUTPUT
fi
echo "${{ secrets.FORGEJO_TOKEN }}" | docker login ${{ env.REGISTRY }} \
-u ${{ env.IMAGE_NAMESPACE }} --password-stdin
- name: Deploy via SSH
uses: https://github.com/appleboy/ssh-action@v1.0.3
with:
host: ${{ secrets.HOST }}
username: ${{ secrets.USERNAME }}
key: ${{ secrets.SSH_KEY }}
port: ${{ secrets.PORT || 22 }}
script: |
# Login no Forgejo Registry
echo "${{ secrets.FORGEJO_PASSWORD }}" | podman login ${{ env.REGISTRY }} -u ${{ secrets.FORGEJO_USERNAME }} --password-stdin
- name: Build & Push Backend
run: |
# Build usando SHA para imutabilidade e latest para conveniência
docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/gohorsejobs:${{ github.sha }} \
-t ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/gohorsejobs:latest ./backend
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/gohorsejobs:${{ github.sha }}
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/gohorsejobs:latest
# --- DEPLOY DO BACKEND ---
if [ "${{ steps.check.outputs.backend }}" == "true" ]; then
echo "Pulling e reiniciando Backend..."
podman pull ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/gohorsejobs-backend:latest
sudo systemctl restart gohorsejobs-backend-dev
fi
- name: Build & Push Backoffice
run: |
docker build -t ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/backoffice:${{ github.sha }} \
-t ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/backoffice:latest ./backoffice
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/backoffice:${{ github.sha }}
docker push ${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/backoffice:latest
# --- DEPLOY DO FRONTEND ---
if [ "${{ steps.check.outputs.frontend }}" == "true" ]; then
echo "Pulling e reiniciando Frontend..."
podman pull ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/gohorsejobs-frontend:latest
sudo systemctl restart gohorsejobs-frontend-dev
fi
deploy:
needs: build-and-push
runs-on: [self-hosted, linux-amd64]
steps:
- name: Checkout code
uses: actions/checkout@v4
# --- DEPLOY DO BACKOFFICE ---
if [ "${{ steps.check.outputs.backoffice }}" == "true" ]; then
echo "Pulling e reiniciando Backoffice..."
podman pull ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/gohorsejobs-backoffice:latest
sudo systemctl restart gohorsejobs-backoffice-dev
fi
- name: Install kubectl
run: |
apk add --no-cache curl
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
chmod +x kubectl
mv kubectl /usr/local/bin/
# --- DEPLOY DO SEEDER ---
if [ "${{ steps.check.outputs.seeder }}" == "true" ]; then
echo "Pulling e reiniciando Seeder..."
podman pull ${{ env.REGISTRY }}/${{ env.NAMESPACE }}/gohorsejobs-seeder:latest
sudo systemctl restart gohorsejobs-seeder-dev
fi
- name: Configure Kubeconfig
run: |
mkdir -p ~/.kube
echo "${{ secrets.KUBE_CONFIG }}" > ~/.kube/config
chmod 600 ~/.kube/config
# --- LIMPEZA ---
echo "Limpando imagens antigas..."
podman image prune -f || true
- name: Sync Secrets and Vars
run: |
kubectl create namespace gohorsejobsdev --dry-run=client -o yaml | kubectl apply -f -
# Sincroniza Registry Secret
kubectl get secret forgejo-registry-secret --namespace=forgejo -o yaml | \
sed 's/namespace: forgejo/namespace: gohorsejobsdev/' | \
kubectl apply -f - --force
# Injeta variáveis (Lembre-se de mudar DATABASE_URL para sslmode=disable no Forgejo!)
kubectl delete secret backend-secrets -n gohorsejobsdev --ignore-not-found
kubectl create secret generic backend-secrets -n gohorsejobsdev \
--from-literal=MTU="${{ vars.MTU }}" \
--from-literal=DATABASE_URL="${{ vars.DATABASE_URL }}" \
--from-literal=AMQP_URL="${{ vars.AMQP_URL }}" \
--from-literal=JWT_SECRET="${{ vars.JWT_SECRET }}" \
--from-literal=JWT_EXPIRATION="${{ vars.JWT_EXPIRATION }}" \
--from-literal=PASSWORD_PEPPER="${{ vars.PASSWORD_PEPPER }}" \
--from-literal=COOKIE_SECRET="${{ vars.COOKIE_SECRET }}" \
--from-literal=COOKIE_DOMAIN="${{ vars.COOKIE_DOMAIN }}" \
--from-literal=BACKEND_PORT="${{ vars.BACKEND_PORT }}" \
--from-literal=BACKEND_HOST="${{ vars.BACKEND_HOST }}" \
--from-literal=ENV="${{ vars.ENV }}" \
--from-literal=CORS_ORIGINS="${{ vars.CORS_ORIGINS }}" \
--from-literal=S3_BUCKET="${{ vars.S3_BUCKET }}" \
--from-literal=AWS_REGION="${{ vars.AWS_REGION }}" \
--from-literal=AWS_ENDPOINT="${{ vars.AWS_ENDPOINT }}" \
--from-literal=AWS_ACCESS_KEY_ID="${{ vars.AWS_ACCESS_KEY_ID }}" \
--from-literal=AWS_SECRET_ACCESS_KEY="${{ vars.AWS_SECRET_ACCESS_KEY }}"
- name: Deploy to K3s
run: |
kubectl apply -f k8s/dev/ -n gohorsejobsdev
# Vincula o deployment ao SHA específico para garantir que o Pull ocorra corretamente
kubectl -n gohorsejobsdev set image deployment/gohorse-backend-dev backend=${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/gohorsejobs:${{ github.sha }}
kubectl -n gohorsejobsdev set image deployment/gohorse-backoffice-dev backoffice=${{ env.REGISTRY }}/${{ env.IMAGE_NAMESPACE }}/backoffice:${{ github.sha }}
# Força o restart para carregar os novos valores do secret backend-secrets
kubectl -n gohorsejobsdev rollout restart deployment/gohorse-backend-dev
kubectl -n gohorsejobsdev rollout restart deployment/gohorse-backoffice-dev
# Aguarda estabilização
kubectl -n gohorsejobsdev rollout status deployment/gohorse-backend-dev --timeout=120s

73
Dockerfile Normal file
View file

@ -0,0 +1,73 @@
# syntax=docker/dockerfile:1
# FIX: the parser directive above MUST be the first line of the file — when it
# appeared below the header comments it was treated as a plain comment and
# silently ignored, so the BuildKit frontend pin (required for the
# --mount=type=cache below on older engines) never took effect.
# =============================================================================
# GoHorse Jobs Frontend - Ultra-Optimized Dockerfile
# =============================================================================
# -----------------------------------------------------------------------------
# Stage 1: Base with pnpm
# -----------------------------------------------------------------------------
FROM mirror.gcr.io/library/node:20-alpine AS base
RUN corepack enable && corepack prepare pnpm@latest --activate
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
# -----------------------------------------------------------------------------
# Stage 2: Dependencies
# -----------------------------------------------------------------------------
FROM base AS deps
WORKDIR /app
COPY package.json package-lock.json* pnpm-lock.yaml* ./
# pnpm import converts an npm lockfile if present (best-effort); install falls
# back to an unfrozen resolve when no usable lockfile exists.
RUN --mount=type=cache,id=pnpm,target=/pnpm/store \
    pnpm import 2>/dev/null || true && \
    pnpm install --frozen-lockfile || pnpm install
# -----------------------------------------------------------------------------
# Stage 3: Builder
# -----------------------------------------------------------------------------
FROM base AS builder
# Cap the V8 heap so the Next.js build fits in constrained CI runners.
ENV NODE_OPTIONS="--max-old-space-size=512"
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
# Copy ALL configs and folders the build needs.
COPY package.json next.config.* tsconfig.json postcss.config.mjs ./
# If there are other configs (e.g. tailwind.config.ts), add them here or use
# COPY . . with a well-maintained .dockerignore.
COPY public ./public
COPY src ./src
# i18n message catalogs required by the build.
COPY messages ./messages
# Build arguments (NEXT_PUBLIC_* values are baked into the client bundle).
ARG NEXT_PUBLIC_API_URL=http://localhost:8521
ARG NEXT_PUBLIC_BACKOFFICE_URL=http://localhost:3001
ENV NEXT_PUBLIC_API_URL=$NEXT_PUBLIC_API_URL
ENV NEXT_PUBLIC_BACKOFFICE_URL=$NEXT_PUBLIC_BACKOFFICE_URL
ENV NEXT_TELEMETRY_DISABLED=1
RUN pnpm build && \
    rm -rf node_modules/.cache .next/cache
# -----------------------------------------------------------------------------
# Stage 4: Production Runner
# -----------------------------------------------------------------------------
FROM mirror.gcr.io/library/node:20-alpine AS runner
WORKDIR /app
# Run as an unprivileged user.
RUN addgroup -g 1001 -S nodejs && \
    adduser -u 1001 -S nextjs -G nodejs
ENV NODE_ENV=production \
    NEXT_TELEMETRY_DISABLED=1 \
    PORT=3000 \
    HOSTNAME="0.0.0.0"
# Copy only the Next.js standalone output plus static assets into the runtime image.
COPY --from=builder --chown=nextjs:nodejs /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
USER nextjs
EXPOSE 3000
CMD ["node", "server.js"]

View file

@ -1,33 +1,33 @@
# syntax=docker/dockerfile:1
FROM mirror.gcr.io/library/golang:1.24-alpine AS builder
RUN apk add --no-cache git ca-certificates tzdata
# Ajuste de Mirror e instalação de dependências
RUN sed -i 's/dl-cdn.alpinelinux.org/uk.alpinelinux.org/g' /etc/apk/repositories && \
apk add --no-cache git ca-certificates tzdata
WORKDIR /build
# PASSO 1: Copia apenas os arquivos de dependência
# Se esses arquivos não mudarem, o Docker pula o próximo passo.
# Copia apenas os arquivos de dependência primeiro para aproveitar o cache de camadas do Docker
COPY go.mod go.sum ./
# PASSO 2: Baixa as dependências
# Isso fica guardado no cache de imagem do seu servidor.
# REMOVIDO --mount: Usando o download padrão compatível com Docker Legacy
RUN go mod download && go mod verify
# PASSO 3: Copia o código fonte
# Só agora copiamos o resto. Se você mudar o código, ele não baixa as deps de novo.
# Copia o restante do código fonte
COPY . .
# PASSO 4: Build puro
# Sem o --mount para não dar erro, mas o Go 1.24 já é rápido por natureza.
# Adicione -gcflags="all=-N -l" para um build muito mais leve
# Build Otimizado:
# -p 2 para evitar picos de CPU que causam Erro 500 no Forgejo
# REMOVIDO --mount para compatibilidade
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
-p 2 \
-gcflags="all=-N -l" \
-ldflags="-s -w" \
-trimpath \
-o /app/main ./cmd/api
# --- Estágio Final ---
# --- Estágio Final (Imagem de Produção) ---
FROM scratch AS runner
# Copia apenas o necessário do builder
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /app/main /main

View file

@ -3,15 +3,17 @@
# =============================================================================
FROM mirror.gcr.io/library/node:20-alpine AS base
# Configurações básicas e pnpm
# Configurações básicas e pnpm com Mirror fix para evitar travamentos
RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.leaseweb.com/g' /etc/apk/repositories && \
apk add --no-cache libc6-compat
RUN corepack enable && corepack prepare pnpm@latest --activate
RUN apk add --no-cache libc6-compat
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
# -----------------------------------------------------------------------------
# Stage 1: Dependencies (Instalação real aqui)
# Stage 1: Dependencies
# -----------------------------------------------------------------------------
FROM base AS deps
WORKDIR /app
@ -19,8 +21,10 @@ WORKDIR /app
# Copia apenas os arquivos de definição
COPY package.json pnpm-lock.yaml ./
# Instalamos as dependências de forma clássica (sem --mount para não quebrar no Drone)
RUN pnpm install --frozen-lockfile
# Configura timeout e retry para o pnpm não travar em redes instáveis
RUN pnpm config set fetch-retries 5 && \
pnpm config set fetch-retry-maxtimeout 600000 && \
pnpm install --frozen-lockfile
# -----------------------------------------------------------------------------
# Stage 2: Builder
@ -38,8 +42,7 @@ COPY . .
RUN pnpm build && \
rm -rf node_modules/.cache
# Remove dependências de desenvolvimento para diminuir a imagem final
# Removido --mount para compatibilidade com seu ambiente
# Remove dependências de desenvolvimento
RUN pnpm prune --prod
# -----------------------------------------------------------------------------
@ -47,7 +50,9 @@ RUN pnpm prune --prod
# -----------------------------------------------------------------------------
FROM mirror.gcr.io/library/node:20-alpine AS runner
RUN addgroup -g 1001 -S nodejs && \
# Mirror fix também na imagem final (boa prática para auditorias ou instalações extras)
RUN sed -i 's/dl-cdn.alpinelinux.org/mirror.leaseweb.com/g' /etc/apk/repositories && \
addgroup -g 1001 -S nodejs && \
adduser -S nestjs -u 1001 -G nodejs
WORKDIR /app

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.2 MiB

After

Width:  |  Height:  |  Size: 1.4 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 304 KiB

After

Width:  |  Height:  |  Size: 81 KiB

BIN
frontend/public/public.zip Normal file

Binary file not shown.

View file

@ -5,6 +5,8 @@ metadata:
namespace: gohorsejobsdev
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: gohorse-backend-dev
@ -14,54 +16,47 @@ spec:
app: gohorse-backend-dev
env: development
spec:
terminationGracePeriodSeconds: 10
imagePullSecrets:
- name: harbor-registry
# --- INÍCIO DA CORREÇÃO DE REDE ---
initContainers:
- name: set-mtu
image: busybox
command: ['sh', '-c', 'ifconfig eth0 mtu $(MTU_VALUE)']
env:
- name: MTU_VALUE
valueFrom:
secretKeyRef:
name: backend-secrets
key: MTU
securityContext:
privileged: true
# --- FIM DA CORREÇÃO DE REDE ---
- name: forgejo-registry-secret
dnsConfig:
options:
- name: ndots
value: "1"
containers:
- name: backend
image: in.gohorsejobs.com/gohorsejobsdev/gohorsejobs-backend:latest
image: pipe.gohorsejobs.com/bohessefm/gohorsejobs:latest
imagePullPolicy: Always
ports:
- containerPort: 8521
envFrom:
- secretRef:
name: backend-secrets
# Adicionando a variável explicitamente também no container principal se precisar
env:
- name: MTU
valueFrom:
secretKeyRef:
name: backend-secrets
key: MTU
# Força o modo performance do framework
- name: NODE_ENV
value: "production"
# Tunagem do Garbage Collector para eliminar oscilação de latência no pod
- name: NODE_OPTIONS
value: "--max-old-space-size=768 --min-semi-space-size=64 --max-semi-space-size=128"
resources:
requests:
memory: "128Mi"
cpu: "100m"
memory: "768Mi" # Alinhado com o heap do Node
cpu: "500m" # Prioridade de CPU garantida
limits:
memory: "512Mi"
cpu: "500m"
memory: "1024Mi"
livenessProbe:
httpGet:
path: /health
port: 8521
initialDelaySeconds: 15
periodSeconds: 20
initialDelaySeconds: 50
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: 8521
initialDelaySeconds: 5
periodSeconds: 10
initialDelaySeconds: 20
periodSeconds: 15
timeoutSeconds: 3

View file

@ -5,6 +5,8 @@ metadata:
namespace: gohorsejobsdev
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: gohorse-backoffice-dev
@ -14,43 +16,48 @@ spec:
app: gohorse-backoffice-dev
env: development
spec:
terminationGracePeriodSeconds: 10
imagePullSecrets:
- name: harbor-registry
# --- AJUSTE DE REDE (MTU) ---
initContainers:
- name: set-mtu
image: busybox
command: ['sh', '-c', 'ifconfig eth0 mtu $(MTU_VALUE)']
env:
- name: MTU_VALUE
valueFrom:
secretKeyRef:
name: backend-secrets
key: MTU
securityContext:
privileged: true
# --- FIM DO AJUSTE ---
- name: forgejo-registry-secret
dnsConfig:
options:
- name: ndots
value: "1"
containers:
- name: backoffice
image: in.gohorsejobs.com/gohorsejobs-backoffice-dev/backoffice:latest
image: pipe.gohorsejobs.com/bohessefm/backoffice:latest
imagePullPolicy: Always
ports:
- containerPort: 3001
env:
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "0"
- name: MTU # Passando a variável para o container principal também
valueFrom:
secretKeyRef:
name: backend-secrets
key: MTU
envFrom:
- secretRef:
name: backend-secrets
env:
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "0"
- name: NODE_ENV
value: "production"
# Removidas as flags de semi-space que causaram o erro
- name: NODE_OPTIONS
value: "--max-old-space-size=1536"
resources:
requests:
memory: "800Mi"
cpu: "300m"
memory: "1536Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "800m"
livenessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 80
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: 3001
initialDelaySeconds: 40
periodSeconds: 20
timeoutSeconds: 3