gohorsejobs/backend/internal/services/storage_service.go
NANDO9322 ddc2f5dd03 feat: atualiza fluxo de cadastro de candidatos com persistência completa de dados e máscara de telefone
Frontend:
- Implementar máscara de entrada de telefone para números BR ((XX) XXXXX-XXXX).
- Atualizar formulário de cadastro para enviar dados completos do perfil do candidato (endereço, formação, habilidades, etc.).
- Corrigir problemas de idioma misto na página de Detalhes da Vaga e adicionar traduções faltantes.

Backend:
- Atualizar modelo de Usuário, Entidade e DTOs para incluir campos de perfil (Data de Nascimento, Endereço, Formação, etc.).
- Atualizar UserRepository para persistir e recuperar os dados estendidos do usuário no PostgreSQL.
- Atualizar RegisterCandidateUseCase para mapear campos de entrada para a entidade Usuário.
2026-01-06 18:19:47 -03:00

265 lines
7.8 KiB
Go

package services
import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)
// StorageService handles object-storage operations (uploads, presigned URLs,
// public URL construction) against an S3-compatible backend (S3/R2/MinIO).
// Credentials are resolved at call time via the injected CredentialsService,
// with an environment-variable fallback (see getConfig).
type StorageService struct {
// credentialsService supplies the encrypted "storage" credentials payload.
credentialsService *CredentialsService
}
// NewStorageService constructs a StorageService backed by the given
// credentials provider.
func NewStorageService(creds *CredentialsService) *StorageService {
	svc := &StorageService{credentialsService: creds}
	return svc
}
// UploadConfig holds the necessary keys.
// It is populated either from the decrypted "storage" credentials JSON
// (field names match the json tags) or from environment variables as a
// fallback (see getConfig).
type UploadConfig struct {
// Endpoint is the S3-compatible endpoint; "https://" is prepended if no
// protocol is present.
Endpoint string `json:"endpoint"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
// Bucket may include a key prefix ("bucket/path") — UploadFile splits it.
Bucket string `json:"bucket"`
// Region defaults to "us-east-1" when empty or "auto" (R2 reports "auto").
Region string `json:"region"`
}
// getConfig resolves the storage configuration, preferring the encrypted
// credentials stored under the "storage" key and falling back to environment
// variables when the DB lookup fails. It validates required fields and
// normalizes region and endpoint before returning.
func (s *StorageService) getConfig(ctx context.Context) (UploadConfig, error) {
	payload, err := s.credentialsService.GetDecryptedKey(ctx, "storage")

	var cfg UploadConfig
	if err != nil {
		// Fallback to Environment Variables if DB lookup fails.
		fmt.Printf("Storage credentials not found in DB, falling back to ENV: %v\n", err)
		cfg.Endpoint = os.Getenv("AWS_ENDPOINT")
		cfg.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
		cfg.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
		cfg.Bucket = os.Getenv("S3_BUCKET")
		cfg.Region = os.Getenv("AWS_REGION")
	} else if jsonErr := json.Unmarshal([]byte(payload), &cfg); jsonErr != nil {
		return UploadConfig{}, fmt.Errorf("failed to parse storage credentials: %w", jsonErr)
	}

	// Report every missing required field at once (names mirror the ENV vars).
	var missing []string
	for _, req := range []struct{ value, name string }{
		{cfg.Endpoint, "AWS_ENDPOINT"},
		{cfg.AccessKey, "AWS_ACCESS_KEY_ID"},
		{cfg.SecretKey, "AWS_SECRET_ACCESS_KEY"},
		{cfg.Bucket, "S3_BUCKET"},
	} {
		if req.value == "" {
			missing = append(missing, req.name)
		}
	}
	if len(missing) > 0 {
		return UploadConfig{}, fmt.Errorf("storage credentials incomplete. Missing: %s", strings.Join(missing, ", "))
	}

	// R2 reports "auto" as region; the SDK needs a concrete region name.
	if cfg.Region == "" || cfg.Region == "auto" {
		cfg.Region = "us-east-1"
	}
	// Ensure endpoint has protocol.
	if !strings.HasPrefix(cfg.Endpoint, "https://") && !strings.HasPrefix(cfg.Endpoint, "http://") {
		cfg.Endpoint = "https://" + cfg.Endpoint
	}
	return cfg, nil
}
// getClient builds a presign-capable S3 client from the resolved storage
// configuration and returns it along with the configured bucket value.
func (s *StorageService) getClient(ctx context.Context) (*s3.PresignClient, string, error) {
	storageCfg, err := s.getConfig(ctx)
	if err != nil {
		return nil, "", err
	}

	awsCfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion(storageCfg.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(storageCfg.AccessKey, storageCfg.SecretKey, "")),
	)
	if err != nil {
		return nil, "", err
	}

	// Custom endpoint + path-style addressing (often needed for R2/MinIO).
	base := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(storageCfg.Endpoint)
		o.UsePathStyle = true
	})
	return s3.NewPresignClient(base), storageCfg.Bucket, nil
}
// GetPresignedUploadURL generates a URL for PUT requests
// GetPresignedUploadURL returns a time-limited URL (15 minutes) that lets a
// client PUT an object with the given key and content type directly to storage.
func (s *StorageService) GetPresignedUploadURL(ctx context.Context, key string, contentType string) (string, error) {
	psClient, bucket, err := s.getClient(ctx)
	if err != nil {
		return "", err
	}

	input := &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		ContentType: aws.String(contentType),
	}
	presigned, err := psClient.PresignPutObject(ctx, input, func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute
	})
	if err != nil {
		return "", fmt.Errorf("failed to presign upload: %w", err)
	}
	return presigned.URL, nil
}
// TestConnection checks if the creds are valid and bucket is accessible
func (s *StorageService) TestConnection(ctx context.Context) error {
psClient, bucket, err := s.getClient(ctx)
if err != nil {
return fmt.Errorf("failed to get client: %w", err)
}
// Note: PresignClient doesn't strictly validate creds against the cloud until used,
// checking existence via HeadBucket or ListBuckets using a real S3 client would be better.
// But getClient returns a PresignClient.
// We need a standard client to Verify.
// Re-instantiating logic or Refactoring `getClient` to return `*s3.Client` is best.
// For now, let's refactor `getClient` slightly to expose specific logic or just create a one-off checker here.
// Refetch raw creds to make a standard client
uCfg, err := s.getConfig(ctx)
if err != nil {
return fmt.Errorf("failed to get storage credentials: %w", err)
}
cfg, err := config.LoadDefaultConfig(ctx,
config.WithRegion(uCfg.Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(uCfg.AccessKey, uCfg.SecretKey, "")),
)
if err != nil {
return err
}
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = aws.String(uCfg.Endpoint)
o.UsePathStyle = true
})
// Try HeadBucket
_, err = client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(uCfg.Bucket),
})
if err != nil {
return fmt.Errorf("connection failed: %w", err)
}
// Just to be sure, presign client creation (original logic)
_ = psClient
_ = bucket
return nil
}
// GetPublicURL builds the publicly reachable URL for an object key as
// "<endpoint>/<bucket>/<key>" (trailing slashes on the endpoint are dropped).
func (s *StorageService) GetPublicURL(ctx context.Context, key string) (string, error) {
	cfg, err := s.getConfig(ctx)
	if err != nil {
		return "", err
	}
	base := strings.TrimRight(cfg.Endpoint, "/")
	return base + "/" + cfg.Bucket + "/" + key, nil
}
// UploadFile uploads a file directly to storage
func (s *StorageService) UploadFile(ctx context.Context, file io.Reader, folder string, filename string, contentType string) (string, error) {
// 1. Get Client
// Re-using logic but need a real client, not presigned
uCfg, err := s.getConfig(ctx)
if err != nil {
return "", err
}
// 2. Handle Bucket/Prefix logic
// If S3_BUCKET is "bucket/path/to/folder", we need to split it
bucketName := uCfg.Bucket
keyPrefix := ""
if strings.Contains(bucketName, "/") {
parts := strings.SplitN(bucketName, "/", 2)
bucketName = parts[0]
keyPrefix = parts[1]
}
cfg, err := config.LoadDefaultConfig(ctx,
config.WithRegion(uCfg.Region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(uCfg.AccessKey, uCfg.SecretKey, "")),
)
if err != nil {
return "", fmt.Errorf("failed to load aws config: %w", err)
}
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = aws.String(uCfg.Endpoint)
o.UsePathStyle = true
o.Region = uCfg.Region
})
// 3. Prepare Key (prepend prefix if exists)
// originalKey is folder/filename
originalKey := fmt.Sprintf("%s/%s", folder, filename)
s3Key := originalKey
if keyPrefix != "" {
// Ensure clean slashes
s3Key = fmt.Sprintf("%s/%s", strings.Trim(keyPrefix, "/"), originalKey)
}
// Read file into memory to calculate SHA256 correctly and avoid mismatch
// This also avoids issues if the reader is not seekable or changes during read
fileBytes, err := io.ReadAll(file)
if err != nil {
return "", fmt.Errorf("failed to read file content: %w", err)
}
// DEBUG:
fmt.Printf("[Storage] Uploading: Bucket=%s Key=%s Size=%d\n", bucketName, s3Key, len(fileBytes))
// Calculate SHA256 manually to ensure it matches what we send
hash := sha256.Sum256(fileBytes)
checksum := hex.EncodeToString(hash[:])
// 4. Upload
_, err = client.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(s3Key),
Body: bytes.NewReader(fileBytes),
ContentType: aws.String(contentType),
ChecksumSHA256: aws.String(checksum), // Explicitly set the checksum we calculated
})
if err != nil {
return "", fmt.Errorf("failed to put object: %w", err)
}
// 5. Return Public URL
// We pass originalKey because GetPublicURL uses uCfg.Bucket (which includes the prefix)
// So: endpoint + "/" + uCfg.Bucket ("bucket/prefix") + "/" + originalKey ("folder/file")
// Result: endpoint/bucket/prefix/folder/file -> CORRECT
return s.GetPublicURL(ctx, originalKey)
}