feat(backend): implement S3 object storage with pre-signed URLs
- Add s3_storage.go service using AWS SDK v2
- Support custom S3-compatible endpoints (Civo)
- Implement pre-signed URL generation for uploads/downloads
- Add storage_handler.go with REST endpoints
- Register protected storage routes in router
- Graceful degradation when S3 not configured
This commit is contained in:
parent
c6e0a70d50
commit
ce6e35aefd
3 changed files with 324 additions and 0 deletions
170
backend/internal/handlers/storage_handler.go
Normal file
170
backend/internal/handlers/storage_handler.go
Normal file
|
|
@ -0,0 +1,170 @@
|
||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/rede5/gohorsejobs/backend/internal/infrastructure/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StorageHandler handles file storage operations: issuing pre-signed
// upload/download URLs and deleting stored objects via the S3 service.
type StorageHandler struct {
	// Storage is the underlying S3-compatible object storage service.
	Storage *storage.S3Storage
}

// NewStorageHandler creates a new storage handler backed by the given
// S3 storage service.
func NewStorageHandler(s *storage.S3Storage) *StorageHandler {
	return &StorageHandler{Storage: s}
}
|
||||||
|
|
||||||
|
// UploadURLRequest represents a request for a pre-signed upload URL.
type UploadURLRequest struct {
	Filename    string `json:"filename"`    // required; only its extension ends up in the generated key
	ContentType string `json:"contentType"` // optional; inferred from the file extension when empty
	Folder      string `json:"folder"`      // Optional: one of logos, resumes, documents, avatars; defaults to "uploads"
}

// UploadURLResponse represents the response with a pre-signed upload URL.
type UploadURLResponse struct {
	UploadURL string `json:"uploadUrl"` // pre-signed PUT URL the client uploads to
	Key       string `json:"key"`       // object key under which the file will be stored
	PublicURL string `json:"publicUrl"` // public URL of the object (meaningful only if the bucket is public)
	ExpiresIn int    `json:"expiresIn"` // seconds
}

// DownloadURLRequest represents a request for a pre-signed download URL.
type DownloadURLRequest struct {
	Key string `json:"key"` // object key to download; required
}

// DownloadURLResponse represents the response with a pre-signed download URL.
type DownloadURLResponse struct {
	DownloadURL string `json:"downloadUrl"` // pre-signed GET URL
	ExpiresIn   int    `json:"expiresIn"`   // seconds
}
|
||||||
|
|
||||||
|
// GenerateUploadURL handles POST /api/v1/storage/upload-url
|
||||||
|
func (h *StorageHandler) GenerateUploadURL(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var req UploadURLRequest
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, "Invalid request body", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Filename == "" {
|
||||||
|
http.Error(w, "Filename is required", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.ContentType == "" {
|
||||||
|
// Try to infer from extension
|
||||||
|
ext := strings.ToLower(filepath.Ext(req.Filename))
|
||||||
|
switch ext {
|
||||||
|
case ".jpg", ".jpeg":
|
||||||
|
req.ContentType = "image/jpeg"
|
||||||
|
case ".png":
|
||||||
|
req.ContentType = "image/png"
|
||||||
|
case ".gif":
|
||||||
|
req.ContentType = "image/gif"
|
||||||
|
case ".webp":
|
||||||
|
req.ContentType = "image/webp"
|
||||||
|
case ".pdf":
|
||||||
|
req.ContentType = "application/pdf"
|
||||||
|
case ".doc":
|
||||||
|
req.ContentType = "application/msword"
|
||||||
|
case ".docx":
|
||||||
|
req.ContentType = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
|
||||||
|
default:
|
||||||
|
req.ContentType = "application/octet-stream"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate folder
|
||||||
|
folder := "uploads"
|
||||||
|
if req.Folder != "" {
|
||||||
|
validFolders := map[string]bool{
|
||||||
|
"logos": true,
|
||||||
|
"resumes": true,
|
||||||
|
"documents": true,
|
||||||
|
"avatars": true,
|
||||||
|
}
|
||||||
|
if validFolders[req.Folder] {
|
||||||
|
folder = req.Folder
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate unique key
|
||||||
|
ext := filepath.Ext(req.Filename)
|
||||||
|
uniqueID := uuid.New().String()
|
||||||
|
timestamp := time.Now().Format("20060102")
|
||||||
|
key := fmt.Sprintf("%s/%s/%s%s", folder, timestamp, uniqueID, ext)
|
||||||
|
|
||||||
|
// Generate pre-signed URL (15 minutes expiry)
|
||||||
|
expiryMinutes := 15
|
||||||
|
uploadURL, err := h.Storage.GenerateUploadURL(key, req.ContentType, expiryMinutes)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, fmt.Sprintf("Failed to generate upload URL: %v", err), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response := UploadURLResponse{
|
||||||
|
UploadURL: uploadURL,
|
||||||
|
Key: key,
|
||||||
|
PublicURL: h.Storage.GetPublicURL(key),
|
||||||
|
ExpiresIn: expiryMinutes * 60,
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
json.NewEncoder(w).Encode(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateDownloadURL handles POST /api/v1/storage/download-url
|
||||||
|
func (h *StorageHandler) GenerateDownloadURL(w http.ResponseWriter, r *http.Request) {
|
||||||
|
var req DownloadURLRequest
|
||||||
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||||
|
http.Error(w, "Invalid request body", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Key == "" {
|
||||||
|
http.Error(w, "Key is required", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate pre-signed URL (1 hour expiry)
|
||||||
|
expiryMinutes := 60
|
||||||
|
downloadURL, err := h.Storage.GenerateDownloadURL(req.Key, expiryMinutes)
|
||||||
|
if err != nil {
|
||||||
|
http.Error(w, fmt.Sprintf("Failed to generate download URL: %v", err), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
response := DownloadURLResponse{
|
||||||
|
DownloadURL: downloadURL,
|
||||||
|
ExpiresIn: expiryMinutes * 60,
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
json.NewEncoder(w).Encode(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile handles DELETE /api/v1/storage/files
|
||||||
|
func (h *StorageHandler) DeleteFile(w http.ResponseWriter, r *http.Request) {
|
||||||
|
key := r.URL.Query().Get("key")
|
||||||
|
if key == "" {
|
||||||
|
http.Error(w, "Key query parameter is required", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := h.Storage.DeleteObject(key); err != nil {
|
||||||
|
http.Error(w, fmt.Sprintf("Failed to delete file: %v", err), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.WriteHeader(http.StatusNoContent)
|
||||||
|
}
|
||||||
139
backend/internal/infrastructure/storage/s3_storage.go
Normal file
139
backend/internal/infrastructure/storage/s3_storage.go
Normal file
|
|
@ -0,0 +1,139 @@
|
||||||
|
package storage
|
||||||
|
|
||||||
|
import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)
|
||||||
|
|
||||||
|
// S3Storage handles S3-compatible object storage operations:
// pre-signed upload/download URLs and object deletion.
type S3Storage struct {
	client    *s3.Client        // raw S3 client, used for direct calls such as DeleteObject
	presigner *s3.PresignClient // presign client used to mint pre-signed URLs
	bucket    string            // target bucket name (from S3_BUCKET)
	endpoint  string            // custom endpoint for S3-compatible providers; empty means AWS
}
|
||||||
|
|
||||||
|
// NewS3Storage creates a new S3 storage service
|
||||||
|
func NewS3Storage() (*S3Storage, error) {
|
||||||
|
region := os.Getenv("AWS_REGION")
|
||||||
|
if region == "" {
|
||||||
|
region = "us-east-1"
|
||||||
|
}
|
||||||
|
|
||||||
|
accessKey := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||||
|
secretKey := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||||
|
endpoint := os.Getenv("AWS_ENDPOINT")
|
||||||
|
bucket := os.Getenv("S3_BUCKET")
|
||||||
|
|
||||||
|
if accessKey == "" || secretKey == "" || bucket == "" {
|
||||||
|
return nil, fmt.Errorf("missing required S3 configuration (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, S3_BUCKET)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create custom credentials provider
|
||||||
|
creds := credentials.NewStaticCredentialsProvider(accessKey, secretKey, "")
|
||||||
|
|
||||||
|
// Build S3 config
|
||||||
|
cfg, err := config.LoadDefaultConfig(context.Background(),
|
||||||
|
config.WithRegion(region),
|
||||||
|
config.WithCredentialsProvider(creds),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to load AWS config: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create S3 client with custom endpoint for S3-compatible storage (like Civo)
|
||||||
|
var client *s3.Client
|
||||||
|
if endpoint != "" {
|
||||||
|
client = s3.NewFromConfig(cfg, func(o *s3.Options) {
|
||||||
|
o.BaseEndpoint = aws.String(endpoint)
|
||||||
|
o.UsePathStyle = true // Required for most S3-compatible services
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
client = s3.NewFromConfig(cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
presigner := s3.NewPresignClient(client)
|
||||||
|
|
||||||
|
return &S3Storage{
|
||||||
|
client: client,
|
||||||
|
presigner: presigner,
|
||||||
|
bucket: bucket,
|
||||||
|
endpoint: endpoint,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateUploadURL generates a pre-signed URL for uploading a file
|
||||||
|
func (s *S3Storage) GenerateUploadURL(key string, contentType string, expiryMinutes int) (string, error) {
|
||||||
|
if expiryMinutes <= 0 {
|
||||||
|
expiryMinutes = 15 // Default 15 minutes
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &s3.PutObjectInput{
|
||||||
|
Bucket: aws.String(s.bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
ContentType: aws.String(contentType),
|
||||||
|
}
|
||||||
|
|
||||||
|
presignResult, err := s.presigner.PresignPutObject(context.Background(), input,
|
||||||
|
s3.WithPresignExpires(time.Duration(expiryMinutes)*time.Minute))
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to generate upload URL: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return presignResult.URL, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateDownloadURL generates a pre-signed URL for downloading a file
|
||||||
|
func (s *S3Storage) GenerateDownloadURL(key string, expiryMinutes int) (string, error) {
|
||||||
|
if expiryMinutes <= 0 {
|
||||||
|
expiryMinutes = 60 // Default 1 hour
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &s3.GetObjectInput{
|
||||||
|
Bucket: aws.String(s.bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
}
|
||||||
|
|
||||||
|
presignResult, err := s.presigner.PresignGetObject(context.Background(), input,
|
||||||
|
s3.WithPresignExpires(time.Duration(expiryMinutes)*time.Minute))
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("failed to generate download URL: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return presignResult.URL, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteObject deletes an object from the bucket
|
||||||
|
func (s *S3Storage) DeleteObject(key string) error {
|
||||||
|
input := &s3.DeleteObjectInput{
|
||||||
|
Bucket: aws.String(s.bucket),
|
||||||
|
Key: aws.String(key),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := s.client.DeleteObject(context.Background(), input)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete object: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPublicURL returns the public URL for an object (if bucket is public)
|
||||||
|
func (s *S3Storage) GetPublicURL(key string) string {
|
||||||
|
if s.endpoint != "" {
|
||||||
|
return fmt.Sprintf("%s/%s/%s", s.endpoint, s.bucket, key)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("https://%s.s3.amazonaws.com/%s", s.bucket, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBucket returns the name of the bucket this storage service operates on.
func (s *S3Storage) GetBucket() string {
	return s.bucket
}
|
||||||
|
|
@ -1,6 +1,7 @@
|
||||||
package router
|
package router
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
|
@ -8,6 +9,7 @@ import (
|
||||||
"github.com/rede5/gohorsejobs/backend/internal/database"
|
"github.com/rede5/gohorsejobs/backend/internal/database"
|
||||||
"github.com/rede5/gohorsejobs/backend/internal/handlers"
|
"github.com/rede5/gohorsejobs/backend/internal/handlers"
|
||||||
"github.com/rede5/gohorsejobs/backend/internal/infrastructure/persistence/postgres"
|
"github.com/rede5/gohorsejobs/backend/internal/infrastructure/persistence/postgres"
|
||||||
|
"github.com/rede5/gohorsejobs/backend/internal/infrastructure/storage"
|
||||||
"github.com/rede5/gohorsejobs/backend/internal/services"
|
"github.com/rede5/gohorsejobs/backend/internal/services"
|
||||||
|
|
||||||
// Core Imports
|
// Core Imports
|
||||||
|
|
@ -87,6 +89,19 @@ func NewRouter() http.Handler {
|
||||||
mux.HandleFunc("GET /applications/{id}", applicationHandler.GetApplicationByID)
|
mux.HandleFunc("GET /applications/{id}", applicationHandler.GetApplicationByID)
|
||||||
mux.HandleFunc("PUT /applications/{id}/status", applicationHandler.UpdateApplicationStatus)
|
mux.HandleFunc("PUT /applications/{id}/status", applicationHandler.UpdateApplicationStatus)
|
||||||
|
|
||||||
|
// --- STORAGE ROUTES ---
|
||||||
|
// Initialize S3 Storage (optional - graceful degradation if not configured)
|
||||||
|
s3Storage, err := storage.NewS3Storage()
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Warning: S3 storage not available: %v", err)
|
||||||
|
} else {
|
||||||
|
storageHandler := handlers.NewStorageHandler(s3Storage)
|
||||||
|
mux.Handle("POST /api/v1/storage/upload-url", authMiddleware.HeaderAuthGuard(http.HandlerFunc(storageHandler.GenerateUploadURL)))
|
||||||
|
mux.Handle("POST /api/v1/storage/download-url", authMiddleware.HeaderAuthGuard(http.HandlerFunc(storageHandler.GenerateDownloadURL)))
|
||||||
|
mux.Handle("DELETE /api/v1/storage/files", authMiddleware.HeaderAuthGuard(http.HandlerFunc(storageHandler.DeleteFile)))
|
||||||
|
log.Println("S3 storage routes registered successfully")
|
||||||
|
}
|
||||||
|
|
||||||
// Swagger Route
|
// Swagger Route
|
||||||
mux.HandleFunc("/swagger/", httpSwagger.WrapHandler)
|
mux.HandleFunc("/swagger/", httpSwagger.WrapHandler)
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue