package services

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// StorageService provides S3-compatible object storage operations
// (presigned URLs, direct uploads, deletes). Credentials are resolved
// from the CredentialsService with an environment-variable fallback.
type StorageService struct {
	credentialsService *CredentialsService
}

// NewStorageService constructs a StorageService backed by cs for
// credential lookup.
func NewStorageService(cs *CredentialsService) *StorageService {
	return &StorageService{credentialsService: cs}
}

// UploadConfig holds the keys required to talk to an S3-compatible endpoint.
type UploadConfig struct {
	Endpoint  string `json:"endpoint"`
	AccessKey string `json:"accessKey"`
	SecretKey string `json:"secretKey"`
	Bucket    string `json:"bucket"`
	Region    string `json:"region"`
}

// getConfig resolves storage credentials, preferring the encrypted DB
// record under the "storage" key and falling back to environment
// variables when that lookup fails. It validates the required fields,
// defaults the region, and normalizes the endpoint to carry a protocol.
func (s *StorageService) getConfig(ctx context.Context) (UploadConfig, error) {
	payload, err := s.credentialsService.GetDecryptedKey(ctx, "storage")
	var uCfg UploadConfig
	if err != nil {
		// Fallback to environment variables if the DB lookup fails.
		fmt.Printf("Storage credentials not found in DB, falling back to ENV: %v\n", err)
		uCfg = UploadConfig{
			Endpoint:  os.Getenv("AWS_ENDPOINT"),
			AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
			SecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
			Bucket:    os.Getenv("S3_BUCKET"),
			Region:    os.Getenv("AWS_REGION"),
		}
	} else if err := json.Unmarshal([]byte(payload), &uCfg); err != nil {
		return UploadConfig{}, fmt.Errorf("failed to parse storage credentials: %w", err)
	}

	// Collect every missing required field so the error reports them all.
	var missing []string
	if uCfg.Endpoint == "" {
		missing = append(missing, "AWS_ENDPOINT")
	}
	if uCfg.AccessKey == "" {
		missing = append(missing, "AWS_ACCESS_KEY_ID")
	}
	if uCfg.SecretKey == "" {
		missing = append(missing, "AWS_SECRET_ACCESS_KEY")
	}
	if uCfg.Bucket == "" {
		missing = append(missing, "S3_BUCKET")
	}
	if len(missing) > 0 {
		return UploadConfig{}, fmt.Errorf("storage credentials incomplete. Missing: %s", strings.Join(missing, ", "))
	}

	// The SDK needs a concrete region; R2-style "auto" is not one.
	if uCfg.Region == "" || uCfg.Region == "auto" {
		uCfg.Region = "us-east-1"
	}
	// Ensure the endpoint has a protocol.
	if !strings.HasPrefix(uCfg.Endpoint, "https://") && !strings.HasPrefix(uCfg.Endpoint, "http://") {
		uCfg.Endpoint = "https://" + uCfg.Endpoint
	}
	return uCfg, nil
}

// getClient returns a presigning client plus the configured bucket.
func (s *StorageService) getClient(ctx context.Context) (*s3.PresignClient, string, error) {
	client, bucket, err := s.getS3Client(ctx)
	if err != nil {
		return nil, "", err
	}
	return s3.NewPresignClient(client), bucket, nil
}

// getS3Client builds a standard S3 client from the resolved config and
// returns it together with the configured bucket value (which may embed
// a key prefix, e.g. "bucket/path/to/folder").
func (s *StorageService) getS3Client(ctx context.Context) (*s3.Client, string, error) {
	uCfg, err := s.getConfig(ctx)
	if err != nil {
		return nil, "", err
	}
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion(uCfg.Region),
		config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(uCfg.AccessKey, uCfg.SecretKey, "")),
	)
	if err != nil {
		return nil, "", err
	}
	// R2/S3-specific endpoint override.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String(uCfg.Endpoint)
		o.UsePathStyle = true // often needed for R2/MinIO
	})
	return client, uCfg.Bucket, nil
}

// sanitizeObjectKey trims surrounding whitespace and any leading slashes
// from an object key.
func (s *StorageService) sanitizeObjectKey(key string) string {
	return strings.TrimLeft(strings.TrimSpace(key), "/")
}

// GetPresignedUploadURL generates a URL for PUT requests, valid for
// 15 minutes.
func (s *StorageService) GetPresignedUploadURL(ctx context.Context, key string, contentType string) (string, error) {
	key = s.sanitizeObjectKey(key)
	if key == "" {
		return "", fmt.Errorf("key is required")
	}
	psClient, bucket, err := s.getClient(ctx)
	if err != nil {
		return "", err
	}
	req, err := psClient.PresignPutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		ContentType: aws.String(contentType),
	}, func(o *s3.PresignOptions) { o.Expires = 15 * time.Minute })
	if err != nil {
		return "", fmt.Errorf("failed to presign upload: %w", err)
	}
	return req.URL, nil
}
GET requests. func (s *StorageService) GetPresignedDownloadURL(ctx context.Context, key string) (string, error) { key = s.sanitizeObjectKey(key) if key == "" { return "", fmt.Errorf("key is required") } psClient, bucket, err := s.getClient(ctx) if err != nil { return "", err } req, err := psClient.PresignGetObject(ctx, &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), }, func(o *s3.PresignOptions) { o.Expires = 60 * time.Minute }) if err != nil { return "", fmt.Errorf("failed to presign download: %w", err) } return req.URL, nil } // DeleteObject removes an object from storage. func (s *StorageService) DeleteObject(ctx context.Context, key string) error { key = s.sanitizeObjectKey(key) if key == "" { return fmt.Errorf("key is required") } client, bucket, err := s.getS3Client(ctx) if err != nil { return err } _, err = client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), }) if err != nil { return fmt.Errorf("failed to delete object: %w", err) } return nil } // TestConnection checks if the creds are valid and bucket is accessible func (s *StorageService) TestConnection(ctx context.Context) error { psClient, bucket, err := s.getClient(ctx) if err != nil { return fmt.Errorf("failed to get client: %w", err) } // Note: PresignClient doesn't strictly validate creds against the cloud until used, // checking existence via HeadBucket or ListBuckets using a real S3 client would be better. // But getClient returns a PresignClient. // We need a standard client to Verify. // Re-instantiating logic or Refactoring `getClient` to return `*s3.Client` is best. // For now, let's refactor `getClient` slightly to expose specific logic or just create a one-off checker here. 
// Refetch raw creds to make a standard client uCfg, err := s.getConfig(ctx) if err != nil { return fmt.Errorf("failed to get storage credentials: %w", err) } cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(uCfg.Region), config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(uCfg.AccessKey, uCfg.SecretKey, "")), ) if err != nil { return err } client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.BaseEndpoint = aws.String(uCfg.Endpoint) o.UsePathStyle = true }) // Try HeadBucket _, err = client.HeadBucket(ctx, &s3.HeadBucketInput{ Bucket: aws.String(uCfg.Bucket), }) if err != nil { return fmt.Errorf("connection failed: %w", err) } // Just to be sure, presign client creation (original logic) _ = psClient _ = bucket return nil } func (s *StorageService) GetPublicURL(ctx context.Context, key string) (string, error) { uCfg, err := s.getConfig(ctx) if err != nil { return "", err } endpoint := strings.TrimRight(uCfg.Endpoint, "/") return fmt.Sprintf("%s/%s/%s", endpoint, uCfg.Bucket, key), nil } // UploadFile uploads a file directly to storage func (s *StorageService) UploadFile(ctx context.Context, file io.Reader, folder string, filename string, contentType string) (string, error) { // 1. Get Client // Re-using logic but need a real client, not presigned uCfg, err := s.getConfig(ctx) if err != nil { return "", err } // 2. 
Handle Bucket/Prefix logic // If S3_BUCKET is "bucket/path/to/folder", we need to split it bucketName := uCfg.Bucket keyPrefix := "" if strings.Contains(bucketName, "/") { parts := strings.SplitN(bucketName, "/", 2) bucketName = parts[0] keyPrefix = parts[1] } cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(uCfg.Region), config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(uCfg.AccessKey, uCfg.SecretKey, "")), ) if err != nil { return "", fmt.Errorf("failed to load aws config: %w", err) } client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.BaseEndpoint = aws.String(uCfg.Endpoint) o.UsePathStyle = true o.Region = uCfg.Region }) // 3. Prepare Key (prepend prefix if exists) // originalKey is folder/filename originalKey := fmt.Sprintf("%s/%s", folder, filename) s3Key := originalKey if keyPrefix != "" { // Ensure clean slashes s3Key = fmt.Sprintf("%s/%s", strings.Trim(keyPrefix, "/"), originalKey) } // Read file into memory to calculate SHA256 correctly and avoid mismatch // This also avoids issues if the reader is not seekable or changes during read fileBytes, err := io.ReadAll(file) if err != nil { return "", fmt.Errorf("failed to read file content: %w", err) } // DEBUG: fmt.Printf("[Storage] Uploading: Bucket=%s Key=%s Size=%d\n", bucketName, s3Key, len(fileBytes)) // Calculate SHA256 manually to ensure it matches what we send hash := sha256.Sum256(fileBytes) checksum := hex.EncodeToString(hash[:]) // 4. Upload _, err = client.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(s3Key), Body: bytes.NewReader(fileBytes), ContentType: aws.String(contentType), ChecksumSHA256: aws.String(checksum), // Explicitly set the checksum we calculated }) if err != nil { return "", fmt.Errorf("failed to put object: %w", err) } // 5. 
Return Public URL // We pass originalKey because GetPublicURL uses uCfg.Bucket (which includes the prefix) // So: endpoint + "/" + uCfg.Bucket ("bucket/prefix") + "/" + originalKey ("folder/file") // Result: endpoint/bucket/prefix/folder/file -> CORRECT return s.GetPublicURL(ctx, originalKey) }