package storage

import (
	"context"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"backend/database/fields"
	"backend/database/models/qvyun_v2/public/model"
)

// Build constructs the Storage backend described by the given storage record.
// It panics if the record carries an unknown storage type.
func Build(m *model.Storages) Storage {
	switch m.Type {
	case fields.StorageTypeLocal:
		return &Local{
			Host: m.Config.Local.Host,
			Path: m.Config.Local.Path,
		}
	case fields.StorageTypeS3:
		return &S3{
			Endpoint:        m.Config.S3.Endpoint,
			AccessKeyID:     m.Config.S3.AccessKeyID,
			AccessKeySecret: m.Config.S3.AccessKeySecret,
			BucketName:      m.Config.S3.BucketName,
			Path:            m.Config.S3.Path,
		}
	default:
		panic("invalid storage type")
	}
}

// Storage persists an uploaded file and returns it with its Path and Preview
// fields filled in.
type Storage interface {
	Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error)
}

// Local stores files on the local filesystem under Path and serves them via Host.
type Local struct {
	Host string
	Path string
}

// Save moves a previously staged upload from the temporary directory into a
// date-based directory under the configured root, e.g. <Path>/2006/01/02/<hash><ext>.
// The upload step is expected to have staged the file in os.TempDir() under its
// content hash plus the original extension.
func (p *Local) Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error) {
	dateDir := time.Now().Format("2006/01/02")

	saveToPath := filepath.Join(p.Path, dateDir)
	if err := os.MkdirAll(saveToPath, 0o755); err != nil {
		return nil, err
	}

	filename := file.Hash + filepath.Ext(file.Name)
	stagedPath := filepath.Join(os.TempDir(), filename)

	// Keep the hashed filename on disk so it matches the Path recorded below.
	// Note that os.Rename fails if the temporary directory and the storage
	// root live on different filesystems.
	saveTo := filepath.Join(saveToPath, filename)
	if err := os.Rename(stagedPath, saveTo); err != nil {
		return nil, err
	}

	// Path is recorded relative to the storage root; Preview is the public URL,
	// joined with path (not filepath) so it keeps forward slashes.
	file.Path = filepath.Join(dateDir, filename)
	file.Preview = strings.TrimRight(p.Host, "/") + "/" + path.Join(dateDir, filename)
	return file, nil
}

// S3 stores files in an S3-compatible bucket under the configured key prefix.
type S3 struct {
	Endpoint        string
	AccessKeyID     string
	AccessKeySecret string
	BucketName      string
	Path            string
}

// Save is not implemented yet for the S3 backend.
func (s *S3) Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error) {
	return nil, nil
}
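
// The S3 backend above is still a stub. For reference, a minimal sketch of
// Save using the MinIO Go client (github.com/minio/minio-go/v7) is outlined
// below. The client library, the Secure flag, the https preview URL, and the
// os.TempDir() staging convention (mirroring Local.Save) are assumptions, not
// part of this package; treat this as a sketch, not the final implementation.
//
//	func (s *S3) Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error) {
//		// Build an S3 client from the backend's credentials.
//		client, err := minio.New(s.Endpoint, &minio.Options{
//			Creds:  credentials.NewStaticV4(s.AccessKeyID, s.AccessKeySecret, ""),
//			Secure: true,
//		})
//		if err != nil {
//			return nil, err
//		}
//
//		// Same object naming scheme as the local backend: <prefix>/2006/01/02/<hash><ext>.
//		filename := file.Hash + filepath.Ext(file.Name)
//		objectKey := path.Join(s.Path, time.Now().Format("2006/01/02"), filename)
//
//		// Upload the file staged by the upload step in os.TempDir().
//		if _, err := client.FPutObject(ctx, s.BucketName, objectKey,
//			filepath.Join(os.TempDir(), filename), minio.PutObjectOptions{}); err != nil {
//			return nil, err
//		}
//
//		file.Path = objectKey
//		file.Preview = "https://" + s.Endpoint + "/" + path.Join(s.BucketName, objectKey)
//		return file, nil
//	}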