feat: upload

This commit is contained in:
Rogee
2025-01-15 10:43:16 +08:00
parent 9bfdf0e0ea
commit ab827715fb
16 changed files with 378 additions and 161 deletions

View File

@@ -1,28 +1,24 @@
package medias
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"os"
"path/filepath"
"time"
"backend/app/http/storages"
"backend/app/http/tenants"
"backend/database/models/qvyun_v2/public/model"
"backend/pkg/storage"
"backend/providers/jwt"
"github.com/gofiber/fiber/v3"
log "github.com/sirupsen/logrus"
)
const (
uploadTempDir = "./temp/chunks" // temp directory for chunks
uploadStorageDir = "./uploads" // final file storage directory
)
// @provider
type Controller struct {
tenantSvc *tenants.Service
svc *Service
storageSvc *storages.Service
log *log.Entry `inject:"false"`
}
@@ -32,119 +28,55 @@ func (ctl *Controller) Prepare() error {
}
// Upload
// @Router /api/v1/medias/upload [post]
// @Router /api/v1/medias/:tenant/upload [post]
// @Bind tenantSlug path
// @Bind req body
// @Bind file file
func (ctl *Controller) Upload(ctx fiber.Ctx, file *multipart.FileHeader, req *UploadReq) (*UploadResp, error) {
// use the file's MD5 to create a unique temp directory
tempDir := filepath.Join(uploadTempDir, req.FileMD5)
if err := os.MkdirAll(tempDir, 0o755); err != nil {
// @Bind claim local
func (ctl *Controller) Upload(ctx fiber.Ctx, claim *jwt.Claims, tenantSlug string, file *multipart.FileHeader, req *UploadReq) (*storage.UploadedFile, error) {
tenant, err := ctl.tenantSvc.GetTenantBySlug(ctx.Context(), tenantSlug)
if err != nil {
return nil, err
}
chunkPath := filepath.Join(tempDir, fmt.Sprintf("chunk_%d", req.ChunkNumber))
if err := ctx.SaveFile(file, chunkPath); err != nil {
defaultStorage, err := ctl.storageSvc.GetDefault(ctx.Context())
if err != nil {
return nil, err
}
// if this is the last chunk
if req.ChunkNumber == req.TotalChunks-1 {
// build a unique storage path for the final file
ext := filepath.Ext(req.FileName)
storageDir := filepath.Join(uploadStorageDir, time.Now().Format("2006/01/02"))
if err := os.MkdirAll(storageDir, 0o755); err != nil {
os.RemoveAll(tempDir)
uploader, err := storage.NewUploader(req.FileName, req.ChunkNumber, req.TotalChunks, req.FileMD5)
if err != nil {
return nil, err
}
finalPath := filepath.Join(storageDir, req.FileMD5+ext)
// sum the actual sizes of all chunks
totalSize, err := calculateTotalSize(tempDir, req.TotalChunks)
uploadedFile, err := uploader.Save(ctx, file)
if err != nil {
os.RemoveAll(tempDir)
return nil, fmt.Errorf("计算文件大小失败: %w", err)
return nil, err
}
// merge the chunks
if err := combineChunks(tempDir, finalPath, req.TotalChunks); err != nil {
os.RemoveAll(tempDir)
return nil, fmt.Errorf("合并文件失败: %w", err)
if uploadedFile == nil {
return uploadedFile, nil
}
// verify the MD5
calculatedMD5, err := calculateFileMD5(finalPath)
if err != nil || calculatedMD5 != req.FileMD5 {
os.RemoveAll(tempDir)
os.Remove(finalPath)
return nil, errors.New("文件MD5验证失败")
}
// clean up the temp directory
os.RemoveAll(tempDir)
return &UploadResp{
Files: []UploadFile{
{
HashID: calculatedMD5,
Name: req.FileName,
Path: finalPath,
Size: totalSize,
MimeType: file.Header.Get("Content-Type"),
},
},
}, nil
}
return &UploadResp{}, nil
}
// calculateTotalSize sums the actual sizes of all chunks
func calculateTotalSize(tempDir string, totalChunks int) (int64, error) {
var totalSize int64
for i := 0; i < totalChunks; i++ {
chunkPath := filepath.Join(tempDir, fmt.Sprintf("chunk_%d", i))
info, err := os.Stat(chunkPath)
uploadedFile, err = storage.Build(defaultStorage).Save(ctx.Context(), uploadedFile)
if err != nil {
return 0, err
}
totalSize += info.Size()
}
return totalSize, nil
return nil, err
}
func combineChunks(tempDir, finalPath string, totalChunks int) error {
finalFile, err := os.Create(finalPath)
if err != nil {
return err
}
defer finalFile.Close()
// save to db
_, err = ctl.svc.Create(ctx.Context(), &model.Medias{
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
TenantID: tenant.ID,
UserID: claim.UserID,
StorageID: defaultStorage.ID,
Name: uploadedFile.Name,
UUID: uploadedFile.Hash,
MimeType: uploadedFile.MimeType,
Size: uploadedFile.Size,
Path: uploadedFile.Path,
})
uploadedFile.Preview = ""
for i := 0; i < totalChunks; i++ {
chunkPath := fmt.Sprintf("%s/chunk_%d", tempDir, i)
chunk, err := os.ReadFile(chunkPath)
if err != nil {
return err
}
if _, err := finalFile.Write(chunk); err != nil {
return err
}
}
return nil
}
func calculateFileMD5(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
hash := md5.New()
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
return uploadedFile, err
}
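
For reference, a client drives this endpoint once per chunk, and only the final chunk's response carries the stored file. Below is a minimal Go sketch of one chunk request; the URL and the file_name/chunk_number form keys are assumptions inferred from UploadReq (total_chunks and file_md5 match its visible form tags), and the JWT auth header is omitted.

package client

import (
    "bytes"
    "fmt"
    "mime/multipart"
    "net/http"
)

// uploadChunk posts one chunk to the media upload endpoint.
// file_name and chunk_number are assumed field names; total_chunks
// and file_md5 match the form tags shown in UploadReq.
func uploadChunk(url, fileName, fileMD5 string, chunkNo, totalChunks int, chunk []byte) error {
    var body bytes.Buffer
    w := multipart.NewWriter(&body)
    fw, err := w.CreateFormFile("file", fileName)
    if err != nil {
        return err
    }
    if _, err := fw.Write(chunk); err != nil {
        return err
    }
    _ = w.WriteField("file_name", fileName)
    _ = w.WriteField("chunk_number", fmt.Sprint(chunkNo))
    _ = w.WriteField("total_chunks", fmt.Sprint(totalChunks))
    _ = w.WriteField("file_md5", fileMD5)
    if err := w.Close(); err != nil {
        return err
    }
    resp, err := http.Post(url, w.FormDataContentType(), &body)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("upload failed: %s", resp.Status)
    }
    return nil
}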

View File

@@ -6,16 +6,3 @@ type UploadReq struct {
TotalChunks int `form:"total_chunks"`
FileMD5 string `form:"file_md5"`
}
type UploadResp struct {
Files []UploadFile `json:"files"`
}
type UploadFile struct {
HashID string `json:"hash_id"`
Name string `json:"name"`
Size int64 `json:"size"`
MimeType string `json:"type"`
Path string `json:"path"`
Preview string `json:"preview"`
}

View File

@@ -3,6 +3,9 @@ package medias
import (
"database/sql"
"backend/app/http/storages"
"backend/app/http/tenants"
"git.ipao.vip/rogeecn/atom"
"git.ipao.vip/rogeecn/atom/container"
"git.ipao.vip/rogeecn/atom/contracts"
@@ -11,10 +14,14 @@ import (
func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func(
storageSvc *storages.Service,
svc *Service,
tenantSvc *tenants.Service,
) (*Controller, error) {
obj := &Controller{
storageSvc: storageSvc,
svc: svc,
tenantSvc: tenantSvc,
}
if err := obj.Prepare(); err != nil {
return nil, err

View File

@@ -4,6 +4,8 @@ package medias
import (
. "backend/pkg/f"
"backend/providers/jwt"
"mime/multipart"
_ "git.ipao.vip/rogeecn/atom"
_ "git.ipao.vip/rogeecn/atom/contracts"
@@ -28,9 +30,11 @@ func (r *Routes) Name() string {
func (r *Routes) Register(router fiber.Router) {
// register route group: Controller
router.Post("/api/v1/medias/upload", DataFunc2(
router.Post("/api/v1/medias/:tenant/upload", DataFunc4(
r.controller.Upload,
File("file"),
Local[*jwt.Claims]("claim"),
PathParam[string]("tenantSlug"),
File[multipart.FileHeader]("file"),
Body[UploadReq]("req"),
))

View File

@@ -1,10 +1,16 @@
package medias
import (
"context"
"database/sql"
"backend/database/models/qvyun_v2/public/model"
"backend/database/models/qvyun_v2/public/table"
"backend/providers/otel"
. "github.com/go-jet/jet/v2/postgres"
log "github.com/sirupsen/logrus"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)
// @provider:except
@@ -18,3 +24,19 @@ func (svc *Service) Prepare() error {
_ = Int(1)
return nil
}
// Create
func (svc *Service) Create(ctx context.Context, m *model.Medias) (*model.Medias, error) {
_, span := otel.Start(ctx, "medias.service.Create")
defer span.End()
tbl := table.Medias
stmt := tbl.INSERT(tbl.MutableColumns).MODEL(m).RETURNING(tbl.AllColumns)
span.SetAttributes(semconv.DBStatementKey.String(stmt.DebugSql()))
var ret model.Medias
if err := stmt.QueryContext(ctx, svc.db, &ret); err != nil {
return nil, err
}
return &ret, nil
}

View File

@@ -11,6 +11,6 @@ type Storage struct {
type CreateStorageReq struct {
Name string `json:"name"`
Config string `json:"config"`
Config fields.StorageConfig `json:"config"`
Type fields.StorageType `json:"type"`
}

View File

@@ -122,3 +122,19 @@ func (svc *Service) SetDefault(ctx context.Context, id int64) error {
// Commit transaction
return tx.Commit()
}
// GetDefault
func (svc *Service) GetDefault(ctx context.Context) (*model.Storages, error) {
_, span := otel.Start(ctx, "storages.service.GetDefault")
defer span.End()
tbl := table.Storages
stmt := tbl.SELECT(tbl.AllColumns).WHERE(tbl.IsDefault.EQ(Bool(true)))
span.SetAttributes(semconv.DBStatementKey.String(stmt.DebugSql()))
var storage model.Storages
if err := stmt.QueryContext(ctx, svc.db, &storage); err != nil {
return nil, err
}
return &storage, nil
}

View File

@@ -17,8 +17,6 @@ import (
const (
// StorageTypeLocal is a StorageType of type Local.
StorageTypeLocal StorageType = iota
// StorageTypeAliOSS is a StorageType of type AliOSS.
StorageTypeAliOSS
// StorageTypeS3 is a StorageType of type S3.
StorageTypeS3
// StorageTypeMinIO is a StorageType of type MinIO.
@@ -27,13 +25,12 @@ const (
var ErrInvalidStorageType = fmt.Errorf("not a valid StorageType, try [%s]", strings.Join(_StorageTypeNames, ", "))
const _StorageTypeName = "LocalAliOSSS3MinIO"
const _StorageTypeName = "LocalS3MinIO"
var _StorageTypeNames = []string{
_StorageTypeName[0:5],
_StorageTypeName[5:11],
_StorageTypeName[11:13],
_StorageTypeName[13:18],
_StorageTypeName[5:7],
_StorageTypeName[7:12],
}
// StorageTypeNames returns a list of possible string values of StorageType.
@@ -47,7 +44,6 @@ func StorageTypeNames() []string {
func StorageTypeValues() []StorageType {
return []StorageType{
StorageTypeLocal,
StorageTypeAliOSS,
StorageTypeS3,
StorageTypeMinIO,
}
@@ -55,9 +51,8 @@ func StorageTypeValues() []StorageType {
var _StorageTypeMap = map[StorageType]string{
StorageTypeLocal: _StorageTypeName[0:5],
StorageTypeAliOSS: _StorageTypeName[5:11],
StorageTypeS3: _StorageTypeName[11:13],
StorageTypeMinIO: _StorageTypeName[13:18],
StorageTypeS3: _StorageTypeName[5:7],
StorageTypeMinIO: _StorageTypeName[7:12],
}
// String implements the Stringer interface.
@@ -77,9 +72,8 @@ func (x StorageType) IsValid() bool {
var _StorageTypeValue = map[string]StorageType{
_StorageTypeName[0:5]: StorageTypeLocal,
_StorageTypeName[5:11]: StorageTypeAliOSS,
_StorageTypeName[11:13]: StorageTypeS3,
_StorageTypeName[13:18]: StorageTypeMinIO,
_StorageTypeName[5:7]: StorageTypeS3,
_StorageTypeName[7:12]: StorageTypeMinIO,
}
// ParseStorageType attempts to convert a string to a StorageType.
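
With AliOSS dropped, the generated code re-packs the names into "LocalS3MinIO" and re-derives every slice offset. A standalone sanity check of the new offsets (not part of the commit):

package main

import "fmt"

func main() {
    const name = "LocalS3MinIO"
    fmt.Println(name[0:5])  // Local
    fmt.Println(name[5:7])  // S3
    fmt.Println(name[7:12]) // MinIO
}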

View File

@@ -1,15 +1,21 @@
package fields
// swagger:enum StorageType
// ENUM( Local ,AliOSS, S3, MinIO)
// ENUM( Local, S3, MinIO)
type StorageType int16
type StorageConfig struct {
Path *string `json:"path"`
Local *LocalStorage `json:"local"`
S3 *StorageS3Config `json:"s3"`
}
type LocalStorage struct {
Path string `json:"path"`
Host string `json:"host"`
}
type StorageS3Config struct {
Host string `json:"host"`
Endpoint string `json:"endpoint"`
AccessKeyID string `json:"access_key_id"`
AccessKeySecret string `json:"access_key_secret"`
BucketName string `json:"bucket_name"`
Path string `json:"path"`
}
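
Since Config is now structured rather than a raw string, a storage row's config round-trips as JSON. A quick sketch of what a local-backend config serializes to (the host value is hypothetical):

package fields

import (
    "encoding/json"
    "fmt"
)

// DumpLocalConfig shows what a local-backend config serializes to.
// The host value is hypothetical.
func DumpLocalConfig() {
    cfg := StorageConfig{
        Local: &LocalStorage{Path: "./uploads", Host: "https://cdn.example.com"},
    }
    b, _ := json.Marshal(cfg)
    fmt.Println(string(b))
    // {"path":null,"local":{"path":"./uploads","host":"https://cdn.example.com"},"s3":null}
}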

View File

@@ -19,5 +19,5 @@ type Storages struct {
IsDefault bool `json:"is_default"`
Name string `json:"name"`
Type fields.StorageType `json:"type"`
Config string `json:"config"`
Config fields.StorageConfig `json:"config"`
}

View File

@@ -14,6 +14,7 @@ types:
storages:
type: StorageType
config: StorageConfig
posts:
stage: PostStage

View File

@@ -1 +1,26 @@
package main
import (
"os"
"path/filepath"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func Test_mkdir(t *testing.T) {
Convey("Test os.MkdirAll", t, func() {
path := filepath.Join(os.TempDir(), "abc")
defer os.RemoveAll(path)
Convey("first create a directory", func() {
err := os.MkdirAll(path, 0o755)
So(err, ShouldBeNil)
})
Convey("create a directory again", func() {
err := os.MkdirAll(path, 0o755)
So(err, ShouldBeNil)
})
})
}
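
The throwaway test above just confirms that os.MkdirAll is idempotent, which the uploader relies on when several chunks of the same file each try to create the MD5-named temp directory.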

View File

@@ -7,8 +7,9 @@ import (
"github.com/pkg/errors"
)
func File(key string) func(fiber.Ctx) (*multipart.FileHeader, error) {
func File[T any](key string) func(fiber.Ctx) (*multipart.FileHeader, error) {
return func(ctx fiber.Ctx) (*multipart.FileHeader, error) {
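// the type parameter only satisfies the generic binder signature;
// the handler always receives the *multipart.FileHeader from the form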
_ = new(T)
return ctx.FormFile(key)
}
}

View File

@@ -0,0 +1,70 @@
package storage
import (
"context"
"os"
"path/filepath"
"time"
"backend/database/fields"
"backend/database/models/qvyun_v2/public/model"
)
func Build(m *model.Storages) Storage {
switch m.Type {
case fields.StorageTypeLocal:
return &Local{
Host: m.Config.Local.Host,
Path: m.Config.Local.Path,
}
case fields.StorageTypeS3:
return &S3{
Endpoint: m.Config.S3.Endpoint,
AccessKeyID: m.Config.S3.AccessKeyID,
AccessKeySecret: m.Config.S3.AccessKeySecret,
BucketName: m.Config.S3.BucketName,
Path: m.Config.S3.Path,
}
default:
panic("invalid storage type")
}
}
type Storage interface {
Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error)
}
type Local struct {
Host string
Path string
}
func (p *Local) Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error) {
// store under the configured local root, partitioned by date
saveToPath := filepath.Join(p.Path, time.Now().Format("2006/01/02"))
if err := os.MkdirAll(saveToPath, 0o755); err != nil {
return nil, err
}
filename := file.Hash + filepath.Ext(file.Name)
// the uploader left the merged file in the OS temp directory
finalPath := filepath.Join(os.TempDir(), filename)
// keep the hash-based filename so it matches Path and Preview below
saveTo := filepath.Join(saveToPath, filename)
if err := os.Rename(finalPath, saveTo); err != nil {
return nil, err
}
file.Path = filepath.Join(time.Now().Format("2006/01/02"), filename)
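// note: filepath.Join collapses "//", so a Host carrying a scheme
// ("https://...") gets mangled; string concatenation or net/url is safer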
file.Preview = filepath.Join(p.Host, time.Now().Format("2006/01/02"), filename)
return file, nil
}
type S3 struct {
Endpoint string
AccessKeyID string
AccessKeySecret string
BucketName string
Path string
}
func (s *S3) Save(ctx context.Context, file *UploadedFile) (*UploadedFile, error) {
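// S3 upload is not implemented yet; returning a nil file means the
// controller would dereference nil when saving the media record, so an
// S3 storage must not be set as default until this is filled in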
return nil, nil
}

View File

@@ -0,0 +1,152 @@
package storage
import (
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"os"
"path/filepath"
"time"
"github.com/gofiber/fiber/v3"
)
type Uploader struct {
tmpDir string
chunkPath string
fileName string
chunkNumber int
totalChunks int
fileMD5 string
dst string
ext string
finalPath string
}
type UploadedFile struct {
Hash string `json:"hash"`
Name string `json:"name"`
Size int64 `json:"size"`
MimeType string `json:"type"`
Path string `json:"path"`
Preview string `json:"preview"`
}
func NewUploader(fileName string, chunkNumber, totalChunks int, fileMD5 string) (*Uploader, error) {
// use the file's MD5 to create a unique temp directory
tempDir := filepath.Join(os.TempDir(), fileMD5)
if err := os.MkdirAll(tempDir, 0o755); err != nil {
return nil, err
}
return &Uploader{
tmpDir: filepath.Join(os.TempDir(), fileMD5),
chunkPath: filepath.Join(os.TempDir(), fileMD5, fmt.Sprintf("chunk_%d", chunkNumber)),
fileName: fileName,
chunkNumber: chunkNumber,
totalChunks: totalChunks,
fileMD5: fileMD5,
ext: filepath.Ext(fileName),
finalPath: filepath.Join(os.TempDir(), fileMD5+filepath.Ext(fileName)),
}, nil
}
func (up *Uploader) Save(ctx fiber.Ctx, file *multipart.FileHeader) (*UploadedFile, error) {
// every chunk, including the last one, must be written to disk first
if err := ctx.SaveFile(file, up.chunkPath); err != nil {
return nil, err
}
if up.chunkNumber != up.totalChunks-1 {
return nil, nil
}
// this is the last chunk:
// build a unique storage path for the final file
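// note: dst is never assigned by NewUploader, so this MkdirAll creates a
// date directory relative to the working directory; the merged file itself
// goes to finalPath in os.TempDir and is moved later by the storage backend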
storageDir := filepath.Join(up.dst, time.Now().Format("2006/01/02"))
if err := os.MkdirAll(storageDir, 0o755); err != nil {
os.RemoveAll(filepath.Join(os.TempDir(), up.fileMD5))
return nil, err
}
// sum the actual sizes of all chunks
totalSize, err := calculateTotalSize(up.tmpDir, up.totalChunks)
if err != nil {
os.RemoveAll(up.tmpDir)
return nil, fmt.Errorf("计算文件大小失败: %w", err)
}
// merge the chunks
if err := combineChunks(up.tmpDir, up.finalPath, up.totalChunks); err != nil {
os.RemoveAll(up.tmpDir)
return nil, fmt.Errorf("合并文件失败: %w", err)
}
// verify the MD5
calculatedMD5, err := calculateFileMD5(up.finalPath)
if err != nil || calculatedMD5 != up.fileMD5 {
os.RemoveAll(up.tmpDir)
os.Remove(up.finalPath)
return nil, errors.New("文件MD5验证失败")
}
// clean up the temp directory
os.RemoveAll(up.tmpDir)
return &UploadedFile{
Hash: calculatedMD5,
Name: up.fileName,
Path: up.finalPath,
Size: totalSize,
MimeType: file.Header.Get("Content-Type"),
}, nil
}
// calculateTotalSize sums the actual sizes of all chunks
func calculateTotalSize(tempDir string, totalChunks int) (int64, error) {
var totalSize int64
for i := 0; i < totalChunks; i++ {
chunkPath := filepath.Join(tempDir, fmt.Sprintf("chunk_%d", i))
info, err := os.Stat(chunkPath)
if err != nil {
return 0, err
}
totalSize += info.Size()
}
return totalSize, nil
}
func combineChunks(tempDir, finalPath string, totalChunks int) error {
finalFile, err := os.Create(finalPath)
if err != nil {
return err
}
defer finalFile.Close()
for i := 0; i < totalChunks; i++ {
chunkPath := fmt.Sprintf("%s/chunk_%d", tempDir, i)
chunk, err := os.ReadFile(chunkPath)
if err != nil {
return err
}
if _, err := finalFile.Write(chunk); err != nil {
return err
}
}
return nil
}
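
combineChunks buffers each whole chunk with os.ReadFile; with 2 MB chunks that is fine, but an io.Copy variant keeps memory flat regardless of chunk size. An illustrative alternative (not in the commit), assuming the same imports as the file above:

// combineChunksStreaming streams each chunk with io.Copy instead of
// buffering it whole in memory.
func combineChunksStreaming(tempDir, finalPath string, totalChunks int) error {
    out, err := os.Create(finalPath)
    if err != nil {
        return err
    }
    defer out.Close()
    for i := 0; i < totalChunks; i++ {
        in, err := os.Open(filepath.Join(tempDir, fmt.Sprintf("chunk_%d", i)))
        if err != nil {
            return err
        }
        _, err = io.Copy(out, in)
        in.Close()
        if err != nil {
            return err
        }
    }
    return nil
}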
func calculateFileMD5(filePath string) (string, error) {
file, err := os.Open(filePath)
if err != nil {
return "", err
}
defer file.Close()
hash := md5.New()
if _, err := io.Copy(hash, file); err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
}

View File

@@ -15,6 +15,7 @@ import SparkMD5 from 'spark-md5';
import { ref } from 'vue';
const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB chunks
const MAX_CONCURRENT_UPLOADS = 4; // Changed from 3 to 4
const progress = ref(0);
const status = ref('');
@@ -68,7 +69,6 @@ const calculateFileMD5 = async (file) => {
});
};
const MAX_CONCURRENT_UPLOADS = 3;
const uploadChunks = async (chunks, file, fileMD5) => {
const pending = [...Array(chunks.length).keys()];