feat: migrate services
Some checks failed
build quyun / Build (push) Failing after 2m50s

This commit is contained in:
2025-12-19 19:05:12 +08:00
parent 005585c53b
commit 557a641f41
71 changed files with 5626 additions and 280 deletions

View File

@@ -0,0 +1,109 @@
package jobs
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: BalancePayNotify satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*BalancePayNotify)(nil)

// BalancePayNotify is the job payload for settling an order from the user's balance.
type BalancePayNotify struct {
	// OrderNo identifies the order to settle.
	OrderNo string `json:"order_no"`
}

// InsertOpts places the job on the default queue at default priority.
func (s BalancePayNotify) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (BalancePayNotify) Kind() string { return "balance_pay_notify" }

// UniqueID returns the kind itself. NOTE(review): this makes every
// BalancePayNotify job share one unique ID regardless of OrderNo — confirm
// that per-order deduplication is not intended.
func (a BalancePayNotify) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[BalancePayNotify] = (*BalancePayNotifyWorker)(nil)

// BalancePayNotifyWorker processes BalancePayNotify jobs.
// @provider(job)
type BalancePayNotifyWorker struct {
	WorkerDefaults[BalancePayNotify]
}
// Work settles the order referenced by job.Args.OrderNo from the user's
// balance: it verifies the order is still pending, checks affordability, then
// — inside one transaction — deducts the balance, records the post purchase,
// and marks the order completed. Unrecoverable conditions are wrapped in
// JobCancel so the job is not retried.
func (w *BalancePayNotifyWorker) Work(ctx context.Context, job *Job[BalancePayNotify]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	// FIX: use the job's ctx (not context.Background()) for all DB calls so
	// they observe cancellation/deadlines from the queue runtime, consistent
	// with the transaction below which already uses ctx.
	order, err := model.OrdersModel().GetByOrderNo(ctx, job.Args.OrderNo)
	if err != nil {
		log.Errorf("GetByOrderNo error:%v", err)
		return err
	}
	if order.Status != fields.OrderStatusPending {
		log.Infof("Order %s is paid, processing...", job.Args.OrderNo)
		return JobCancel(fmt.Errorf("Order already paid, currently status: %d", order.Status))
	}
	user, err := model.UsersModel().GetByID(ctx, order.UserID)
	if err != nil {
		log.Errorf("GetByID error:%v", err)
		return errors.Wrap(err, "get user error")
	}
	// Discount is a percentage: 100 means full price.
	payPrice := order.Price * int64(order.Discount) / 100
	order.PaymentMethod = "balance"
	order.Status = fields.OrderStatusCompleted
	meta := order.Meta.Data
	// NOTE(review): the affordability check uses meta.CostBalance while the
	// actual deduction below uses payPrice — confirm these are meant to differ.
	if user.Balance-meta.CostBalance < 0 {
		log.Errorf("User %d balance is not enough, current balance: %d, cost: %d", user.ID, user.Balance, payPrice)
		return JobCancel(
			fmt.Errorf("User %d balance is not enough, current balance: %d, cost: %d", user.ID, user.Balance, payPrice),
		)
	}
	log.Infof("Updated order details: %+v", order)
	tx, err := model.Transaction(ctx)
	if err != nil {
		return errors.Wrap(err, "Transaction error")
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()
	// update user balance
	err = user.SetBalance(ctx, user.Balance-payPrice)
	if err != nil {
		log.WithError(err).Error("SetBalance error")
		return JobCancel(errors.Wrap(err, "set user balance failed"))
	}
	if err := user.BuyPosts(ctx, order.PostID, order.Price); err != nil {
		log.Errorf("BuyPosts error:%v", err)
		return errors.Wrap(err, "BuyPosts error")
	}
	if err := order.Update(ctx); err != nil {
		log.Errorf("Update order error:%v", err)
		return errors.Wrap(err, "Update order error")
	}
	if err := tx.Commit(); err != nil {
		log.Errorf("Commit error:%v", err)
		return errors.Wrap(err, "Commit error")
	}
	log.Infof("Successfully processed order %s", order.OrderNo)
	return nil
}
// NextRetry schedules every retry a fixed 30 seconds after the failure.
func (w *BalancePayNotifyWorker) NextRetry(job *Job[BalancePayNotify]) time.Time {
	const delay = 30 * time.Second
	return time.Now().Add(delay)
}

View File

@@ -1,36 +0,0 @@
package jobs
import (
"time"
. "github.com/riverqueue/river"
"github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: DemoCronJob satisfies the atom CronJob contract.
var _ contracts.CronJob = (*DemoCronJob)(nil)

// DemoCronJob periodically enqueues a DemoJob for demonstration purposes.
// @provider(cronjob)
type DemoCronJob struct {
	// log is marked inject:"false" to opt out of dependency injection.
	log *logrus.Entry `inject:"false"`
}

// Prepare implements contracts.CronJob. No setup is required.
func (DemoCronJob) Prepare() error {
	return nil
}

// Args implements contracts.CronJob: it schedules one DemoJob every
// 10 seconds, not triggered at startup.
func (DemoCronJob) Args() []contracts.CronJobArg {
	return []contracts.CronJobArg{
		{
			Arg: DemoJob{
				Strings: []string{"a", "b", "c", "d"},
			},
			PeriodicInterval: PeriodicInterval(time.Second * 10),
			RunOnStart:       false,
		},
	}
}

View File

@@ -1,53 +0,0 @@
package jobs
import (
"context"
"sort"
"time"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
_ "go.ipao.vip/atom/contracts"
)
// Compile-time check: DemoJob satisfies the atom JobArgs contract.
var _ contracts.JobArgs = DemoJob{}

// DemoJob is a demonstration payload: a list of strings to sort.
type DemoJob struct {
	Strings []string `json:"strings"`
}

// InsertOpts places the job on the default queue at default priority.
func (s DemoJob) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (DemoJob) Kind() string { return "demo_job" }

// UniqueID returns the kind; all DemoJob instances share one unique ID.
func (a DemoJob) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[DemoJob] = (*DemoJobWorker)(nil)

// DemoJobWorker sorts and logs the strings carried by a DemoJob.
// @provider(job)
type DemoJobWorker struct {
	WorkerDefaults[DemoJob]
}

// NextRetry schedules retries 30 seconds after a failure.
func (w *DemoJobWorker) NextRetry(job *Job[DemoJob]) time.Time {
	return time.Now().Add(30 * time.Second)
}
// Work sorts job.Args.Strings in place and logs the sorted result.
func (w *DemoJobWorker) Work(ctx context.Context, job *Job[DemoJob]) error {
	entry := log.WithField("job", job.Args.Kind())
	entry.Infof("[START] %s args: %v", job.Args.Kind(), job.Args.Strings)
	defer entry.Infof("[END] %s", job.Args.Kind())
	// modify below
	sort.Strings(job.Args.Strings)
	timestamp := time.Now().Format(time.TimeOnly)
	entry.Infof("[%s] Sorted strings: %v\n", timestamp, job.Args.Strings)
	return nil
}

View File

@@ -1,53 +0,0 @@
package jobs
import (
"context"
"testing"
"quyun/v2/app/commands/testx"
"quyun/v2/app/services"
. "github.com/riverqueue/river"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/suite"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
"go.uber.org/dig"
)
// DemoJobSuiteInjectParams declares the dependencies dig injects into the suite.
type DemoJobSuiteInjectParams struct {
	dig.In
	// Initials forces the app's group initializers to run before the suite.
	Initials []contracts.Initial `group:"initials"` // nolint:structcheck
}

// DemoJobSuite exercises DemoJobWorker through testify's suite runner.
type DemoJobSuite struct {
	suite.Suite
	DemoJobSuiteInjectParams
}

// Test_DemoJob wires the job and service providers, then runs the suite
// inside the testx container.
func Test_DemoJob(t *testing.T) {
	providers := testx.Default().With(Provide, services.Provide)
	testx.Serve(providers, t, func(p DemoJobSuiteInjectParams) {
		suite.Run(t, &DemoJobSuite{DemoJobSuiteInjectParams: p})
	})
}

// Test_Work verifies Work completes without error on a small payload.
func (t *DemoJobSuite) Test_Work() {
	Convey("test_work", t.T(), func() {
		Convey("step 1", func() {
			job := &Job[DemoJob]{
				Args: DemoJob{
					Strings: []string{"a", "b", "c"},
				},
			}
			worker := &DemoJobWorker{}
			err := worker.Work(context.Background(), job)
			So(err, ShouldBeNil)
		})
	})
}

View File

@@ -0,0 +1,104 @@
package jobs
import (
"context"
"os"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
_ "go.ipao.vip/atom/contracts"
)
// Compile-time check: DownloadFromAliOSS satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*DownloadFromAliOSS)(nil)

// DownloadFromAliOSS is the job payload for fetching a media file from Ali
// OSS into local storage.
type DownloadFromAliOSS struct {
	// MediaHash identifies the media row whose file should be downloaded.
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (s DownloadFromAliOSS) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (s DownloadFromAliOSS) Kind() string { return "download_from_ali_oss" }

// UniqueID returns the kind. NOTE(review): all DownloadFromAliOSS jobs share
// one unique ID regardless of MediaHash — confirm per-media dedup is not
// intended.
func (a DownloadFromAliOSS) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[DownloadFromAliOSS] = (*DownloadFromAliOSSWorker)(nil)

// DownloadFromAliOSSWorker downloads media from OSS and chains to VideoCut.
// @provider(job)
type DownloadFromAliOSSWorker struct {
	WorkerDefaults[DownloadFromAliOSS]
	oss *ali.OSSClient // OSS client used for the download
	job *job.Job       // queue handle used to enqueue the follow-up job
	app *app.Config    // provides StoragePath for the local destination
}
// NextRetry schedules retries a fixed 30 seconds out.
func (w *DownloadFromAliOSSWorker) NextRetry(job *Job[DownloadFromAliOSS]) time.Time {
	next := time.Now().Add(30 * time.Second)
	return next
}
// Work downloads the media identified by job.Args.MediaHash from Ali OSS into
// local storage, then chains to the VideoCut job via NextJob. If a local copy
// already exists with the expected size, the download is skipped.
func (w *DownloadFromAliOSSWorker) Work(ctx context.Context, job *Job[DownloadFromAliOSS]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown media is unrecoverable: cancel instead of retrying.
		return JobCancel(err)
	}
	dst := filepath.Join(w.app.StoragePath, media.Path)
	// check is path exist
	st, err := os.Stat(dst)
	switch {
	case err != nil && !os.IsNotExist(err):
		// BUG FIX: the original treated any stat result other than
		// "not exist" as success and dereferenced st below — on e.g. a
		// permission error st is nil and st.Size() panics. Surface the
		// error instead.
		log.Errorf("Error stating file: %v", err)
		return err
	case os.IsNotExist(err):
		log.Infof("File not exists: %s", dst)
		if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
			log.Errorf("Error creating directory: %v", err)
			return err
		}
	default:
		if st.Size() == media.Size {
			// Already fully downloaded: go straight to the next stage.
			return w.NextJob(media.Hash)
		}
		// Size mismatch: discard the partial file and re-download.
		if err := os.Remove(dst); err != nil {
			log.Errorf("Error removing file: %v", err)
			return err
		}
	}
	log.Infof("Starting download for file: %s", media.Path)
	if err := w.oss.Download(ctx, media.Path, dst, ali.WithInternal()); err != nil {
		log.Errorf("Error downloading file: %v", err)
		return err
	}
	log.Infof("Successfully downloaded file: %s", media.Path)
	return w.NextJob(media.Hash)
}
// NextJob enqueues the follow-up VideoCut job for the downloaded media.
func (w *DownloadFromAliOSSWorker) NextJob(hash string) error {
	err := w.job.Add(&VideoCut{MediaHash: hash})
	if err != nil {
		log.Errorf("Error adding job: %v", err)
	}
	return err
}

View File

@@ -0,0 +1,63 @@
package jobs
import (
"context"
"testing"
"quyun/v2/app/commands/testx"
"quyun/v2/app/model"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
. "github.com/riverqueue/river"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/suite"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
"go.uber.org/dig"
)
// DownloadFromAliOSSSuiteInjectParams declares the dependencies dig injects
// into the suite: the job queue, OSS client, and app config the worker needs.
type DownloadFromAliOSSSuiteInjectParams struct {
	dig.In
	// Initials forces the app's group initializers to run before the suite.
	Initials []contracts.Initial `group:"initials"` // nolint:structcheck
	Job      *job.Job
	Oss      *ali.OSSClient
	App      *app.Config
}

// DownloadFromAliOSSSuite exercises DownloadFromAliOSSWorker with real
// injected dependencies.
type DownloadFromAliOSSSuite struct {
	suite.Suite
	DownloadFromAliOSSSuiteInjectParams
}

// Test_DownloadFromAliOSS wires the job and model providers, then runs the
// suite inside the testx container.
func Test_DownloadFromAliOSS(t *testing.T) {
	providers := testx.Default().With(Provide, model.Provide)
	testx.Serve(providers, t, func(p DownloadFromAliOSSSuiteInjectParams) {
		suite.Run(t, &DownloadFromAliOSSSuite{DownloadFromAliOSSSuiteInjectParams: p})
	})
}

// Test_Work runs a full download for a fixed media hash.
// NOTE(review): depends on this hash existing in the test database/OSS —
// an integration test, not a unit test.
func (t *DownloadFromAliOSSSuite) Test_Work() {
	Convey("test_work", t.T(), func() {
		Convey("step 1", func() {
			job := &Job[DownloadFromAliOSS]{
				Args: DownloadFromAliOSS{
					MediaHash: "959e5310105c96e653f10b74e5bdc36b",
				},
			}
			worker := &DownloadFromAliOSSWorker{
				oss: t.Oss,
				job: t.Job,
				app: t.App,
			}
			err := worker.Work(context.Background(), job)
			So(err, ShouldBeNil)
		})
	})
}

View File

@@ -1,6 +1,8 @@
package jobs
import (
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/riverqueue/river"
@@ -14,12 +16,48 @@ func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func(
__job *job.Job,
) (contracts.Initial, error) {
obj := &DemoCronJob{}
if err := obj.Prepare(); err != nil {
obj := &BalancePayNotifyWorker{}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
container.Later(func() error { return __job.AddPeriodicJobs(obj) })
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &DownloadFromAliOSSWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &PublishDraftPostsWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
@@ -28,7 +66,62 @@ func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func(
__job *job.Job,
) (contracts.Initial, error) {
obj := &DemoJobWorker{}
obj := &RemoveFileWorker{}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
) (contracts.Initial, error) {
obj := &VideoCutWorker{
app: app,
job: job,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &VideoExtractHeadImageWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &VideoStoreShortWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}

View File

@@ -0,0 +1,105 @@
package jobs
import (
"context"
"time"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
"github.com/samber/lo"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: PublishDraftPosts satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*PublishDraftPosts)(nil)

// PublishDraftPosts is the job payload for creating a draft post from a media.
type PublishDraftPosts struct {
	// MediaHash identifies the primary media for the new post.
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (s PublishDraftPosts) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (s PublishDraftPosts) Kind() string { return "publish_draft_posts" }

// UniqueID returns the kind; all PublishDraftPosts jobs share one unique ID.
func (a PublishDraftPosts) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[PublishDraftPosts] = (*PublishDraftPostsWorker)(nil)

// PublishDraftPostsWorker assembles and stores a draft post for a media item.
// @provider(job)
type PublishDraftPostsWorker struct {
	WorkerDefaults[PublishDraftPosts]
	oss *ali.OSSClient // injected but not used in Work as shown — TODO confirm
	job *job.Job       // injected but not used in Work as shown — TODO confirm
	app *app.Config    // injected but not used in Work as shown — TODO confirm
}
// NextRetry schedules every retry 30 seconds after the failure.
func (w *PublishDraftPostsWorker) NextRetry(job *Job[PublishDraftPosts]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work creates a draft post for the media identified by job.Args.MediaHash.
// Related media are split by MIME type: non-JPEG relations (plus the primary
// media) become post assets, while JPEG relations become head images.
func (w *PublishDraftPostsWorker) Work(ctx context.Context, job *Job[PublishDraftPosts]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown media is unrecoverable: cancel instead of retrying.
		return JobCancel(err)
	}
	relationMedias, err := model.MediasModel().GetRelations(ctx, media.Hash)
	if err != nil {
		log.Errorf("Error getting relation medias: %v", err)
		return JobCancel(err)
	}
	// Non-JPEG relations become assets; JPEGs are collected as head images below.
	assets := lo.FilterMap(relationMedias, func(media *model.Medias, _ int) (fields.MediaAsset, bool) {
		return fields.MediaAsset{
			Type:  media.MimeType,
			Media: media.ID,
			Metas: &media.Metas.Data,
		}, media.MimeType != "image/jpeg"
	})
	// The primary media itself is always included as an asset.
	assets = append(assets, fields.MediaAsset{
		Type:  media.MimeType,
		Media: media.ID,
		Metas: &media.Metas.Data,
	})
	// publish a draft posts
	// Discount 100 means full price (elsewhere pay price = Price*Discount/100).
	post := &model.Posts{
		Status:      fields.PostStatusDraft,
		Title:       utils.FormatTitle(media.Name),
		Description: "",
		Content:     "",
		Price:       0,
		Discount:    100,
		Views:       0,
		Likes:       0,
		Tags:        fields.Json[[]string]{},
		Assets:      fields.ToJson(assets),
		HeadImages: fields.ToJson(lo.FilterMap(relationMedias, func(media *model.Medias, _ int) (int64, bool) {
			return media.ID, media.MimeType == "image/jpeg"
		})),
	}
	if err := post.Create(ctx); err != nil {
		log.Errorf("Error creating post: %v", err)
		return errors.Wrap(err, "create post")
	}
	log.Infof("Post created successfully with ID: %d", post.ID)
	return nil
}

View File

@@ -0,0 +1,61 @@
package jobs
import (
"context"
"os"
"time"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: RemoveFile satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*RemoveFile)(nil)

// RemoveFile is the job payload for deleting a file from local storage.
type RemoveFile struct {
	// FilePath is the path of the file to delete.
	FilePath string `json:"file_path"`
}

// InsertOpts places the job on the default queue at default priority.
func (s RemoveFile) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
		// ScheduledAt: time.Now().Add(time.Minute * 10),
	}
}

// Kind returns the river job kind identifier.
func (s RemoveFile) Kind() string { return "remove_file" }

// UniqueID returns the kind; all RemoveFile jobs share one unique ID.
func (a RemoveFile) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[RemoveFile] = (*RemoveFileWorker)(nil)

// RemoveFileWorker deletes files from the local filesystem.
// @provider(job)
type RemoveFileWorker struct {
	WorkerDefaults[RemoveFile]
}

// NextRetry schedules retries 30 seconds after a failure.
func (w *RemoveFileWorker) NextRetry(job *Job[RemoveFile]) time.Time {
	return time.Now().Add(30 * time.Second)
}
// Work deletes the file at job.Args.FilePath. A file that is already gone is
// treated as success, so duplicate removal jobs are harmless.
func (w *RemoveFileWorker) Work(ctx context.Context, job *Job[RemoveFile]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	target := job.Args.FilePath
	// Check if the file exists
	_, statErr := os.Stat(target)
	if os.IsNotExist(statErr) {
		log.Warnf("File does not exist: %v", target)
		return nil
	}
	// Remove the file
	if removeErr := os.Remove(target); removeErr != nil {
		log.Errorf("Error removing file: %v", removeErr)
		return removeErr
	}
	log.Infof("File removed successfully: %v", target)
	return nil
}

View File

@@ -0,0 +1,94 @@
package jobs
import (
"context"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: VideoCut satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*VideoCut)(nil)

// VideoCut is the job payload for cutting a short preview from a media file.
type VideoCut struct {
	// MediaHash identifies the media whose local file should be cut.
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (s VideoCut) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (s VideoCut) Kind() string { return "video_cut" }

// UniqueID returns the kind; all VideoCut jobs share one unique ID.
func (a VideoCut) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[VideoCut] = (*VideoCutWorker)(nil)

// VideoCutWorker cuts a preview clip and chains to VideoStoreShort.
// @provider(job)
type VideoCutWorker struct {
	WorkerDefaults[VideoCut]
	job *job.Job    // queue handle used to enqueue the follow-up job
	app *app.Config // provides StoragePath for locating the local file
}
// NextRetry schedules retries half a minute after a failure.
func (w *VideoCutWorker) NextRetry(job *Job[VideoCut]) time.Time {
	return time.Now().Add(time.Second * 30)
}
// Work cuts a 60-second preview from the start of the locally stored media
// file, records the source's duration in its metas, and chains to
// VideoStoreShort with the preview's path.
func (w *VideoCutWorker) Work(ctx context.Context, job *Job[VideoCut]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown media is unrecoverable: cancel instead of retrying.
		return JobCancel(err)
	}
	input := filepath.Join(w.app.StoragePath, media.Path)
	// Build "<name>-short<ext>" by inserting "-short" before the extension.
	output := input[:len(input)-len(filepath.Ext(input))] + "-short" + filepath.Ext(input)
	log.Infof("cut video process %s to %s", input, output)
	// Cut the first 60 seconds (offset 0) into output.
	if err := utils.CutMedia(input, output, 0, 60); err != nil {
		log.Errorf("Error cutting media: %v", err)
		return errors.Wrap(err, "cut media")
	}
	duration, err := utils.GetMediaDuration(input)
	if err != nil {
		log.Errorf("Error getting media duration: %v", err)
		return errors.Wrap(err, "get media duration")
	}
	// update media metas
	// NOTE(review): this replaces the source media's metas with an empty
	// ParentHash and Short=false — confirm no pre-existing meta fields need
	// to be preserved here.
	metas := fields.MediaMetas{
		ParentHash: "",
		Short:      false,
		Duration:   duration,
	}
	if err := model.MediasModel().UpdateMetas(ctx, media.ID, metas); err != nil {
		log.Errorf("Error updating media metas: %v", err)
		return errors.Wrap(err, "update media metas")
	}
	// save to database
	return w.job.Add(&VideoStoreShort{
		MediaHash: media.Hash,
		FilePath:  output,
	})
}

View File

@@ -0,0 +1,128 @@
package jobs
import (
"context"
"os"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: VideoExtractHeadImage satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*VideoExtractHeadImage)(nil)

// VideoExtractHeadImage is the job payload for extracting a head image from a
// video.
type VideoExtractHeadImage struct {
	// MediaHash identifies the source video media.
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (s VideoExtractHeadImage) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (s VideoExtractHeadImage) Kind() string { return "video_extract_head_image" }

// UniqueID returns the kind; all VideoExtractHeadImage jobs share one unique ID.
func (a VideoExtractHeadImage) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[VideoExtractHeadImage] = (*VideoExtractHeadImageWorker)(nil)

// VideoExtractHeadImageWorker extracts and uploads a head image, then chains
// to PublishDraftPosts.
// @provider(job)
type VideoExtractHeadImageWorker struct {
	WorkerDefaults[VideoExtractHeadImage]
	oss *ali.OSSClient // OSS client for uploading the extracted image
	job *job.Job       // queue handle for follow-up jobs
	app *app.Config    // provides StoragePath for locating the local file
}
// NextRetry schedules every retry a fixed 30s after the failure.
func (w *VideoExtractHeadImageWorker) NextRetry(job *Job[VideoExtractHeadImage]) time.Time {
	const backoff = 30 * time.Second
	return time.Now().Add(backoff)
}
// Work extracts a head image (the frame at 1s) from the downloaded video,
// uploads it to OSS, records it as a new media row linked to the source via
// ParentHash, schedules removal of the local source file, and finally chains
// to PublishDraftPosts.
func (w *VideoExtractHeadImageWorker) Work(ctx context.Context, job *Job[VideoExtractHeadImage]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown media is unrecoverable: cancel instead of retrying.
		return JobCancel(err)
	}
	input := filepath.Join(w.app.StoragePath, media.Path)
	output := input[:len(input)-len(filepath.Ext(input))] + ".jpg"
	// Grab the frame at the 1-second mark as the head image.
	if err := utils.GetFrameImageFromVideo(input, output, 1); err != nil {
		log.Errorf("Error extracting image from video: %v", err)
		return errors.Wrap(err, "failed to extract image from video")
	}
	// The extracted frame is temporary; it is deleted when Work returns.
	defer os.RemoveAll(output)
	fileSize, err := utils.GetFileSize(output)
	if err != nil {
		log.Errorf("Error getting file size: %v", err)
		return errors.Wrap(err, "failed to get file size")
	}
	fileMd5, err := utils.GetFileMd5(output)
	if err != nil {
		log.Errorf("Error getting file MD5: %v", err)
		return errors.Wrap(err, "failed to get file MD5")
	}
	filename := fileMd5 + filepath.Ext(output)
	name := "[展示图]" + media.Name + ".jpg"
	// create a new media record for the image
	imageMedia := &model.Medias{
		Name:     name,
		MimeType: "image/jpeg",
		Size:     fileSize,
		Path:     w.oss.GetSavePath(filename),
		Hash:     fileMd5,
		Metas: fields.ToJson(fields.MediaMetas{
			ParentHash: media.Hash,
		}),
	}
	// upload to oss
	if err := w.oss.Upload(ctx, output, imageMedia.Path, ali.WithInternal()); err != nil {
		log.Errorf("Error uploading image to OSS: %v", err)
		return errors.Wrap(err, "failed to upload image to OSS")
	}
	// FIX: the original also enqueued a RemoveFile job for `output` here,
	// which is redundant — the deferred os.RemoveAll above already deletes
	// the extracted frame when Work returns.
	if err := imageMedia.Create(ctx); err != nil {
		log.Errorf("Error creating media record: %v", err)
		// NOTE(review): the image was already uploaded to OSS at this point;
		// a failure here leaves an orphaned object — confirm whether cleanup
		// is needed.
		return errors.Wrap(err, "failed to create media record")
	}
	// The local source video is no longer needed; remove it asynchronously.
	// Best effort: failure to schedule cleanup does not fail the job.
	dst := filepath.Join(w.app.StoragePath, media.Path)
	if err := w.job.Add(&RemoveFile{FilePath: dst}); err != nil {
		log.Errorf("Error scheduling removal of original file: %v", err)
	}
	if err := w.job.Add(&PublishDraftPosts{MediaHash: media.Hash}); err != nil {
		log.Errorf("Error adding job: %v", err)
		return errors.Wrap(err, "failed to add job")
	}
	return nil
}

View File

@@ -0,0 +1,135 @@
package jobs
import (
"context"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check: VideoStoreShort satisfies the atom JobArgs contract.
var _ contracts.JobArgs = (*VideoStoreShort)(nil)

// VideoStoreShort is the job payload for uploading and registering the short
// preview clip produced by VideoCut.
type VideoStoreShort struct {
	// MediaHash identifies the source (full-length) media.
	MediaHash string `json:"media_hash"`
	// FilePath is the local path of the cut preview file.
	FilePath string `json:"file_path"`
}

// InsertOpts places the job on the default queue at default priority.
func (s VideoStoreShort) InsertOpts() InsertOpts {
	return InsertOpts{
		Queue:    QueueDefault,
		Priority: PriorityDefault,
	}
}

// Kind returns the river job kind identifier.
func (s VideoStoreShort) Kind() string { return "video_store_short" }

// UniqueID returns the kind; all VideoStoreShort jobs share one unique ID.
func (a VideoStoreShort) UniqueID() string { return a.Kind() }

// Compile-time check: the worker implements river's Worker interface.
var _ Worker[VideoStoreShort] = (*VideoStoreShortWorker)(nil)

// VideoStoreShortWorker uploads the preview to OSS, records it, and chains to
// VideoExtractHeadImage.
// @provider(job)
type VideoStoreShortWorker struct {
	WorkerDefaults[VideoStoreShort]
	oss *ali.OSSClient // OSS client for uploading the preview
	job *job.Job       // queue handle for follow-up jobs
	app *app.Config    // injected; not used in Work as shown — TODO confirm
}
// NextRetry schedules retries a fixed 30 seconds out.
func (w *VideoStoreShortWorker) NextRetry(job *Job[VideoStoreShort]) time.Time {
	when := time.Now().Add(30 * time.Second)
	return when
}
// Work registers the preview clip at job.Args.FilePath: it probes duration,
// hashes the file, uploads it to OSS, inserts a Medias row linked to the
// source via ParentHash (Short=true), schedules removal of the local file,
// and chains to VideoExtractHeadImage.
func (w *VideoStoreShortWorker) Work(ctx context.Context, job *Job[VideoStoreShort]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown media is unrecoverable: cancel instead of retrying.
		return JobCancel(err)
	}
	duration, err := utils.GetMediaDuration(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting media duration: %v", err)
		return errors.Wrap(err, "failed to get media duration")
	}
	// get file md5
	log.Infof("pending get file md5 %s", job.Args.FilePath)
	fileMd5, err := utils.GetFileMd5(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting file md5: %v", err)
		return errors.Wrap(err, "failed to get file md5")
	}
	log.Infof("got file md5 %s %s", job.Args.FilePath, fileMd5)
	// OSS object key is derived from the content hash plus the extension.
	filePath := w.oss.GetSavePath(fileMd5 + filepath.Ext(job.Args.FilePath))
	// get file size
	log.Infof("pending get file size %s", job.Args.FilePath)
	fileSize, err := utils.GetFileSize(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting file size: %v", err)
		return errors.Wrap(err, "failed to get file size")
	}
	log.Infof("got file size %s %d", job.Args.FilePath, fileSize)
	// save to db and relate to master
	mediaModel := &model.Medias{
		Name:     "[试听] " + media.Name,
		MimeType: media.MimeType,
		Size:     fileSize,
		Path:     filePath,
		Hash:     fileMd5,
		Metas: fields.ToJson(fields.MediaMetas{
			ParentHash: media.Hash,
			Short:      true,
			Duration:   duration,
		}),
	}
	// upload to oss
	log.Infof("pending upload file to oss %s", job.Args.FilePath)
	if err := w.oss.Upload(ctx, job.Args.FilePath, filePath, ali.WithInternal()); err != nil {
		log.Errorf("Error uploading file to OSS: %v", err)
		return err
	}
	log.Infof("pending create media record %s", job.Args.FilePath)
	if err := mediaModel.Create(ctx); err != nil {
		log.Errorf("Error saving media record: %v data: %+v", err, mediaModel)
		return err
	}
	log.Infof("Media record created with path: %s and hash: %s", filePath, fileMd5)
	log.Infof("pending remove local storage file %s", job.Args.FilePath)
	// Best effort: a failure to schedule cleanup is logged but does not fail
	// the job (execution continues to NextJob regardless).
	if err := w.job.Add(&RemoveFile{FilePath: job.Args.FilePath}); err != nil {
		log.Errorf("Error removing original file: %v", err)
	}
	return w.NextJob(media.Hash)
}
// NextJob enqueues the VideoExtractHeadImage job for the stored media.
func (w *VideoStoreShortWorker) NextJob(hash string) error {
	err := w.job.Add(&VideoExtractHeadImage{MediaHash: hash})
	if err != nil {
		log.Errorf("Error adding job: %v", err)
	}
	return err
}