feat: migrate services
Some checks failed
build quyun / Build (push) Failing after 2m50s

This commit is contained in:
2025-12-19 19:05:12 +08:00
parent 005585c53b
commit 557a641f41
71 changed files with 5626 additions and 280 deletions

View File

@@ -0,0 +1,109 @@
package jobs
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that BalancePayNotify satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*BalancePayNotify)(nil)

// BalancePayNotify carries the arguments for the balance-payment
// settlement job: the order to settle, identified by its order number.
type BalancePayNotify struct {
	OrderNo string `json:"order_no"`
}

// InsertOpts places the job on the default queue at default priority.
func (n BalancePayNotify) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (BalancePayNotify) Kind() string { return "balance_pay_notify" }

// UniqueID derives the uniqueness key from the job kind.
func (n BalancePayNotify) UniqueID() string { return n.Kind() }
// Compile-time check that BalancePayNotifyWorker implements Worker.
var _ Worker[BalancePayNotify] = (*BalancePayNotifyWorker)(nil)

// BalancePayNotifyWorker settles pending orders that are paid from the
// user's account balance.
// @provider(job)
type BalancePayNotifyWorker struct {
	WorkerDefaults[BalancePayNotify]
}
// Work settles a pending order paid from the user's balance: it verifies
// the order is still pending, checks the user has sufficient balance,
// then — inside a transaction — deducts the balance, grants the purchased
// post, and marks the order completed.
//
// Returns JobCancel for non-retryable conditions (already paid,
// insufficient balance); other errors are retryable.
func (w *BalancePayNotifyWorker) Work(ctx context.Context, job *Job[BalancePayNotify]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	// Use the worker's ctx for every DB call (the original mixed in
	// context.Background(), which ignores job cancellation and detaches
	// the calls from the transaction's lifetime).
	order, err := model.OrdersModel().GetByOrderNo(ctx, job.Args.OrderNo)
	if err != nil {
		log.Errorf("GetByOrderNo error:%v", err)
		return err
	}
	if order.Status != fields.OrderStatusPending {
		log.Infof("Order %s is paid, processing...", job.Args.OrderNo)
		return JobCancel(fmt.Errorf("Order already paid, currently status: %d", order.Status))
	}
	user, err := model.UsersModel().GetByID(ctx, order.UserID)
	if err != nil {
		log.Errorf("GetByID error:%v", err)
		return errors.Wrap(err, "get user error")
	}
	// Effective price after the percentage discount (100 == no discount).
	payPrice := order.Price * int64(order.Discount) / 100
	order.PaymentMethod = "balance"
	order.Status = fields.OrderStatusCompleted
	meta := order.Meta.Data
	// NOTE(review): the check uses meta.CostBalance while the deduction
	// below uses payPrice — confirm these are intended to be the same
	// amount. The log now reports the value actually checked (the
	// original logged payPrice, making failures confusing to diagnose).
	if user.Balance-meta.CostBalance < 0 {
		log.Errorf("User %d balance is not enough, current balance: %d, cost: %d", user.ID, user.Balance, meta.CostBalance)
		return JobCancel(
			fmt.Errorf("User %d balance is not enough, current balance: %d, cost: %d", user.ID, user.Balance, meta.CostBalance),
		)
	}
	log.Infof("Updated order details: %+v", order)
	tx, err := model.Transaction(ctx)
	if err != nil {
		return errors.Wrap(err, "Transaction error")
	}
	// Rollback after a successful Commit is a no-op; this only undoes
	// work when we bail out early below.
	defer tx.Rollback()
	// update user balance
	err = user.SetBalance(ctx, user.Balance-payPrice)
	if err != nil {
		log.WithError(err).Error("SetBalance error")
		return JobCancel(errors.Wrap(err, "set user balance failed"))
	}
	// Grant the purchased post to the user.
	if err := user.BuyPosts(ctx, order.PostID, order.Price); err != nil {
		log.Errorf("BuyPosts error:%v", err)
		return errors.Wrap(err, "BuyPosts error")
	}
	// Persist the status/payment-method changes made above.
	if err := order.Update(ctx); err != nil {
		log.Errorf("Update order error:%v", err)
		return errors.Wrap(err, "Update order error")
	}
	if err := tx.Commit(); err != nil {
		log.Errorf("Commit error:%v", err)
		return errors.Wrap(err, "Commit error")
	}
	log.Infof("Successfully processed order %s", order.OrderNo)
	return nil
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *BalancePayNotifyWorker) NextRetry(job *Job[BalancePayNotify]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}

View File

@@ -1,36 +0,0 @@
package jobs
import (
"time"
. "github.com/riverqueue/river"
"github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that DemoCronJob satisfies contracts.CronJob.
var _ contracts.CronJob = (*DemoCronJob)(nil)

// DemoCronJob periodically enqueues a DemoJob carrying a fixed set of strings.
// @provider(cronjob)
type DemoCronJob struct {
	log *logrus.Entry `inject:"false"`
}

// Prepare implements contracts.CronJob; there is nothing to set up.
func (DemoCronJob) Prepare() error {
	return nil
}

// Args implements contracts.CronJob: one DemoJob every 10 seconds,
// not triggered immediately on startup.
func (DemoCronJob) Args() []contracts.CronJobArg {
	arg := contracts.CronJobArg{
		Arg: DemoJob{
			Strings: []string{"a", "b", "c", "d"},
		},
		PeriodicInterval: PeriodicInterval(time.Second * 10),
		RunOnStart:       false,
	}
	return []contracts.CronJobArg{arg}
}

View File

@@ -1,53 +0,0 @@
package jobs
import (
"context"
"sort"
"time"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
_ "go.ipao.vip/atom/contracts"
)
// Compile-time check that DemoJob satisfies contracts.JobArgs.
var _ contracts.JobArgs = DemoJob{}

// DemoJob carries a list of strings for the demo worker to sort.
type DemoJob struct {
	Strings []string `json:"strings"`
}

// InsertOpts places the job on the default queue at default priority.
func (d DemoJob) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (DemoJob) Kind() string { return "demo_job" }

// UniqueID derives the uniqueness key from the job kind.
func (d DemoJob) UniqueID() string { return d.Kind() }
// Compile-time check that DemoJobWorker implements Worker.
var _ Worker[DemoJob] = (*DemoJobWorker)(nil)

// DemoJobWorker sorts and logs the strings carried by a DemoJob.
// @provider(job)
type DemoJobWorker struct {
	WorkerDefaults[DemoJob]
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *DemoJobWorker) NextRetry(job *Job[DemoJob]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work sorts the job's strings in place and logs the sorted result.
func (w *DemoJobWorker) Work(ctx context.Context, job *Job[DemoJob]) error {
	kind := job.Args.Kind()
	lg := log.WithField("job", kind)
	lg.Infof("[START] %s args: %v", kind, job.Args.Strings)
	defer lg.Infof("[END] %s", kind)
	// modify below
	sort.Strings(job.Args.Strings)
	lg.Infof("[%s] Sorted strings: %v\n", time.Now().Format(time.TimeOnly), job.Args.Strings)
	return nil
}

View File

@@ -1,53 +0,0 @@
package jobs
import (
"context"
"testing"
"quyun/v2/app/commands/testx"
"quyun/v2/app/services"
. "github.com/riverqueue/river"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/suite"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
"go.uber.org/dig"
)
// DemoJobSuiteInjectParams declares the dependencies dig injects into the
// suite; Initials forces every group:"initials" provider to be constructed.
type DemoJobSuiteInjectParams struct {
	dig.In
	Initials []contracts.Initial `group:"initials"` // nolint:structcheck
}

// DemoJobSuite is a testify suite wired up through the DI container.
type DemoJobSuite struct {
	suite.Suite
	DemoJobSuiteInjectParams
}

// Test_DemoJob boots the test container with the job and service
// providers, then runs the suite with the injected dependencies.
func Test_DemoJob(t *testing.T) {
	providers := testx.Default().With(Provide, services.Provide)
	testx.Serve(providers, t, func(p DemoJobSuiteInjectParams) {
		suite.Run(t, &DemoJobSuite{DemoJobSuiteInjectParams: p})
	})
}

// Test_Work invokes the worker directly with in-memory args and expects
// the sort-and-log demo to succeed.
func (t *DemoJobSuite) Test_Work() {
	Convey("test_work", t.T(), func() {
		Convey("step 1", func() {
			job := &Job[DemoJob]{
				Args: DemoJob{
					Strings: []string{"a", "b", "c"},
				},
			}
			worker := &DemoJobWorker{}
			err := worker.Work(context.Background(), job)
			So(err, ShouldBeNil)
		})
	})
}

View File

@@ -0,0 +1,104 @@
package jobs
import (
"context"
"os"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
_ "go.ipao.vip/atom/contracts"
)
// Compile-time check that DownloadFromAliOSS satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*DownloadFromAliOSS)(nil)

// DownloadFromAliOSS carries the arguments for downloading a media file
// from Ali OSS to local storage, identified by its content hash.
type DownloadFromAliOSS struct {
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (d DownloadFromAliOSS) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (d DownloadFromAliOSS) Kind() string { return "download_from_ali_oss" }

// UniqueID derives the uniqueness key from the job kind.
func (d DownloadFromAliOSS) UniqueID() string { return d.Kind() }
// Compile-time check that DownloadFromAliOSSWorker implements Worker.
var _ Worker[DownloadFromAliOSS] = (*DownloadFromAliOSSWorker)(nil)

// DownloadFromAliOSSWorker downloads a media object from Ali OSS into
// local storage and then chains into the video-cut job.
// @provider(job)
type DownloadFromAliOSSWorker struct {
	WorkerDefaults[DownloadFromAliOSS]
	oss *ali.OSSClient // OSS download client
	job *job.Job       // queue used to enqueue the follow-up job
	app *app.Config    // provides StoragePath for the local destination
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *DownloadFromAliOSSWorker) NextRetry(job *Job[DownloadFromAliOSS]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work ensures the media file identified by MediaHash exists in local
// storage: if a complete copy is already present it skips straight to the
// next pipeline stage, otherwise it (re-)downloads the object from OSS.
func (w *DownloadFromAliOSSWorker) Work(ctx context.Context, job *Job[DownloadFromAliOSS]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown hash is not retryable.
		return JobCancel(err)
	}
	dst := filepath.Join(w.app.StoragePath, media.Path)
	// check is path exist
	st, err := os.Stat(dst)
	switch {
	case os.IsNotExist(err):
		log.Infof("File not exists: %s", dst)
		if err := os.MkdirAll(filepath.Dir(dst), os.ModePerm); err != nil {
			log.Errorf("Error creating directory: %v", err)
			return err
		}
	case err != nil:
		// BUG FIX: a Stat failure other than not-exist (e.g. permission
		// denied) previously fell into the else branch and dereferenced
		// a nil FileInfo, panicking the worker.
		log.Errorf("Error stating file: %v", err)
		return err
	case st.Size() == media.Size:
		// Already fully downloaded; skip straight to the next stage.
		return w.NextJob(media.Hash)
	default:
		// Size mismatch: partial or corrupt file — remove and re-download.
		if err := os.Remove(dst); err != nil {
			log.Errorf("Error removing file: %v", err)
			return err
		}
	}
	log.Infof("Starting download for file: %s", media.Path)
	if err := w.oss.Download(ctx, media.Path, dst, ali.WithInternal()); err != nil {
		log.Errorf("Error downloading file: %v", err)
		return err
	}
	log.Infof("Successfully downloaded file: %s", media.Path)
	return w.NextJob(media.Hash)
}
// NextJob enqueues the next pipeline stage (VideoCut) for the given
// media hash.
func (w *DownloadFromAliOSSWorker) NextJob(hash string) error {
	if err := w.job.Add(&VideoCut{MediaHash: hash}); err != nil {
		log.Errorf("Error adding job: %v", err)
		return err
	}
	return nil
}

View File

@@ -0,0 +1,63 @@
package jobs
import (
"context"
"testing"
"quyun/v2/app/commands/testx"
"quyun/v2/app/model"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
. "github.com/riverqueue/river"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/suite"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
"go.uber.org/dig"
)
// DownloadFromAliOSSSuiteInjectParams declares the dependencies dig
// injects into the suite: the job queue, the OSS client, and app config.
type DownloadFromAliOSSSuiteInjectParams struct {
	dig.In
	Initials []contracts.Initial `group:"initials"` // nolint:structcheck
	Job      *job.Job
	Oss      *ali.OSSClient
	App      *app.Config
}

// DownloadFromAliOSSSuite is a testify suite wired up through the DI
// container.
type DownloadFromAliOSSSuite struct {
	suite.Suite
	DownloadFromAliOSSSuiteInjectParams
}

// Test_DownloadFromAliOSS boots the test container with the job and
// model providers, then runs the suite with the injected dependencies.
func Test_DownloadFromAliOSS(t *testing.T) {
	providers := testx.Default().With(Provide, model.Provide)
	testx.Serve(providers, t, func(p DownloadFromAliOSSSuiteInjectParams) {
		suite.Run(t, &DownloadFromAliOSSSuite{DownloadFromAliOSSSuiteInjectParams: p})
	})
}

// Test_Work runs the download worker against a fixed media hash.
// NOTE(review): this depends on that hash existing in the test database
// and OSS bucket — an integration test, not a unit test.
func (t *DownloadFromAliOSSSuite) Test_Work() {
	Convey("test_work", t.T(), func() {
		Convey("step 1", func() {
			job := &Job[DownloadFromAliOSS]{
				Args: DownloadFromAliOSS{
					MediaHash: "959e5310105c96e653f10b74e5bdc36b",
				},
			}
			worker := &DownloadFromAliOSSWorker{
				oss: t.Oss,
				job: t.Job,
				app: t.App,
			}
			err := worker.Work(context.Background(), job)
			So(err, ShouldBeNil)
		})
	})
}

View File

@@ -1,6 +1,8 @@
package jobs
import (
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/riverqueue/river"
@@ -14,12 +16,48 @@ func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func(
__job *job.Job,
) (contracts.Initial, error) {
obj := &DemoCronJob{}
if err := obj.Prepare(); err != nil {
obj := &BalancePayNotifyWorker{}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
container.Later(func() error { return __job.AddPeriodicJobs(obj) })
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &DownloadFromAliOSSWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &PublishDraftPostsWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
@@ -28,7 +66,62 @@ func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func(
__job *job.Job,
) (contracts.Initial, error) {
obj := &DemoJobWorker{}
obj := &RemoveFileWorker{}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
) (contracts.Initial, error) {
obj := &VideoCutWorker{
app: app,
job: job,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &VideoExtractHeadImageWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}
return obj, nil
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func(
__job *job.Job,
app *app.Config,
job *job.Job,
oss *ali.OSSClient,
) (contracts.Initial, error) {
obj := &VideoStoreShortWorker{
app: app,
job: job,
oss: oss,
}
if err := river.AddWorkerSafely(__job.Workers, obj); err != nil {
return nil, err
}

View File

@@ -0,0 +1,105 @@
package jobs
import (
"context"
"time"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
"github.com/samber/lo"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that PublishDraftPosts satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*PublishDraftPosts)(nil)

// PublishDraftPosts carries the arguments for creating a draft post from
// a processed media item, identified by its content hash.
type PublishDraftPosts struct {
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (p PublishDraftPosts) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (p PublishDraftPosts) Kind() string { return "publish_draft_posts" }

// UniqueID derives the uniqueness key from the job kind.
func (p PublishDraftPosts) UniqueID() string { return p.Kind() }
// Compile-time check that PublishDraftPostsWorker implements Worker.
var _ Worker[PublishDraftPosts] = (*PublishDraftPostsWorker)(nil)

// PublishDraftPostsWorker assembles a draft post from a master media
// record and its related media.
// @provider(job)
type PublishDraftPostsWorker struct {
	WorkerDefaults[PublishDraftPosts]
	oss *ali.OSSClient // injected; not used in Work below — TODO confirm needed
	job *job.Job       // queue for follow-up jobs
	app *app.Config    // application configuration
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *PublishDraftPostsWorker) NextRetry(job *Job[PublishDraftPosts]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work creates a draft post from the media identified by MediaHash: the
// master media plus its non-JPEG relations become the post's assets,
// while JPEG relations become the post's head images.
func (w *PublishDraftPostsWorker) Work(ctx context.Context, job *Job[PublishDraftPosts]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	// Load the master media record by its content hash.
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		return JobCancel(err)
	}
	// All media derived from the master (short clip, head image, ...),
	// i.e. rows whose metas.parent_hash equals this media's hash.
	relationMedias, err := model.MediasModel().GetRelations(ctx, media.Hash)
	if err != nil {
		log.Errorf("Error getting relation medias: %v", err)
		return JobCancel(err)
	}
	// Non-JPEG relations become playable assets; JPEG relations are used
	// as head images further below.
	assets := lo.FilterMap(relationMedias, func(media *model.Medias, _ int) (fields.MediaAsset, bool) {
		return fields.MediaAsset{
			Type:  media.MimeType,
			Media: media.ID,
			Metas: &media.Metas.Data,
		}, media.MimeType != "image/jpeg"
	})
	// The master media itself is always included as an asset.
	assets = append(assets, fields.MediaAsset{
		Type:  media.MimeType,
		Media: media.ID,
		Metas: &media.Metas.Data,
	})
	// publish a draft posts
	post := &model.Posts{
		Status:      fields.PostStatusDraft,
		Title:       utils.FormatTitle(media.Name),
		Description: "",
		Content:     "",
		Price:       0,
		Discount:    100, // 100 == no discount (percentage)
		Views:       0,
		Likes:       0,
		Tags:        fields.Json[[]string]{},
		Assets:      fields.ToJson(assets),
		HeadImages: fields.ToJson(lo.FilterMap(relationMedias, func(media *model.Medias, _ int) (int64, bool) {
			return media.ID, media.MimeType == "image/jpeg"
		})),
	}
	if err := post.Create(ctx); err != nil {
		log.Errorf("Error creating post: %v", err)
		return errors.Wrap(err, "create post")
	}
	log.Infof("Post created successfully with ID: %d", post.ID)
	return nil
}

View File

@@ -0,0 +1,61 @@
package jobs
import (
"context"
"os"
"time"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that RemoveFile satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*RemoveFile)(nil)

// RemoveFile carries the arguments for deleting a local file.
type RemoveFile struct {
	FilePath string `json:"file_path"`
}

// InsertOpts places the job on the default queue at default priority.
func (r RemoveFile) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	// ScheduledAt: time.Now().Add(time.Minute * 10),
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (r RemoveFile) Kind() string { return "remove_file" }

// UniqueID derives the uniqueness key from the job kind.
func (r RemoveFile) UniqueID() string { return r.Kind() }
// Compile-time check that RemoveFileWorker implements Worker.
var _ Worker[RemoveFile] = (*RemoveFileWorker)(nil)

// RemoveFileWorker deletes a file from local storage; a missing file is
// treated as success.
// @provider(job)
type RemoveFileWorker struct {
	WorkerDefaults[RemoveFile]
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *RemoveFileWorker) NextRetry(job *Job[RemoveFile]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work deletes the file at job.Args.FilePath. An already-missing file is
// logged and treated as success, so the job is safely idempotent.
func (w *RemoveFileWorker) Work(ctx context.Context, job *Job[RemoveFile]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	// Remove directly and treat "already gone" as success. The original
	// Stat-then-Remove sequence had a TOCTOU race: the file could vanish
	// between the check and the removal, failing the job spuriously.
	if err := os.Remove(job.Args.FilePath); err != nil {
		if os.IsNotExist(err) {
			log.Warnf("File does not exist: %v", job.Args.FilePath)
			return nil
		}
		log.Errorf("Error removing file: %v", err)
		return err
	}
	log.Infof("File removed successfully: %v", job.Args.FilePath)
	return nil
}

View File

@@ -0,0 +1,94 @@
package jobs
import (
"context"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that VideoCut satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*VideoCut)(nil)

// VideoCut carries the arguments for cutting a short preview clip from
// the media identified by its content hash.
type VideoCut struct {
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (v VideoCut) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (v VideoCut) Kind() string { return "video_cut" }

// UniqueID derives the uniqueness key from the job kind.
func (v VideoCut) UniqueID() string { return v.Kind() }
// Compile-time check that VideoCutWorker implements Worker.
var _ Worker[VideoCut] = (*VideoCutWorker)(nil)

// VideoCutWorker produces a 60-second preview clip of a local media file
// and chains into the VideoStoreShort job.
// @provider(job)
type VideoCutWorker struct {
	WorkerDefaults[VideoCut]
	job *job.Job    // queue used to enqueue the follow-up job
	app *app.Config // provides StoragePath for locating local files
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *VideoCutWorker) NextRetry(job *Job[VideoCut]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work cuts the first 60 seconds of the local media file into a
// "-short" sibling file, records the full media's duration in its
// metas, and enqueues VideoStoreShort to upload and register the clip.
func (w *VideoCutWorker) Work(ctx context.Context, job *Job[VideoCut]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown hash is not retryable.
		return JobCancel(err)
	}
	input := filepath.Join(w.app.StoragePath, media.Path)
	// e.g. /path/video.mp4 -> /path/video-short.mp4
	output := input[:len(input)-len(filepath.Ext(input))] + "-short" + filepath.Ext(input)
	log.Infof("cut video process %s to %s", input, output)
	// Cut the first 60 seconds (offset 0) into the preview file.
	if err := utils.CutMedia(input, output, 0, 60); err != nil {
		log.Errorf("Error cutting media: %v", err)
		return errors.Wrap(err, "cut media")
	}
	// Duration of the FULL input, not the clip.
	duration, err := utils.GetMediaDuration(input)
	if err != nil {
		log.Errorf("Error getting media duration: %v", err)
		return errors.Wrap(err, "get media duration")
	}
	// update media metas
	// NOTE(review): this writes ParentHash:"" and Short:false for the
	// master media — confirm UpdateMetas is meant to replace the whole
	// metas object rather than merge, so no existing fields are lost.
	metas := fields.MediaMetas{
		ParentHash: "",
		Short:      false,
		Duration:   duration,
	}
	if err := model.MediasModel().UpdateMetas(ctx, media.ID, metas); err != nil {
		log.Errorf("Error updating media metas: %v", err)
		return errors.Wrap(err, "update media metas")
	}
	// save to database
	return w.job.Add(&VideoStoreShort{
		MediaHash: media.Hash,
		FilePath:  output,
	})
}

View File

@@ -0,0 +1,128 @@
package jobs
import (
"context"
"os"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that VideoExtractHeadImage satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*VideoExtractHeadImage)(nil)

// VideoExtractHeadImage carries the arguments for extracting a head
// (cover) image from the video identified by its content hash.
type VideoExtractHeadImage struct {
	MediaHash string `json:"media_hash"`
}

// InsertOpts places the job on the default queue at default priority.
func (v VideoExtractHeadImage) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (v VideoExtractHeadImage) Kind() string { return "video_extract_head_image" }

// UniqueID derives the uniqueness key from the job kind.
func (v VideoExtractHeadImage) UniqueID() string { return v.Kind() }
// Compile-time check that VideoExtractHeadImageWorker implements Worker.
var _ Worker[VideoExtractHeadImage] = (*VideoExtractHeadImageWorker)(nil)

// VideoExtractHeadImageWorker grabs a frame from a local video, uploads
// it to OSS as a cover image, and chains into PublishDraftPosts.
// @provider(job)
type VideoExtractHeadImageWorker struct {
	WorkerDefaults[VideoExtractHeadImage]
	oss *ali.OSSClient // OSS upload client
	job *job.Job       // queue for follow-up jobs
	app *app.Config    // provides StoragePath for locating local files
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *VideoExtractHeadImageWorker) NextRetry(job *Job[VideoExtractHeadImage]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work extracts a frame at second 1 from the local video, uploads it to
// OSS as a JPEG cover image, records it as a child media (parent_hash
// pointing at the video), schedules cleanup of local files, and finally
// enqueues PublishDraftPosts for the video.
func (w *VideoExtractHeadImageWorker) Work(ctx context.Context, job *Job[VideoExtractHeadImage]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown hash is not retryable.
		return JobCancel(err)
	}
	input := filepath.Join(w.app.StoragePath, media.Path)
	// e.g. /path/video.mp4 -> /path/video.jpg
	output := input[:len(input)-len(filepath.Ext(input))] + ".jpg"
	if err := utils.GetFrameImageFromVideo(input, output, 1); err != nil {
		log.Errorf("Error extracting image from video: %v", err)
		return errors.Wrap(err, "failed to extract image from video")
	}
	// NOTE(review): the extracted image is both removed here on return
	// AND scheduled for removal via a RemoveFile job below — confirm the
	// redundancy is intentional (RemoveFile tolerates a missing file).
	defer os.RemoveAll(output)
	fileSize, err := utils.GetFileSize(output)
	if err != nil {
		log.Errorf("Error getting file size: %v", err)
		return errors.Wrap(err, "failed to get file size")
	}
	fileMd5, err := utils.GetFileMd5(output)
	if err != nil {
		log.Errorf("Error getting file MD5: %v", err)
		return errors.Wrap(err, "failed to get file MD5")
	}
	filename := fileMd5 + filepath.Ext(output)
	name := "[展示图]" + media.Name + ".jpg"
	// create a new media record for the image
	imageMedia := &model.Medias{
		Name:     name,
		MimeType: "image/jpeg",
		Size:     fileSize,
		Path:     w.oss.GetSavePath(filename),
		Hash:     fileMd5,
		Metas: fields.ToJson(fields.MediaMetas{
			ParentHash: media.Hash,
		}),
	}
	// upload to oss
	if err := w.oss.Upload(ctx, output, imageMedia.Path, ali.WithInternal()); err != nil {
		log.Errorf("Error uploading image to OSS: %v", err)
		return errors.Wrap(err, "failed to upload image to OSS")
	}
	// Best-effort cleanup of the extracted image; failure to enqueue is
	// logged but does not fail the job.
	// (Log message fixed: this enqueues a removal job, it does not remove.)
	if err := w.job.Add(&RemoveFile{FilePath: output}); err != nil {
		log.Errorf("Error scheduling removal of extracted image: %v", err)
	}
	if err := imageMedia.Create(ctx); err != nil {
		log.Errorf("Error creating media record: %v", err)
		return errors.Wrap(err, "failed to create media record")
	}
	// Best-effort cleanup of the original local video file.
	dst := filepath.Join(w.app.StoragePath, media.Path)
	if err := w.job.Add(&RemoveFile{FilePath: dst}); err != nil {
		log.Errorf("Error scheduling removal of original file: %v", err)
	}
	if err := w.job.Add(&PublishDraftPosts{MediaHash: media.Hash}); err != nil {
		log.Errorf("Error adding job: %v", err)
		return errors.Wrap(err, "failed to add job")
	}
	return nil
}

View File

@@ -0,0 +1,135 @@
package jobs
import (
"context"
"path/filepath"
"time"
"quyun/v2/app/model"
"quyun/v2/database/fields"
"quyun/v2/pkg/utils"
"quyun/v2/providers/ali"
"quyun/v2/providers/app"
"quyun/v2/providers/job"
"github.com/pkg/errors"
. "github.com/riverqueue/river"
log "github.com/sirupsen/logrus"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
)
// Compile-time check that VideoStoreShort satisfies contracts.JobArgs.
var _ contracts.JobArgs = (*VideoStoreShort)(nil)

// VideoStoreShort carries the arguments for uploading a preview clip:
// the master media's hash and the local path of the clip file.
type VideoStoreShort struct {
	MediaHash string `json:"media_hash"`
	FilePath  string `json:"file_path"`
}

// InsertOpts places the job on the default queue at default priority.
func (v VideoStoreShort) InsertOpts() InsertOpts {
	var opts InsertOpts
	opts.Queue = QueueDefault
	opts.Priority = PriorityDefault
	return opts
}

// Kind returns the river job-kind identifier for this job type.
func (v VideoStoreShort) Kind() string { return "video_store_short" }

// UniqueID derives the uniqueness key from the job kind.
func (v VideoStoreShort) UniqueID() string { return v.Kind() }
// Compile-time check that VideoStoreShortWorker implements Worker.
var _ Worker[VideoStoreShort] = (*VideoStoreShortWorker)(nil)

// VideoStoreShortWorker uploads a preview clip to OSS, registers it as a
// child media of the master video, and chains into VideoExtractHeadImage.
// @provider(job)
type VideoStoreShortWorker struct {
	WorkerDefaults[VideoStoreShort]
	oss *ali.OSSClient // OSS upload client
	job *job.Job       // queue for follow-up jobs
	app *app.Config    // application configuration
}
// NextRetry schedules the next attempt a fixed 30 seconds from now.
func (w *VideoStoreShortWorker) NextRetry(job *Job[VideoStoreShort]) time.Time {
	const retryDelay = 30 * time.Second
	return time.Now().Add(retryDelay)
}
// Work uploads the locally-cut preview clip to OSS and registers it as a
// child media record (parent_hash = master hash, Short = true), then
// schedules removal of the local clip and enqueues the head-image stage.
func (w *VideoStoreShortWorker) Work(ctx context.Context, job *Job[VideoStoreShort]) error {
	log := log.WithField("job", job.Args.Kind())
	log.Infof("[Start] Working on job with strings: %+v", job.Args)
	defer log.Infof("[End] Finished %s", job.Args.Kind())
	media, err := model.MediasModel().GetByHash(ctx, job.Args.MediaHash)
	if err != nil {
		log.Errorf("Error getting media by ID: %v", err)
		// Unknown hash is not retryable.
		return JobCancel(err)
	}
	// Duration of the clip file (not the master video).
	duration, err := utils.GetMediaDuration(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting media duration: %v", err)
		return errors.Wrap(err, "failed to get media duration")
	}
	// get file md5
	log.Infof("pending get file md5 %s", job.Args.FilePath)
	fileMd5, err := utils.GetFileMd5(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting file md5: %v", err)
		return errors.Wrap(err, "failed to get file md5")
	}
	log.Infof("got file md5 %s %s", job.Args.FilePath, fileMd5)
	// Content-addressed OSS path: <md5><ext>.
	filePath := w.oss.GetSavePath(fileMd5 + filepath.Ext(job.Args.FilePath))
	// get file size
	log.Infof("pending get file size %s", job.Args.FilePath)
	fileSize, err := utils.GetFileSize(job.Args.FilePath)
	if err != nil {
		log.Errorf("Error getting file size: %v", err)
		return errors.Wrap(err, "failed to get file size")
	}
	log.Infof("got file size %s %d", job.Args.FilePath, fileSize)
	// save to db and relate to master
	mediaModel := &model.Medias{
		Name:     "[试听] " + media.Name,
		MimeType: media.MimeType,
		Size:     fileSize,
		Path:     filePath,
		Hash:     fileMd5,
		Metas: fields.ToJson(fields.MediaMetas{
			ParentHash: media.Hash,
			Short:      true,
			Duration:   duration,
		}),
	}
	// upload to oss
	log.Infof("pending upload file to oss %s", job.Args.FilePath)
	if err := w.oss.Upload(ctx, job.Args.FilePath, filePath, ali.WithInternal()); err != nil {
		log.Errorf("Error uploading file to OSS: %v", err)
		return err
	}
	log.Infof("pending create media record %s", job.Args.FilePath)
	if err := mediaModel.Create(ctx); err != nil {
		log.Errorf("Error saving media record: %v data: %+v", err, mediaModel)
		return err
	}
	log.Infof("Media record created with path: %s and hash: %s", filePath, fileMd5)
	// Best-effort cleanup of the local clip; enqueue failure is logged
	// but does not fail the job.
	log.Infof("pending remove local storage file %s", job.Args.FilePath)
	if err := w.job.Add(&RemoveFile{FilePath: job.Args.FilePath}); err != nil {
		log.Errorf("Error removing original file: %v", err)
	}
	return w.NextJob(media.Hash)
}
// NextJob enqueues the next pipeline stage (VideoExtractHeadImage) for
// the given master media hash.
func (w *VideoStoreShortWorker) NextJob(hash string) error {
	if err := w.job.Add(&VideoExtractHeadImage{MediaHash: hash}); err != nil {
		log.Errorf("Error adding job: %v", err)
		return err
	}
	return nil
}

View File

@@ -0,0 +1,88 @@
package services
import (
"context"
"quyun/v2/app/requests"
"quyun/v2/database/models"
"quyun/v2/pkg/fields"
"github.com/pkg/errors"
"go.ipao.vip/gen"
)
// medias provides read and update queries over the media table.
// @provider
type medias struct{}
// List returns one page of media rows matching conds, newest first.
func (m *medias) List(
	ctx context.Context,
	pagination *requests.Pagination,
	conds ...gen.Condition,
) (*requests.Pager, error) {
	pagination.Format()
	tbl, query := models.MediaQuery.QueryContext(ctx)
	offset, limit := int(pagination.Offset()), int(pagination.Limit)
	items, cnt, err := query.
		Where(conds...).
		Order(tbl.ID.Desc()).
		FindByPage(offset, limit)
	if err != nil {
		return nil, errors.Wrap(err, "failed to list media items")
	}
	pager := &requests.Pager{
		Items:      items,
		Total:      cnt,
		Pagination: *pagination,
	}
	return pager, nil
}
// GetByIds returns the media rows whose IDs are in ids. An empty ids
// slice short-circuits to an empty (non-nil) result without a query.
func (m *medias) GetByIds(ctx context.Context, ids []int64) ([]*models.Media, error) {
	if len(ids) == 0 {
		return []*models.Media{}, nil
	}
	tbl, query := models.MediaQuery.QueryContext(ctx)
	items, err := query.Where(tbl.ID.In(ids...)).Find()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get media items by ids")
	}
	return items, nil
}
// GetByHash returns the first media row with the given content hash.
func (m *medias) GetByHash(ctx context.Context, hash string) (*models.Media, error) {
	tbl, query := models.MediaQuery.QueryContext(ctx)
	item, err := query.Where(tbl.Hash.Eq(hash)).First()
	if err != nil {
		return nil, errors.Wrap(err, "failed to get media item by hash")
	}
	return item, nil
}
// UpdateMetas replaces the metas column of the media row with the given id.
func (m *medias) UpdateMetas(ctx context.Context, id int64, metas fields.MediaMetas) error {
	tbl, query := models.MediaQuery.QueryContext(ctx)
	if _, err := query.
		Where(tbl.ID.Eq(id)).
		Update(tbl.Metas, metas); err != nil {
		return errors.Wrapf(err, "failed to update media metas for id: %d", id)
	}
	return nil
}
// GetRelations returns all media whose metas JSON field "parent_hash"
// equals hash, i.e. the children derived from that master media.
func (m *medias) GetRelations(ctx context.Context, hash string) ([]*models.Media, error) {
	tbl, query := models.MediaQuery.QueryContext(ctx)
	return query.Where(tbl.Metas.KeyEq("parent_hash", hash)).Find()
}

View File

@@ -0,0 +1,133 @@
package services
import (
"context"
"quyun/v2/app/requests"
"quyun/v2/database/models"
"quyun/v2/pkg/fields"
"github.com/pkg/errors"
"github.com/samber/lo"
"go.ipao.vip/gen"
)
// orders provides listing, refund, and lookup operations over orders.
// @provider
type orders struct{}
// List returns one page of orders, optionally filtered by a fuzzy order
// number match and/or an exact user ID, enriched with the related post
// title and username for display.
func (m *orders) List(
	ctx context.Context,
	pagination *requests.Pagination,
	orderNumber *string,
	userID *int64,
) (*requests.Pager, error) {
	pagination.Format()
	tbl, query := models.OrderQuery.QueryContext(ctx)
	conds := make([]gen.Condition, 0, 2)
	if orderNumber != nil && *orderNumber != "" {
		conds = append(conds, tbl.OrderNo.Like("%"+*orderNumber+"%"))
	}
	if userID != nil {
		conds = append(conds, tbl.UserID.Eq(*userID))
	}
	orders, cnt, err := query.
		Where(conds...).
		Order(tbl.ID.Desc()).
		FindByPage(int(pagination.Offset()), int(pagination.Limit))
	if err != nil {
		return nil, errors.Wrap(err, "failed to list orders")
	}
	// Deliberately "query orders first, then batch-load relations" instead
	// of a JOIN: joining while paginating can duplicate rows and make the
	// page boundaries unstable.
	postIDs := lo.Uniq(lo.Map(orders, func(o *models.Order, _ int) int64 { return o.PostID }))
	userIDs := lo.Uniq(lo.Map(orders, func(o *models.Order, _ int) int64 { return o.UserID }))
	posts, err := models.PostQuery.WithContext(ctx).GetByIDs(postIDs...)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get posts by ids")
	}
	users, err := models.UserQuery.WithContext(ctx).GetByIDs(userIDs...)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get users by ids")
	}
	postMap := lo.SliceToMap(posts, func(p *models.Post) (int64, *models.Post) { return p.ID, p })
	userMap := lo.SliceToMap(users, func(u *models.User) (int64, *models.User) { return u.ID, u })
	// orderListItem denormalizes the post title and username into each
	// row so the frontend does not need follow-up queries.
	type orderListItem struct {
		*models.Order
		PostTitle string `json:"post_title"`
		Username  string `json:"username"`
	}
	items := lo.Map(orders, func(o *models.Order, _ int) *orderListItem {
		item := &orderListItem{Order: o}
		if post, ok := postMap[o.PostID]; ok {
			item.PostTitle = post.Title
		}
		if user, ok := userMap[o.UserID]; ok {
			item.Username = user.Username
		}
		return item
	})
	return &requests.Pager{
		Items:      items,
		Total:      cnt,
		Pagination: *pagination,
	}, nil
}
// Refund refunds an order paid from balance. For state consistency all
// three steps run in one transaction: return the balance, revoke the
// purchased post, and mark the order refunded.
//
// NOTE(review): the original comment also mentioned a WeChat-payment
// refund path ("mark as refund-in-progress"), but this function only
// implements the local balance refund — confirm where the WeChat case
// is handled.
func (m *orders) Refund(ctx context.Context, id int64) error {
	return models.Q.Transaction(func(tx *models.Query) error {
		order, err := tx.Order.WithContext(ctx).GetByID(id)
		if err != nil {
			return errors.Wrap(err, "failed to get order in tx")
		}
		// Return the balance portion using an atomic increment, so a
		// concurrent balance change is not overwritten.
		costBalance := order.Meta.Data().CostBalance
		if costBalance > 0 {
			if _, err := tx.User.
				WithContext(ctx).
				Where(tx.User.ID.Eq(order.UserID)).
				Inc(tx.User.Balance, costBalance); err != nil {
				return errors.Wrap(err, "failed to refund balance")
			}
		}
		// Revoke access to the purchased post (delete the user_posts row).
		if _, err := tx.UserPost.
			WithContext(ctx).
			Where(
				tx.UserPost.UserID.Eq(order.UserID),
				tx.UserPost.PostID.Eq(order.PostID),
			).
			Delete(); err != nil {
			return errors.Wrap(err, "failed to revoke user post")
		}
		// Mark the order as successfully refunded.
		if _, err := tx.Order.
			WithContext(ctx).
			Where(tx.Order.ID.Eq(order.ID)).
			Update(tx.Order.Status, fields.OrderStatusRefundSuccess); err != nil {
			return errors.Wrap(err, "failed to update order status")
		}
		return nil
	})
}
// GetByOrderNO returns the first order with the given order number.
// NOTE(review): the name casing ("NO") differs from the GetByOrderNo
// spelling used elsewhere in the codebase; renaming would break callers,
// so it is only flagged here.
func (m *orders) GetByOrderNO(ctx context.Context, orderNo string) (*models.Order, error) {
	return models.OrderQuery.WithContext(ctx).Where(models.OrderQuery.OrderNo.Eq(orderNo)).First()
}

View File

@@ -0,0 +1,150 @@
package services
import (
"context"
"quyun/v2/app/requests"
"quyun/v2/database/models"
"time"
"github.com/pkg/errors"
"github.com/samber/lo"
"go.ipao.vip/gen"
)
// posts provides listing, purchase, and statistics queries over posts.
// @provider
type posts struct{}
// IncrViewCount atomically bumps the view counter of the given post.
func (m *posts) IncrViewCount(ctx context.Context, postID int64) error {
	tbl, query := models.PostQuery.QueryContext(ctx)
	if _, err := query.Where(tbl.ID.Eq(postID)).Inc(tbl.Views, 1); err != nil {
		return errors.Wrapf(err, "failed to increment view count for post %d", postID)
	}
	return nil
}
// List returns one page of posts matching conds, newest first.
func (m *posts) List(
	ctx context.Context,
	pagination *requests.Pagination,
	conds ...gen.Condition,
) (*requests.Pager, error) {
	pagination.Format()
	tbl, query := models.PostQuery.QueryContext(ctx)
	offset, limit := int(pagination.Offset()), int(pagination.Limit)
	items, cnt, err := query.
		Where(conds...).
		Order(tbl.ID.Desc()).
		FindByPage(offset, limit)
	if err != nil {
		return nil, errors.Wrap(err, "list post failed")
	}
	pager := &requests.Pager{
		Items:      items,
		Total:      cnt,
		Pagination: *pagination,
	}
	return pager, nil
}
// SendTo grants a post to a user without a purchase by inserting a
// user_posts row.
// NOTE(review): Price -1 appears to be a sentinel marking a gifted (not
// bought) post — confirm consumers of user_posts interpret it that way.
func (m *posts) SendTo(ctx context.Context, postID, userID int64) error {
	model := &models.UserPost{
		UserID: userID,
		PostID: postID,
		Price:  -1,
	}
	return model.Create(ctx)
}
// BoughtStatistics returns, for each post ID in postIds, the number of
// user_post rows referencing it (i.e. how many times it was bought/granted).
// Post IDs with no rows are absent from the result map.
//
// An empty postIds slice short-circuits to an empty map without touching the
// database, matching the empty-input guard in GetMediaByIds and avoiding a
// degenerate `IN ()` query.
func (m *posts) BoughtStatistics(ctx context.Context, postIds []int64) (map[int64]int64, error) {
	result := make(map[int64]int64, len(postIds))
	if len(postIds) == 0 {
		return result, nil
	}

	tbl, query := models.UserPostQuery.QueryContext(ctx)

	// COUNT(user_id) per post_id, restricted to the requested IDs.
	var rows []struct {
		Count  int64
		PostID int64
	}
	err := query.Select(
		tbl.UserID.Count().As("count"),
		tbl.PostID,
	).
		Where(tbl.PostID.In(postIds...)).
		Group(tbl.PostID).
		Scan(&rows)
	if err != nil {
		return nil, err
	}

	for _, row := range rows {
		result[row.PostID] = row.Count
	}
	return result, nil
}
// Bought returns one page of the user's purchase records, each joined with
// its post title. Purchases whose post no longer exists are silently dropped
// from the page (the Total count still reflects all user_post rows).
//
// Improvements over the previous version: the post lookup is skipped for an
// empty page (no degenerate `IN ()` query), and Items is always a non-nil
// slice so it JSON-encodes as [] instead of null.
func (m *posts) Bought(ctx context.Context, userId int64, pagination *requests.Pagination) (*requests.Pager, error) {
	pagination.Format()

	tbl, query := models.UserPostQuery.QueryContext(ctx)
	items, cnt, err := query.
		Where(tbl.UserID.Eq(userId)).
		FindByPage(int(pagination.Offset()), int(pagination.Limit))
	if err != nil {
		return nil, err
	}

	type retItem struct {
		Title    string    `json:"title"`
		Price    int64     `json:"price"`
		BoughtAt time.Time `json:"bought_at"`
	}
	retItems := make([]retItem, 0, len(items))

	if len(items) > 0 {
		postIds := lo.Map(items, func(item *models.UserPost, _ int) int64 { return item.PostID })
		// Index the purchase rows by post ID so each post's price/time is
		// recoverable below. NOTE(review): duplicate purchases of the same
		// post keep only the last row — confirm that cannot happen.
		postInfoMap := lo.KeyBy(items, func(item *models.UserPost) int64 { return item.PostID })
		postItemMap, err := m.GetPostsMapByIDs(ctx, postIds)
		if err != nil {
			return nil, err
		}
		for _, postID := range postIds {
			post, ok := postItemMap[postID]
			if !ok {
				// Post was deleted after purchase; omit it from the listing.
				continue
			}
			postInfo := postInfoMap[postID]
			retItems = append(retItems, retItem{
				Title:    post.Title,
				Price:    postInfo.Price,
				BoughtAt: postInfo.CreatedAt,
			})
		}
	}

	return &requests.Pager{
		Items:      retItems,
		Total:      cnt,
		Pagination: *pagination,
	}, nil
}
// GetPostsMapByIDs fetches the posts with the given IDs and returns them
// keyed by ID. IDs without a matching row are simply absent from the map.
//
// An empty ids slice short-circuits to an empty map without querying the
// database, matching the empty-input guard in GetMediaByIds.
func (m *posts) GetPostsMapByIDs(ctx context.Context, ids []int64) (map[int64]*models.Post, error) {
	if len(ids) == 0 {
		return map[int64]*models.Post{}, nil
	}
	tbl, query := models.PostQuery.QueryContext(ctx)
	posts, err := query.Where(tbl.ID.In(ids...)).Find()
	if err != nil {
		return nil, err
	}
	return lo.KeyBy(posts, func(item *models.Post) int64 { return item.ID }), nil
}
// GetMediaByIds loads the media rows whose IDs appear in ids. An empty or
// nil ids slice returns (nil, nil) without touching the database.
func (m *posts) GetMediaByIds(ctx context.Context, ids []int64) ([]*models.Media, error) {
	if len(ids) == 0 {
		return nil, nil
	}

	tbl, query := models.MediaQuery.QueryContext(ctx)
	found, err := query.Where(tbl.ID.In(ids...)).Find()
	if err != nil {
		return nil, err
	}
	return found, nil
}

View File

@@ -6,16 +6,35 @@ import (
"go.ipao.vip/atom/contracts"
"go.ipao.vip/atom/opt"
"gorm.io/gorm"
"quyun/v2/providers/wepay"
)
func Provide(opts ...opt.Option) error {
if err := container.Container.Provide(func() (*medias, error) {
obj := &medias{}
return obj, nil
}); err != nil {
return err
}
if err := container.Container.Provide(func(wepayClient *wepay.Client) (*orders, error) {
obj := &orders{
wepay: wepayClient,
}
return obj, nil
}); err != nil {
return err
}
if err := container.Container.Provide(func(
db *gorm.DB,
test *test,
medias *medias,
orders *orders,
) (contracts.Initial, error) {
obj := &services{
db: db,
test: test,
db: db,
medias: medias,
orders: orders,
}
if err := obj.Prepare(); err != nil {
return nil, err
@@ -25,12 +44,5 @@ func Provide(opts ...opt.Option) error {
}, atom.GroupInitial); err != nil {
return err
}
if err := container.Container.Provide(func() (*test, error) {
obj := &test{}
return obj, nil
}); err != nil {
return err
}
return nil
}

View File

@@ -8,21 +8,24 @@ var _db *gorm.DB
// exported CamelCase Services
var (
Test *test
Medias *medias
Orders *orders
)
// @provider(model)
type services struct {
db *gorm.DB
// define Services
test *test
medias *medias
orders *orders
}
func (svc *services) Prepare() error {
_db = svc.db
// set exported Services here
Test = svc.test
Medias = svc.medias
Orders = svc.orders
return nil
}

View File

@@ -1,10 +0,0 @@
package services
import "context"
// @provider
type test struct{}
func (t *test) Test(ctx context.Context) (string, error) {
return "Test", nil
}

View File

@@ -1,41 +0,0 @@
package services
import (
"testing"
"time"
"quyun/v2/app/commands/testx"
. "github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/suite"
_ "go.ipao.vip/atom"
"go.ipao.vip/atom/contracts"
"go.uber.org/dig"
)
type TestSuiteInjectParams struct {
dig.In
Initials []contracts.Initial `group:"initials"` // nolint:structcheck
}
type TestSuite struct {
suite.Suite
TestSuiteInjectParams
}
func Test_Test(t *testing.T) {
providers := testx.Default().With(Provide)
testx.Serve(providers, t, func(p TestSuiteInjectParams) {
suite.Run(t, &TestSuite{TestSuiteInjectParams: p})
})
}
func (t *TestSuite) Test_Test() {
Convey("test_work", t.T(), func() {
t.T().Log("start test at", time.Now())
})
}