diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..90df546 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = false + +[*.{yaml,yml}] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab \ No newline at end of file diff --git a/.gitignore b/.gitignore index adf8f72..5379cf7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,15 @@ -# ---> Go -# If you prefer the allow list template instead of the deny list, see community template: -# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore -# +bin/* +vendor/ +__debug_bin* +build/* +.vscode +.idea +tmp/ +docker-compose.yml +atom +sqlite.db +go.work +go.work.sum # Binaries for programs and plugins *.exe *.exe~ @@ -17,7 +25,3 @@ # Dependency directories (remove the comment below to include it) # vendor/ - -# Go workspace file -go.work - diff --git a/README.md b/README.md deleted file mode 100644 index 26c386e..0000000 --- a/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# qvyun - diff --git a/backend/.editorconfig b/backend/.editorconfig new file mode 100644 index 0000000..90df546 --- /dev/null +++ b/backend/.editorconfig @@ -0,0 +1,19 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = false + +[*.{yaml,yml}] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab \ No newline at end of file diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..7f956e7 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,28 @@ +bin/* +vendor/ +__debug_bin* +backend +build/* +.vscode +.idea +tmp/ 
+docker-compose.yml +atom +sqlite.db +go.work +go.work.sum +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/backend/Dockerfile b/backend/Dockerfile new file mode 100644 index 0000000..3b9a775 --- /dev/null +++ b/backend/Dockerfile @@ -0,0 +1,17 @@ +FROM docker.hub.ipao.vip/alpine:3.20 + +# Set timezone +RUN apk add --no-cache tzdata && \ + cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + echo "Asia/Shanghai" > /etc/timezone && \ + apk del tzdata + +COPY backend/build/app /app/app +COPY backend/config.toml /app/config.toml +COPY frontend/dist /app/dist + +WORKDIR /app + +ENTRYPOINT ["/app/app"] + +CMD [ "serve" ] diff --git a/backend/Makefile b/backend/Makefile new file mode 100644 index 0000000..92d0ecd --- /dev/null +++ b/backend/Makefile @@ -0,0 +1,33 @@ +buildAt=`date +%Y/%m/%d-%H:%M:%S` +gitHash=`git rev-parse HEAD` +version=`git rev-parse --abbrev-ref HEAD | grep -v HEAD || git describe --exact-match HEAD || git rev-parse HEAD` ## todo: use current release git tag +flags="-X 'atom/utils.Version=${version}' -X 'atom/utils.BuildAt=${buildAt}' -X 'atom/utils.GitHash=${gitHash}'" +release_flags="-w -s ${flags}" + +GOPATH:=$(shell go env GOPATH) + +.PHONY: tidy +tidy: + @go mod tidy + +.PHONY: release +release: + @CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags=${flags} -o bin/release/backend . + @cp config.toml bin/release/ + +.PHONY: test +test: + @go test -v ./... 
-cover + +.PHONY: lint +lint: + @golangci-lint run + +.PHONY: init +init: + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 + go install google.golang.org/protobuf/cmd/protoc-gen-go + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc + go install github.com/bufbuild/buf/cmd/buf + go install github.com/golangci/golangci-lint/cmd/golangci-lint diff --git a/backend/app/console/.gitkeep b/backend/app/console/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend/app/errorx/error.go b/backend/app/errorx/error.go new file mode 100644 index 0000000..f9af7be --- /dev/null +++ b/backend/app/errorx/error.go @@ -0,0 +1,147 @@ +package errorx + +import ( + "errors" + "fmt" + "net/http" + "runtime" + "strings" + + "github.com/go-jet/jet/v2/qrm" + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/binder" + "github.com/gofiber/utils/v2" + log "github.com/sirupsen/logrus" +) + +func Middleware(c fiber.Ctx) error { + err := c.Next() + if err != nil { + return Wrap(err).Response(c) + } + return err +} + +type Response struct { + isFormat bool + err error + params []any + sql string + file string + + StatusCode int `json:"-" xml:"-"` + Code int `json:"code" xml:"code"` + Message string `json:"message" xml:"message"` + Data any `json:"data,omitempty" xml:"data"` +} + +func New(code, statusCode int, message string) *Response { + return &Response{ + isFormat: true, + StatusCode: statusCode, + Code: code, + Message: message, + } +} + +func (r *Response) Sql(sql string) *Response { + r.sql = sql + return r +} + +func (r *Response) from(err *Response) *Response { + r.Code = err.Code + r.Message = err.Message + r.StatusCode = err.StatusCode + return r +} + +func (r *Response) Params(params ...any) *Response { + r.params = params + if _, file, line, ok := runtime.Caller(1); ok { + r.file = fmt.Sprintf("%s:%d", file, line) + } + return r +} + +func 
Wrap(err error) *Response { + if e, ok := err.(*Response); ok { + return e + } + return &Response{err: err} +} + +func (r *Response) Wrap(err error) *Response { + r.err = err + return r +} + +func (r *Response) format() { + r.isFormat = true + if errors.Is(r.err, qrm.ErrNoRows) { + r.from(RecordNotExists) + return + } + + if e, ok := r.err.(*fiber.Error); ok { + r.Code = e.Code + r.Message = e.Message + r.StatusCode = e.Code + return + } + + if r.err != nil { + msg := r.err.Error() + if strings.Contains(msg, "duplicate key value") || strings.Contains(msg, "unique constraint") { + r.from(RecordDuplicated) + return + } + + r.Code = http.StatusInternalServerError + r.StatusCode = http.StatusInternalServerError + r.Message = msg + } + return +} + +func (r *Response) Error() string { + if !r.isFormat { + r.format() + } + + return fmt.Sprintf("[%d] %s", r.Code, r.Message) +} + +func (r *Response) Response(ctx fiber.Ctx) error { + if !r.isFormat { + r.format() + } + + contentType := utils.ToLower(utils.UnsafeString(ctx.Request().Header.ContentType())) + contentType = binder.FilterFlags(utils.ParseVendorSpecificContentType(contentType)) + + log. + WithError(r.err). + WithField("file", r.file). + WithField("sql", r.sql). + WithField("params", r.params). 
+ Errorf("response error: %+v", r) + + // Parse body accordingly + switch contentType { + case fiber.MIMETextXML, fiber.MIMEApplicationXML: + return ctx.Status(r.StatusCode).XML(r) + case fiber.MIMETextHTML, fiber.MIMETextPlain: + return ctx.Status(r.StatusCode).SendString(r.Message) + default: + return ctx.Status(r.StatusCode).JSON(r) + } +} + +var ( + RecordDuplicated = New(1001, http.StatusBadRequest, "记录重复") + RecordNotExists = New(http.StatusNotFound, http.StatusNotFound, "记录不存在") + BadRequest = New(http.StatusBadRequest, http.StatusBadRequest, "请求错误") + Unauthorized = New(http.StatusUnauthorized, http.StatusUnauthorized, "未授权") + InternalErr = New(http.StatusInternalServerError, http.StatusInternalServerError, "内部错误") +) diff --git a/backend/app/events/event_demo.go b/backend/app/events/event_demo.go new file mode 100644 index 0000000..57de3d7 --- /dev/null +++ b/backend/app/events/event_demo.go @@ -0,0 +1,71 @@ +package events + +import ( + "encoding/json" + "fmt" + "time" + + "git.ipao.vip/rogeecn/atom/contracts" + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/sirupsen/logrus" +) + +var _ contracts.EventHandler = (*UserRegister)(nil) + +type Event struct { + ID int `json:"id"` +} + +type ProcessedEvent struct { + ProcessedID int `json:"processed_id"` + Time time.Time `json:"time"` +} + +// @provider(event) +type UserRegister struct { + log *logrus.Entry `inject:"false"` +} + +func (u *UserRegister) Prepare() error { + return nil +} + +// Handler implements contracts.EventHandler. +func (u *UserRegister) Handler(msg *message.Message) ([]*message.Message, error) { + consumedPayload := Event{} + err := json.Unmarshal(msg.Payload, &consumedPayload) + if err != nil { + // When a handler returns an error, the default behavior is to send a Nack (negative-acknowledgement). + // The message will be processed again. + // + // You can change the default behaviour by using middlewares, like Retry or PoisonQueue. 
+ // You can also implement your own middleware. + return nil, err + } + + fmt.Printf("received event %+v\n", consumedPayload) + + newPayload, err := json.Marshal(ProcessedEvent{ + ProcessedID: consumedPayload.ID, + Time: time.Now(), + }) + if err != nil { + return nil, err + } + + newMessage := message.NewMessage(watermill.NewUUID(), newPayload) + + return nil, nil + return []*message.Message{newMessage}, nil +} + +// PublishToTopic implements contracts.EventHandler. +func (u *UserRegister) PublishToTopic() string { + return "event:processed" +} + +// Topic implements contracts.EventHandler. +func (u *UserRegister) Topic() string { + return "event:user-register" +} diff --git a/backend/app/events/provider.gen.go b/backend/app/events/provider.gen.go new file mode 100755 index 0000000..793703e --- /dev/null +++ b/backend/app/events/provider.gen.go @@ -0,0 +1,27 @@ +package events + +import ( + "backend/providers/events" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __event *events.PubSub, + ) (contracts.Initial, error) { + obj := &UserRegister{} + if err := obj.Prepare(); err != nil { + return nil, err + } + __event.Handle("handler:UserRegister", obj.Topic(), obj.PublishToTopic(), obj.Handler) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend/app/grpc/users/handler.go b/backend/app/grpc/users/handler.go new file mode 100644 index 0000000..969f252 --- /dev/null +++ b/backend/app/grpc/users/handler.go @@ -0,0 +1,26 @@ +package users + +import ( + "context" + + userv1 "backend/pkg/proto/user/v1" +) + +// @provider(grpc) userv1.RegisterUserServiceServer +type Users struct { + userv1.UnimplementedUserServiceServer +} + +func (u *Users) ListUsers(ctx context.Context, in *userv1.ListUsersRequest) 
(*userv1.ListUsersResponse, error) { + // userv1.UserServiceServer + return &userv1.ListUsersResponse{}, nil +} + +// GetUser implements userv1.UserServiceServer +func (u *Users) GetUser(ctx context.Context, in *userv1.GetUserRequest) (*userv1.GetUserResponse, error) { + return &userv1.GetUserResponse{ + User: &userv1.User{ + Id: in.Id, + }, + }, nil +} diff --git a/backend/app/grpc/users/provider.gen.go b/backend/app/grpc/users/provider.gen.go new file mode 100755 index 0000000..00324d5 --- /dev/null +++ b/backend/app/grpc/users/provider.gen.go @@ -0,0 +1,25 @@ +package users + +import ( + userv1 "backend/pkg/proto/user/v1" + "backend/providers/grpc" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __grpc *grpc.Grpc, + ) (contracts.Initial, error) { + obj := &Users{} + userv1.RegisterUserServiceServer(__grpc.Server, obj) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend/app/http/.gitkeep b/backend/app/http/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend/app/jobs/demo_cron.go b/backend/app/jobs/demo_cron.go new file mode 100644 index 0000000..44d5ff1 --- /dev/null +++ b/backend/app/jobs/demo_cron.go @@ -0,0 +1,50 @@ +package jobs + +import ( + "time" + + _ "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/contracts" + "github.com/riverqueue/river" + "github.com/sirupsen/logrus" +) + +var _ contracts.CronJob = (*CronJob)(nil) + +// @provider contracts.CronJob atom.GroupCronJob +type CronJob struct { + log *logrus.Entry `inject:"false"` +} + +func (cron *CronJob) Prepare() error { + cron.log = logrus.WithField("module", "cron") + return nil +} + +func (cron *CronJob) Description() string { + return "hello world cron job" +} + +// InsertOpts implements contracts.CronJob. 
+func (cron *CronJob) InsertOpts() *river.InsertOpts { + return nil +} + +// JobArgs implements contracts.CronJob. +func (cron *CronJob) JobArgs() []river.JobArgs { + return []river.JobArgs{ + SortArgs{ + Strings: []string{"a", "c", "b", "d"}, + }, + } +} + +// Periodic implements contracts.CronJob. +func (cron *CronJob) Periodic() time.Duration { + return time.Second * 10 +} + +// RunOnStart implements contracts.CronJob. +func (cron *CronJob) RunOnStart() bool { + return true +} diff --git a/backend/app/jobs/demo_job.go b/backend/app/jobs/demo_job.go new file mode 100644 index 0000000..f10aa71 --- /dev/null +++ b/backend/app/jobs/demo_job.go @@ -0,0 +1,53 @@ +package jobs + +import ( + "context" + "sort" + "time" + + _ "git.ipao.vip/rogeecn/atom" + _ "git.ipao.vip/rogeecn/atom/contracts" + . "github.com/riverqueue/river" + log "github.com/sirupsen/logrus" +) + +// provider:[except|only] [returnType] [group] + +var ( + _ JobArgs = SortArgs{} + _ JobArgsWithInsertOpts = SortArgs{} +) + +type SortArgs struct { + Strings []string `json:"strings"` +} + +// InsertOpts implements JobArgsWithInsertOpts. 
+func (s SortArgs) InsertOpts() InsertOpts { + return InsertOpts{ + Queue: QueueDefault, + Priority: PriorityDefault, + } +} + +func (SortArgs) Kind() string { + return "sort" +} + +var _ Worker[SortArgs] = (*SortWorker)(nil) + +// @provider(job) +type SortWorker struct { + WorkerDefaults[SortArgs] +} + +func (w *SortWorker) Work(ctx context.Context, job *Job[SortArgs]) error { + sort.Strings(job.Args.Strings) + + log.Infof("[%s] Sorted strings: %v\n", time.Now().Format(time.TimeOnly), job.Args.Strings) + return nil +} + +func (w *SortWorker) NextRetry(job *Job[SortArgs]) time.Time { + return time.Now().Add(5 * time.Second) +} diff --git a/backend/app/jobs/provider.gen.go b/backend/app/jobs/provider.gen.go new file mode 100755 index 0000000..5f41b9b --- /dev/null +++ b/backend/app/jobs/provider.gen.go @@ -0,0 +1,37 @@ +package jobs + +import ( + "backend/providers/job" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/riverqueue/river" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (contracts.CronJob, error) { + obj := &CronJob{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupCronJob); err != nil { + return err + } + if err := container.Container.Provide(func( + __job *job.Job, + ) (contracts.Initial, error) { + obj := &SortWorker{} + if err := river.AddWorkerSafely(__job.Workers, obj); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend/app/middlewares/m_check_ua.go b/backend/app/middlewares/m_check_ua.go new file mode 100644 index 0000000..88a5f05 --- /dev/null +++ b/backend/app/middlewares/m_check_ua.go @@ -0,0 +1,17 @@ +package middlewares + +import ( + "strings" + + "github.com/gofiber/fiber/v3" +) + +func (m *Middlewares) CheckUA(ctx fiber.Ctx) error { + 
keyword := strings.ToLower("MicroMessenger") + userAgent := ctx.GetReqHeaders()["User-Agent"][0] + + if strings.Contains(userAgent, keyword) { + return ctx.SendString("") + } + return ctx.Next() +} diff --git a/backend/app/middlewares/m_jwt_parse.go b/backend/app/middlewares/m_jwt_parse.go new file mode 100644 index 0000000..3c51efd --- /dev/null +++ b/backend/app/middlewares/m_jwt_parse.go @@ -0,0 +1,37 @@ +package middlewares + +import ( + "time" + + "backend/app/errorx" + + "github.com/gofiber/fiber/v3" + log "github.com/sirupsen/logrus" +) + +func (f *Middlewares) ParseJWT(c fiber.Ctx) error { + tokens := c.GetReqHeaders()["Authorization"] + if len(tokens) == 0 { + queryToken := c.Query("token") + tokens = []string{queryToken} + if len(tokens) == 0 { + return c.Next() + } + } + + token := tokens[0] + claim, err := f.jwt.Parse(token) + if err != nil { + c.Cookie(&fiber.Cookie{ + Name: "token", + Value: "", + Expires: time.Now().Add(-1 * time.Hour), + HTTPOnly: true, + }) + log.Errorf("failed to parse jwt from token: %s", token) + return errorx.Unauthorized + } + _ = claim + + return c.Next() +} diff --git a/backend/app/middlewares/m_wechat_auth.go b/backend/app/middlewares/m_wechat_auth.go new file mode 100644 index 0000000..cfe8a31 --- /dev/null +++ b/backend/app/middlewares/m_wechat_auth.go @@ -0,0 +1,66 @@ +package middlewares + +import ( + "fmt" + "strings" + "time" + + "backend/providers/wechat" + + "github.com/gofiber/fiber/v3" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +const StatePrefix = "sns_basic_auth" + +func (f *Middlewares) WeChatAuth(c fiber.Ctx) error { + log := log.WithField("module", "middleware.AuthUserInfo") + log.Debugf("%s, query: %v", c.OriginalURL(), c.Queries()) + state := c.Query("state") + code := c.Query("code") + log.Debugf("code: %s, state: %s", code, state) + + jwtToken := c.Cookies("token") + if jwtToken != "" { + log.Debugf("jwtToken: %s", jwtToken) + + if _, err := f.jwt.Parse(jwtToken); err != nil { + 
log.WithError(err).Error("failed to parse jwt token") + + c.Cookie(&fiber.Cookie{ + Name: "token", + Value: "", + Expires: time.Now().Add(-1 * time.Hour), + HTTPOnly: true, + }) + return c.Redirect().To(c.Path()) + } + } + + if state == "" && code == "" { + url := string(c.Request().URI().FullURI()) + url = strings.ReplaceAll(url, "http", "https") + url = strings.ReplaceAll(url, c.BaseURL(), *f.app.BaseURI) + + log.WithField("module", "middleware.SilentAuth").Debug("redirect_uri: ", url) + + to, err := f.client.ScopeAuthorizeURL( + wechat.ScopeAuthorizeURLWithRedirectURI(url), + wechat.ScopeAuthorizeURLWithState(fmt.Sprintf("%s_%d", StatePrefix, time.Now().UnixNano())), + ) + if err != nil { + return errors.Wrap(err, "failed to get wechat auth url") + } + log.WithField("module", "middleware.SilentAuth").Debug("redirectTo: ", to.String()) + + return c.Redirect().To(to.String()) + + } + + if !strings.HasPrefix(state, StatePrefix) || code == "" { + return errors.New("invalid request") + } + + return c.Next() +} diff --git a/backend/app/middlewares/m_wechat_verify.go b/backend/app/middlewares/m_wechat_verify.go new file mode 100644 index 0000000..8ca155d --- /dev/null +++ b/backend/app/middlewares/m_wechat_verify.go @@ -0,0 +1,33 @@ +package middlewares + +import ( + "github.com/gofiber/fiber/v3" + log "github.com/sirupsen/logrus" +) + +// 此方法用于微信首次接入时的数据验证 +func (f *Middlewares) WeChatVerify(c fiber.Ctx) error { + // get the query parameters + signature := c.Query("signature") + timestamp := c.Query("timestamp") + nonce := c.Query("nonce") + echostr := c.Query("echostr") + + if signature == "" || timestamp == "" || nonce == "" || echostr == "" { + return c.Next() + } + + log.WithField("method", "Verify").WithFields(log.Fields{ + "signature": signature, + "timestamp": timestamp, + "nonce": nonce, + "echostr": echostr, + }).Debug("begin verify signature") + + // verify the signature + if err := f.client.Verify(signature, timestamp, nonce); err != nil { + return 
c.SendString(err.Error()) + } + + return c.SendString(echostr) +} diff --git a/backend/app/middlewares/mid_debug.go b/backend/app/middlewares/mid_debug.go new file mode 100644 index 0000000..ecb33af --- /dev/null +++ b/backend/app/middlewares/mid_debug.go @@ -0,0 +1,9 @@ +package middlewares + +import ( + "github.com/gofiber/fiber/v3" +) + +func (f *Middlewares) DebugMode(c fiber.Ctx) error { + return c.Next() +} diff --git a/backend/app/middlewares/middlewares.go b/backend/app/middlewares/middlewares.go new file mode 100644 index 0000000..644395b --- /dev/null +++ b/backend/app/middlewares/middlewares.go @@ -0,0 +1,25 @@ +package middlewares + +import ( + "backend/providers/app" + "backend/providers/jwt" + "backend/providers/storage" + "backend/providers/wechat" + + log "github.com/sirupsen/logrus" +) + +// @provider +type Middlewares struct { + log *log.Entry `inject:"false"` + + app *app.Config + storagePath *storage.Config + jwt *jwt.JWT + client *wechat.Client +} + +func (f *Middlewares) Prepare() error { + f.log = log.WithField("module", "middleware") + return nil +} diff --git a/backend/app/middlewares/provider.gen.go b/backend/app/middlewares/provider.gen.go new file mode 100755 index 0000000..9bd3ba4 --- /dev/null +++ b/backend/app/middlewares/provider.gen.go @@ -0,0 +1,20 @@ +package middlewares + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*Middlewares, error) { + obj := &Middlewares{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }); err != nil { + return err + } + return nil +} diff --git a/backend/app/requests/pagination.go b/backend/app/requests/pagination.go new file mode 100644 index 0000000..31bb855 --- /dev/null +++ b/backend/app/requests/pagination.go @@ -0,0 +1,30 @@ +package requests + +import "github.com/samber/lo" + +type Pager struct { + Pagination `json:",inline"` 
+ Total int64 `json:"total"` + Items interface{} `json:"items"` +} + +type Pagination struct { + Page int `json:"page" form:"page" query:"page"` + Limit int `json:"limit" form:"limit" query:"limit"` +} + +func (filter *Pagination) Offset() int { + return (filter.Page - 1) * filter.Limit +} + +func (filter *Pagination) Format() *Pagination { + if filter.Page <= 0 { + filter.Page = 1 + } + + if !lo.Contains([]int{10, 20, 50, 100}, filter.Limit) { + filter.Limit = 10 + } + + return filter +} diff --git a/backend/app/requests/sort.go b/backend/app/requests/sort.go new file mode 100644 index 0000000..517b419 --- /dev/null +++ b/backend/app/requests/sort.go @@ -0,0 +1,41 @@ +package requests + +import ( + "strings" + + "github.com/samber/lo" +) + +type SortQueryFilter struct { + Asc *string `json:"asc" form:"asc"` + Desc *string `json:"desc" form:"desc"` +} + +func (s *SortQueryFilter) AscFields() []string { + if s.Asc == nil { + return nil + } + return strings.Split(*s.Asc, ",") +} + +func (s *SortQueryFilter) DescFields() []string { + if s.Desc == nil { + return nil + } + return strings.Split(*s.Desc, ",") +} + +func (s *SortQueryFilter) DescID() *SortQueryFilter { + if s.Desc == nil { + s.Desc = lo.ToPtr("id") + } + + items := s.DescFields() + if lo.Contains(items, "id") { + return s + } + + items = append(items, "id") + s.Desc = lo.ToPtr(strings.Join(items, ",")) + return s +} diff --git a/backend/app/service/event/event.go b/backend/app/service/event/event.go new file mode 100644 index 0000000..2edb9a7 --- /dev/null +++ b/backend/app/service/event/event.go @@ -0,0 +1,58 @@ +package event + +import ( + "context" + + "backend/app/events" + "backend/app/service" + "backend/providers/app" + providerEvents "backend/providers/events" + "backend/providers/postgres" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func 
defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("event"), + atom.Short("start event processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + events.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + PubSub *providerEvents.PubSub + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.PubSub.Serve(ctx) + }) +} diff --git a/backend/app/service/grpc/grpc.go b/backend/app/service/grpc/grpc.go new file mode 100644 index 0000000..a1aa781 --- /dev/null +++ b/backend/app/service/grpc/grpc.go @@ -0,0 +1,58 @@ +package grpc + +import ( + "backend/app/grpc/users" + "backend/app/service" + _ "backend/docs" + "backend/providers/app" + "backend/providers/grpc" + "backend/providers/postgres" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + grpc.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("grpc"), + atom.Short("run grpc server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). 
+ With( + users.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Grpc *grpc.Grpc + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.Grpc.Serve() + }) +} diff --git a/backend/app/service/http/http.go b/backend/app/service/http/http.go new file mode 100644 index 0000000..82921e7 --- /dev/null +++ b/backend/app/service/http/http.go @@ -0,0 +1,85 @@ +package http + +import ( + "backend/app/errorx" + "backend/app/jobs" + "backend/app/middlewares" + "backend/app/service" + _ "backend/docs" + "backend/providers/app" + "backend/providers/hashids" + "backend/providers/http" + "backend/providers/http/swagger" + "backend/providers/job" + "backend/providers/jwt" + "backend/providers/postgres" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + "github.com/gofiber/fiber/v3/middleware/favicon" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + http.DefaultProvider(), + postgres.DefaultProvider(), + jwt.DefaultProvider(), + hashids.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("serve"), + atom.Short("run http server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). 
+ With( + jobs.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Job *job.Job + + Middlewares *middlewares.Middlewares + Http *http.Service + + Initials []contracts.Initial `group:"initials"` + Routes []contracts.HttpRoute `group:"routes"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + svc.Http.Engine.Get("/swagger/*", swagger.HandlerDefault) + } + + svc.Http.Engine.Use(svc.Middlewares.WeChatVerify) + svc.Http.Engine.Use(errorx.Middleware) + svc.Http.Engine.Use(favicon.New(favicon.Config{ + Data: []byte{}, + })) + + group := svc.Http.Engine.Group("") + for _, route := range svc.Routes { + route.Register(group) + } + + return svc.Http.Serve() + }) +} diff --git a/backend/app/service/queue/error.go b/backend/app/service/queue/error.go new file mode 100644 index 0000000..3300b00 --- /dev/null +++ b/backend/app/service/queue/error.go @@ -0,0 +1,24 @@ +package queue + +import ( + "context" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/rivertype" + log "github.com/sirupsen/logrus" +) + +type CustomErrorHandler struct{} + +func (*CustomErrorHandler) HandleError(ctx context.Context, job *rivertype.JobRow, err error) *river.ErrorHandlerResult { + log.Infof("Job errored with: %s\n", err) + return nil +} + +func (*CustomErrorHandler) HandlePanic(ctx context.Context, job *rivertype.JobRow, panicVal any, trace string) *river.ErrorHandlerResult { + log.Infof("Job panicked with: %v\n", panicVal) + log.Infof("Stack trace: %s\n", trace) + return &river.ErrorHandlerResult{ + SetCancelled: true, + } +} diff --git a/backend/app/service/queue/river.go b/backend/app/service/queue/river.go new file mode 100644 index 0000000..2802c22 --- /dev/null +++ b/backend/app/service/queue/river.go @@ -0,0 +1,94 @@ +package queue + +import ( + "context" + + 
"backend/app/jobs" + "backend/app/service" + "backend/providers/app" + "backend/providers/job" + "backend/providers/postgres" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/contracts" + "github.com/riverqueue/river" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("queue"), + atom.Short("start queue processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + jobs.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Job *job.Job + Initials []contracts.Initial `group:"initials"` + CronJobs []contracts.CronJob `group:"cron_jobs"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + client, err := svc.Job.Client() + if err != nil { + return err + } + + for _, cronJob := range svc.CronJobs { + log. + WithField("module", "cron"). + WithField("name", cronJob.Description()). + WithField("duration", cronJob.Periodic().Seconds()). 
+ Info("registering cron job") + + for _, jobArgs := range cronJob.JobArgs() { + client.PeriodicJobs().Add( + river.NewPeriodicJob( + river.PeriodicInterval(cronJob.Periodic()), + func() (river.JobArgs, *river.InsertOpts) { + return jobArgs, cronJob.InsertOpts() + }, + &river.PeriodicJobOpts{ + RunOnStart: cronJob.RunOnStart(), + }, + ), + ) + } + } + + if err := client.Start(ctx); err != nil { + return err + } + defer client.StopAndCancel(ctx) + + <-ctx.Done() + return nil + }) +} diff --git a/backend/app/service/service.go b/backend/app/service/service.go new file mode 100644 index 0000000..fea0242 --- /dev/null +++ b/backend/app/service/service.go @@ -0,0 +1,15 @@ +package service + +import ( + "backend/providers/app" + "backend/providers/events" + + "git.ipao.vip/rogeecn/atom/container" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{ + app.DefaultProvider(), + events.DefaultProvider(), + }, providers...) +} diff --git a/backend/app/service/testx/testing.go b/backend/app/service/testx/testing.go new file mode 100644 index 0000000..8d6b50e --- /dev/null +++ b/backend/app/service/testx/testing.go @@ -0,0 +1,29 @@ +package testx + +import ( + "os" + "testing" + + "git.ipao.vip/rogeecn/atom" + "git.ipao.vip/rogeecn/atom/container" + "github.com/rogeecn/fabfile" + . "github.com/smartystreets/goconvey/convey" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{}, providers...) +} + +func Serve(providers container.Providers, t *testing.T, invoke any) { + Convey("tests boot up", t, func() { + file := fabfile.MustFind("config.toml") + + localEnv := os.Getenv("ENV_LOCAL") + if localEnv != "" { + file = fabfile.MustFind("config." 
+ localEnv + ".toml") + } + + So(atom.LoadProviders(file, providers), ShouldBeNil) + So(container.Container.Invoke(invoke), ShouldBeNil) + }) +} diff --git a/backend/buf.gen.yaml b/backend/buf.gen.yaml new file mode 100644 index 0000000..d82e75d --- /dev/null +++ b/backend/buf.gen.yaml @@ -0,0 +1,23 @@ +version: v2 +inputs: + - directory: proto +managed: + enabled: true + override: + - file_option: go_package_prefix + value: backend/pkg/proto + +plugins: + - local: protoc-gen-go + out: pkg/proto + opt: paths=source_relative + - local: protoc-gen-grpc-gateway + out: pkg/proto + opt: + - paths=source_relative + - generate_unbound_methods=true + - local: protoc-gen-go-grpc + out: pkg/proto + opt: paths=source_relative + # - local: protoc-gen-openapiv2 + # out: docs/proto diff --git a/backend/buf.yaml b/backend/buf.yaml new file mode 100644 index 0000000..06039af --- /dev/null +++ b/backend/buf.yaml @@ -0,0 +1,13 @@ +# For details on buf.yaml configuration, visit https://buf.build/docs/configuration/v2/buf-yaml +version: v2 +modules: + - path: proto +lint: + use: + - STANDARD +breaking: + use: + - FILE +deps: + - buf.build/googleapis/googleapis + - buf.build/grpc-ecosystem/grpc-gateway diff --git a/backend/config.prod.toml b/backend/config.prod.toml new file mode 100755 index 0000000..7284436 --- /dev/null +++ b/backend/config.prod.toml @@ -0,0 +1,39 @@ +[App] +Mode = "prod" +BaseURI = "https://qvyun.mp.jdwan.com" + +[Http] +Port = 9600 + +[Swagger] +BaseRoute = "doc" +Title = "Api" +Description = "Api Docs" +BasePath = "/v1" +Version = "1.0.0" + + +[Database] +Host = "host.local" +Database = "qvyun" +Password = "xixi0202" + +[Wechat] +AppId = "wx47649361b6eba174" +AppSecret = "e9cdf19b006cd294a9dae7ad8ae08b72" +Token = "W8Xhw5TivYBgY" +AesKey = "OlgPgMvsl92zy5oErtEzRcziRT2txoN3jgEHV6RQZMY" +DevMode = false + +[JWT] +ExpiresTime = "1680h" +SigningKey = "LiXi.Y@140202" + +[HashIDs] +Salt = "LiXi.Y@140202" + +[Storage] +Type = "local" +# Path = 
"/projects/mp-qvyun/backend/fixtures/processed" +Path = "/app/processed" +Asset = "/app/dist" diff --git a/backend/config.toml b/backend/config.toml new file mode 100644 index 0000000..8bc9401 --- /dev/null +++ b/backend/config.toml @@ -0,0 +1,30 @@ +[App] +Mode = "development" +BaseURI = "https://qvyun.mp.jdwan.com" + +[Http] +Port = 9600 + +[Database] +Host = "10.1.1.3" +Database = "qvyun_v2" +Password = "xixi0202" + +[Wechat] +AppId = "wx45745a8c51091ae0" +AppSecret = "2ab33bc79d9b47efa4abef19d66e1977" +Token = "W8Xhw5TivYBgY" +AesKey = "F6AqCxAV4W1eCrY6llJ2zapphKK49CQN3RgtPDrjhnI" +DevMode = true + +[JWT] +ExpiresTime = "168h" +SigningKey = "LiXi.Y@140202" + +[HashIDs] +Salt = "LiXi.Y@140202" + +[Storage] +Type = "local" +Path = "/mnt/yangpingliang/processed" +Asset = "/projects/qvyun/frontend/dist" diff --git a/backend/database/database.go b/backend/database/database.go new file mode 100644 index 0000000..39c3f69 --- /dev/null +++ b/backend/database/database.go @@ -0,0 +1,44 @@ +package database + +import ( + "context" + "database/sql" + "embed" + "fmt" + + "github.com/go-jet/jet/v2/qrm" +) + +//go:embed migrations/* +var MigrationFS embed.FS + +type CtxDB struct{} + +func FromContext(ctx context.Context, db *sql.DB) qrm.DB { + if tx, ok := ctx.Value(CtxDB{}).(*sql.Tx); ok { + return tx + } + return db +} + +func Truncate(ctx context.Context, db *sql.DB, tableName ...string) error { + for _, name := range tableName { + sql := fmt.Sprintf("TRUNCATE TABLE %s RESTART IDENTITY", name) + if _, err := db.ExecContext(ctx, sql); err != nil { + return err + } + } + return nil +} + +func WrapLike(v string) string { + return "%" + v + "%" +} + +func WrapLikeLeft(v string) string { + return "%" + v +} + +func WrapLikeRight(v string) string { + return "%" + v +} diff --git a/backend/database/fields/common.go b/backend/database/fields/common.go new file mode 100644 index 0000000..a078b0f --- /dev/null +++ b/backend/database/fields/common.go @@ -0,0 +1,45 @@ +package fields 
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+)
+
+// Json stores a value of type T in a JSON column (sql.Scanner / driver.Valuer).
+type Json[T any] struct {
+	Data T `json:",inline"`
+}
+
+// ToJson wraps data in a Json field value.
+func ToJson[T any](data T) Json[T] {
+	return Json[T]{Data: data}
+}
+
+// Scan implements sql.Scanner, decoding string/[]byte/*string JSON payloads.
+func (x *Json[T]) Scan(value interface{}) (err error) {
+	switch v := value.(type) {
+	case string:
+		return json.Unmarshal([]byte(v), x)
+	case []byte:
+		return json.Unmarshal(v, x)
+	case *string:
+		return json.Unmarshal([]byte(*v), x)
+	}
+	return errors.New("fields: unsupported Scan source type for Json")
+}
+
+// Value implements driver.Valuer by marshaling the wrapped data as JSON.
+func (x Json[T]) Value() (driver.Value, error) {
+	return json.Marshal(x.Data)
+}
+
+// MarshalJSON emits the wrapped data directly, without the Data envelope.
+func (x Json[T]) MarshalJSON() ([]byte, error) {
+	return json.Marshal(x.Data)
+}
+
+// UnmarshalJSON decodes directly into the wrapped data.
+func (x *Json[T]) UnmarshalJSON(data []byte) error {
+	var value T
+	if err := json.Unmarshal(data, &value); err != nil {
+		return err
+	}
+	x.Data = value
+	return nil
+}
diff --git a/backend/database/migrations/20140202165500_river_job.sql b/backend/database/migrations/20140202165500_river_job.sql
new file mode 100644
index 0000000..e3f0fe0
--- /dev/null
+++ b/backend/database/migrations/20140202165500_river_job.sql
@@ -0,0 +1,408 @@
+-- +goose Up
+-- +goose StatementBegin
+
+-- River migration 002 [up]
+CREATE TYPE river_job_state AS ENUM (
+    'available',
+    'cancelled',
+    'completed',
+    'discarded',
+    'pending',
+    'retryable',
+    'running',
+    'scheduled'
+);
+
+CREATE TABLE river_job(
+    -- 8 bytes
+    id bigserial PRIMARY KEY,
+
+    -- 8 bytes (4 bytes + 2 bytes + 2 bytes)
+    --
+    -- `state` is kept near the top of the table for operator convenience -- when
+    -- looking at jobs with `SELECT *` it'll appear first after ID. The other two
+    -- fields aren't as important but are kept adjacent to `state` for alignment
+    -- to get an 8-byte block.
+ state river_job_state NOT NULL DEFAULT 'available', + attempt smallint NOT NULL DEFAULT 0, + max_attempts smallint NOT NULL, + + -- 8 bytes each (no alignment needed) + attempted_at timestamptz, + created_at timestamptz NOT NULL DEFAULT NOW(), + finalized_at timestamptz, + scheduled_at timestamptz NOT NULL DEFAULT NOW(), + + -- 2 bytes (some wasted padding probably) + priority smallint NOT NULL DEFAULT 1, + + -- types stored out-of-band + args jsonb, + attempted_by text[], + errors jsonb[], + kind text NOT NULL, + metadata jsonb NOT NULL DEFAULT '{}', + queue text NOT NULL DEFAULT 'default', + tags varchar(255)[], + + CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL), + CONSTRAINT max_attempts_is_positive CHECK (max_attempts > 0), + CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4), + CONSTRAINT queue_length CHECK (char_length(queue) > 0 AND char_length(queue) < 128), + CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128) +); + +-- We may want to consider adding another property here after `kind` if it seems +-- like it'd be useful for something. 
+CREATE INDEX river_job_kind ON river_job USING btree(kind); + +CREATE INDEX river_job_state_and_finalized_at_index ON river_job USING btree(state, finalized_at) WHERE finalized_at IS NOT NULL; + +CREATE INDEX river_job_prioritized_fetching_index ON river_job USING btree(state, queue, priority, scheduled_at, id); + +CREATE INDEX river_job_args_index ON river_job USING GIN(args); + +CREATE INDEX river_job_metadata_index ON river_job USING GIN(metadata); + +CREATE OR REPLACE FUNCTION river_job_notify() + RETURNS TRIGGER + AS $$ +DECLARE + payload json; +BEGIN + IF NEW.state = 'available' THEN + -- Notify will coalesce duplicate notifications within a transaction, so + -- keep these payloads generalized: + payload = json_build_object('queue', NEW.queue); + PERFORM + pg_notify('river_insert', payload::text); + END IF; + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER river_notify + AFTER INSERT ON river_job + FOR EACH ROW + EXECUTE PROCEDURE river_job_notify(); + +CREATE UNLOGGED TABLE river_leader( + -- 8 bytes each (no alignment needed) + elected_at timestamptz NOT NULL, + expires_at timestamptz NOT NULL, + + -- types stored out-of-band + leader_id text NOT NULL, + name text PRIMARY KEY, + + CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), + CONSTRAINT leader_id_length CHECK (char_length(leader_id) > 0 AND char_length(leader_id) < 128) +); + +-- River migration 003 [up] +ALTER TABLE river_job ALTER COLUMN tags SET DEFAULT '{}'; +UPDATE river_job SET tags = '{}' WHERE tags IS NULL; +ALTER TABLE river_job ALTER COLUMN tags SET NOT NULL; + +-- River migration 004 [up] +-- The args column never had a NOT NULL constraint or default value at the +-- database level, though we tried to ensure one at the application level. 
+ALTER TABLE river_job ALTER COLUMN args SET DEFAULT '{}'; +UPDATE river_job SET args = '{}' WHERE args IS NULL; +ALTER TABLE river_job ALTER COLUMN args SET NOT NULL; +ALTER TABLE river_job ALTER COLUMN args DROP DEFAULT; + +-- The metadata column never had a NOT NULL constraint or default value at the +-- database level, though we tried to ensure one at the application level. +ALTER TABLE river_job ALTER COLUMN metadata SET DEFAULT '{}'; +UPDATE river_job SET metadata = '{}' WHERE metadata IS NULL; +ALTER TABLE river_job ALTER COLUMN metadata SET NOT NULL; + +-- The 'pending' job state will be used for upcoming functionality: +-- ALTER TYPE river_job_state ADD VALUE IF NOT EXISTS 'pending' AFTER 'discarded'; + +ALTER TABLE river_job DROP CONSTRAINT finalized_or_finalized_at_null; +ALTER TABLE river_job ADD CONSTRAINT finalized_or_finalized_at_null CHECK ( + (finalized_at IS NULL AND state NOT IN ('cancelled', 'completed', 'discarded')) OR + (finalized_at IS NOT NULL AND state IN ('cancelled', 'completed', 'discarded')) +); + +DROP TRIGGER river_notify ON river_job; +DROP FUNCTION river_job_notify; + +CREATE TABLE river_queue( + name text PRIMARY KEY NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + metadata jsonb NOT NULL DEFAULT '{}' ::jsonb, + paused_at timestamptz, + updated_at timestamptz NOT NULL +); + +ALTER TABLE river_leader + ALTER COLUMN name SET DEFAULT 'default', + DROP CONSTRAINT name_length, + ADD CONSTRAINT name_length CHECK (name = 'default'); + +-- River migration 005 [up] +-- +-- Rebuild the migration table so it's based on `(line, version)`. +-- + +DO +$body$ +BEGIN + -- Tolerate users who may be using their own migration system rather than + -- River's. If they are, they will have skipped version 001 containing + -- `CREATE TABLE river_migration`, so this table won't exist. 
+ IF (SELECT to_regclass('river_migration') IS NOT NULL) THEN + ALTER TABLE river_migration + RENAME TO river_migration_old; + + CREATE TABLE river_migration( + line TEXT NOT NULL, + version bigint NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + CONSTRAINT line_length CHECK (char_length(line) > 0 AND char_length(line) < 128), + CONSTRAINT version_gte_1 CHECK (version >= 1), + PRIMARY KEY (line, version) + ); + + INSERT INTO river_migration + (created_at, line, version) + SELECT created_at, 'main', version + FROM river_migration_old; + + DROP TABLE river_migration_old; + END IF; +END; +$body$ +LANGUAGE 'plpgsql'; + +-- +-- Add `river_job.unique_key` and bring up an index on it. +-- + +-- These statements use `IF NOT EXISTS` to allow users with a `river_job` table +-- of non-trivial size to build the index `CONCURRENTLY` out of band of this +-- migration, then follow by completing the migration. +ALTER TABLE river_job + ADD COLUMN IF NOT EXISTS unique_key bytea; + +CREATE UNIQUE INDEX IF NOT EXISTS river_job_kind_unique_key_idx ON river_job (kind, unique_key) WHERE unique_key IS NOT NULL; + +-- +-- Create `river_client` and derivative. +-- +-- This feature hasn't quite yet been implemented, but we're taking advantage of +-- the migration to add the schema early so that we can add it later without an +-- additional migration. +-- + +CREATE UNLOGGED TABLE river_client ( + id text PRIMARY KEY NOT NULL, + created_at timestamptz NOT NULL DEFAULT now(), + metadata jsonb NOT NULL DEFAULT '{}', + paused_at timestamptz, + updated_at timestamptz NOT NULL, + CONSTRAINT name_length CHECK (char_length(id) > 0 AND char_length(id) < 128) +); + +-- Differs from `river_queue` in that it tracks the queue state for a particular +-- active client. 
+CREATE UNLOGGED TABLE river_client_queue ( + river_client_id text NOT NULL REFERENCES river_client (id) ON DELETE CASCADE, + name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT now(), + max_workers bigint NOT NULL DEFAULT 0, + metadata jsonb NOT NULL DEFAULT '{}', + num_jobs_completed bigint NOT NULL DEFAULT 0, + num_jobs_running bigint NOT NULL DEFAULT 0, + updated_at timestamptz NOT NULL, + PRIMARY KEY (river_client_id, name), + CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), + CONSTRAINT num_jobs_completed_zero_or_positive CHECK (num_jobs_completed >= 0), + CONSTRAINT num_jobs_running_zero_or_positive CHECK (num_jobs_running >= 0) +); + +-- River migration 006 [up] +CREATE OR REPLACE FUNCTION river_job_state_in_bitmask(bitmask BIT(8), state river_job_state) +RETURNS boolean +LANGUAGE SQL +IMMUTABLE +AS $$ + SELECT CASE state + WHEN 'available' THEN get_bit(bitmask, 7) + WHEN 'cancelled' THEN get_bit(bitmask, 6) + WHEN 'completed' THEN get_bit(bitmask, 5) + WHEN 'discarded' THEN get_bit(bitmask, 4) + WHEN 'pending' THEN get_bit(bitmask, 3) + WHEN 'retryable' THEN get_bit(bitmask, 2) + WHEN 'running' THEN get_bit(bitmask, 1) + WHEN 'scheduled' THEN get_bit(bitmask, 0) + ELSE 0 + END = 1; +$$; + +-- +-- Add `river_job.unique_states` and bring up an index on it. +-- +-- This column may exist already if users manually created the column and index +-- as instructed in the changelog so the index could be created `CONCURRENTLY`. +-- +ALTER TABLE river_job ADD COLUMN IF NOT EXISTS unique_states BIT(8); + +-- This statement uses `IF NOT EXISTS` to allow users with a `river_job` table +-- of non-trivial size to build the index `CONCURRENTLY` out of band of this +-- migration, then follow by completing the migration. 
+CREATE UNIQUE INDEX IF NOT EXISTS river_job_unique_idx ON river_job (unique_key) + WHERE unique_key IS NOT NULL + AND unique_states IS NOT NULL + AND river_job_state_in_bitmask(unique_states, state); + +-- Remove the old unique index. Users who are actively using the unique jobs +-- feature and who wish to avoid deploy downtime may want od drop this in a +-- subsequent migration once all jobs using the old unique system have been +-- completed (i.e. no more rows with non-null unique_key and null +-- unique_states). +DROP INDEX river_job_kind_unique_key_idx; + + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- Drop Users Table +-- River migration 006 [down] +-- +-- Drop `river_job.unique_states` and its index. +-- + +DROP INDEX river_job_unique_idx; + +ALTER TABLE river_job + DROP COLUMN unique_states; + +CREATE UNIQUE INDEX IF NOT EXISTS river_job_kind_unique_key_idx ON river_job (kind, unique_key) WHERE unique_key IS NOT NULL; + +-- +-- Drop `river_job_state_in_bitmask` function. +-- +DROP FUNCTION river_job_state_in_bitmask; + +-- River migration 005 [down] +-- +-- Revert to migration table based only on `(version)`. +-- +-- If any non-main migrations are present, 005 is considered irreversible. +-- + +DO +$body$ +BEGIN + -- Tolerate users who may be using their own migration system rather than + -- River's. If they are, they will have skipped version 001 containing + -- `CREATE TABLE river_migration`, so this table won't exist. 
+ IF (SELECT to_regclass('river_migration') IS NOT NULL) THEN + IF EXISTS ( + SELECT * + FROM river_migration + WHERE line <> 'main' + ) THEN + RAISE EXCEPTION 'Found non-main migration lines in the database; version 005 migration is irreversible because it would result in loss of migration information.'; + END IF; + + ALTER TABLE river_migration + RENAME TO river_migration_old; + + CREATE TABLE river_migration( + id bigserial PRIMARY KEY, + created_at timestamptz NOT NULL DEFAULT NOW(), + version bigint NOT NULL, + CONSTRAINT version CHECK (version >= 1) + ); + + CREATE UNIQUE INDEX ON river_migration USING btree(version); + + INSERT INTO river_migration + (created_at, version) + SELECT created_at, version + FROM river_migration_old; + + DROP TABLE river_migration_old; + END IF; +END; +$body$ +LANGUAGE 'plpgsql'; + +-- +-- Drop `river_job.unique_key`. +-- + +ALTER TABLE river_job + DROP COLUMN unique_key; + +-- +-- Drop `river_client` and derivative. +-- + +DROP TABLE river_client_queue; +DROP TABLE river_client; + +-- River migration 004 [down] +ALTER TABLE river_job ALTER COLUMN args DROP NOT NULL; + +ALTER TABLE river_job ALTER COLUMN metadata DROP NOT NULL; +ALTER TABLE river_job ALTER COLUMN metadata DROP DEFAULT; + +-- It is not possible to safely remove 'pending' from the river_job_state enum, +-- so leave it in place. 
+ +ALTER TABLE river_job DROP CONSTRAINT finalized_or_finalized_at_null; +ALTER TABLE river_job ADD CONSTRAINT finalized_or_finalized_at_null CHECK ( + (state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL +); + +CREATE OR REPLACE FUNCTION river_job_notify() + RETURNS TRIGGER + AS $$ +DECLARE + payload json; +BEGIN + IF NEW.state = 'available' THEN + -- Notify will coalesce duplicate notifications within a transaction, so + -- keep these payloads generalized: + payload = json_build_object('queue', NEW.queue); + PERFORM + pg_notify('river_insert', payload::text); + END IF; + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER river_notify + AFTER INSERT ON river_job + FOR EACH ROW + EXECUTE PROCEDURE river_job_notify(); + +DROP TABLE river_queue; + +ALTER TABLE river_leader + ALTER COLUMN name DROP DEFAULT, + DROP CONSTRAINT name_length, + ADD CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128); + +-- River migration 003 [down] +ALTER TABLE river_job ALTER COLUMN tags DROP NOT NULL, + ALTER COLUMN tags DROP DEFAULT; + +-- River migration 002 [down] +DROP TABLE river_job; +DROP FUNCTION river_job_notify; +DROP TYPE river_job_state; + +DROP TABLE river_leader; + +-- +goose StatementEnd diff --git a/backend/database/migrations/20250109084432_create_users.sql b/backend/database/migrations/20250109084432_create_users.sql new file mode 100644 index 0000000..f943195 --- /dev/null +++ b/backend/database/migrations/20250109084432_create_users.sql @@ -0,0 +1,53 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE + users ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now(), + updated_at timestamp NOT NULL default now(), + deleted_at timestamp, + status INT2 NOT NULL default 0, + email VARCHAR(128) NOT NULL UNIQUE, + phone VARCHAR(32) NOT NULL UNIQUE, + username VARCHAR(128) NOT NULL UNIQUE, + nickname VARCHAR(128) , + password VARCHAR(128) NOT NULL, + age INT2 NOT 
NULL default 0, + sex INT2 NOT NULL default 0, + avatar VARCHAR(128) + ); + +-- index on email phone username +CREATE INDEX idx_users_email ON users(email); +CREATE INDEX idx_users_phone ON users(phone); +CREATE INDEX idx_users_username ON users(username); + + +CREATE TABLE + user_oauths ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now(), + updated_at timestamp NOT NULL default now(), + deleted_at timestamp, + channel INT2 NOT NULL, + user_id INT8 NOT NULL, + union_id VARCHAR(128), + open_id VARCHAR(128) NOT NULL UNIQUE, + access_key VARCHAR(256) NOT NULL default '', + access_token VARCHAR(256) NOT NULL default '', + refresh_token VARCHAR(256) NOT NULL default '', + expire_at timestamp NOT NULL, + meta jsonb default '{}'::jsonb + ); +-- index on channel user_id open_id +CREATE INDEX idx_user_oauths_channel ON user_oauths(channel); +CREATE INDEX idx_user_oauths_user_id ON user_oauths(user_id); +CREATE INDEX idx_user_oauths_open_id ON user_oauths(open_id); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE users; +DROP TABLE user_oauths; +-- +goose StatementEnd diff --git a/backend/database/migrations/20250109085218_create_tenants.sql b/backend/database/migrations/20250109085218_create_tenants.sql new file mode 100644 index 0000000..23aa901 --- /dev/null +++ b/backend/database/migrations/20250109085218_create_tenants.sql @@ -0,0 +1,55 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE + tenants ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now(), + updated_at timestamp NOT NULL default now(), + expired_at timestamp NOT NULL, + + created_by_user_id INT8 NOT NULL, + name VARCHAR(128) NOT NULL, + slug VARCHAR(128) NOT NULL UNIQUE, + description VARCHAR(128) + ); + +-- index on name slug +CREATE INDEX idx_tenants_name ON tenants(name); +CREATE INDEX idx_tenants_slug ON tenants(slug); + + +CREATE TABLE + tenant_users ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now (), 
+ updated_at timestamp NOT NULL default now (), + deleted_at timestamp, + + tenant_id INT8 NOT NULL, + user_id INT8 NOT NULL, + status INT2 NOT NULL default 0, + role INT2 NOT NULL default 0 + ); +-- indexes +CREATE INDEX idx_tenant_users_tenant_id ON tenant_users(tenant_id); +CREATE INDEX idx_tenant_users_user_id ON tenant_users(user_id); +CREATE INDEX idx_tenant_users_role ON tenant_users(role); + +-- create tenant user balance +CREATE TABLE + tenant_user_balances ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now (), + updated_at timestamp NOT NULL default now (), + + tenant_id INT8 NOT NULL, + user_id INT8 NOT NULL, + balance INT8 NOT NULL default 0 + ); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE tenants; +DROP TABLE tenant_users; +-- +goose StatementEnd diff --git a/backend/database/migrations/20250109094724_create_order.sql b/backend/database/migrations/20250109094724_create_order.sql new file mode 100644 index 0000000..6f188d5 --- /dev/null +++ b/backend/database/migrations/20250109094724_create_order.sql @@ -0,0 +1,38 @@ +-- +goose Up +-- +goose StatementBegin + +-- create table orders +CREATE TABLE + orders ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now(), + updated_at timestamp NOT NULL default now(), + deleted_at timestamp, + + tenant_id INT8 NOT NULL, + user_id INT8 NOT NULL, + + type INT2 NOT NULL default 0, + status INT2 NOT NULL default 0, + order_serial VARCHAR(64) NOT NULL UNIQUE, + remote_order_serial VARCHAR(256) NOT NULL UNIQUE, + refund_serial VARCHAR(64) NOT NULL UNIQUE, + remote_refund_serial VARCHAR(256) NOT NULL UNIQUE, + amount INT8 NOT NULL default 0, + currency VARCHAR(32) NOT NULL default 'CNY', + description VARCHAR(256), + meta jsonb default '{}'::jsonb + ); +-- create indexes +CREATE INDEX idx_orders_tenant_id ON orders(tenant_id); +CREATE INDEX idx_orders_user_id ON orders(user_id); +CREATE INDEX idx_orders_order_serial ON orders(order_serial); 
+CREATE INDEX idx_orders_remote_order_serial ON orders(remote_order_serial); + + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE orders; +-- +goose StatementEnd diff --git a/backend/database/migrations/20250109095933_create_post.sql b/backend/database/migrations/20250109095933_create_post.sql new file mode 100644 index 0000000..e837266 --- /dev/null +++ b/backend/database/migrations/20250109095933_create_post.sql @@ -0,0 +1,33 @@ +-- +goose Up +-- +goose StatementBegin +-- create posts table +CREATE TABLE + posts ( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL default now(), + updated_at timestamp NOT NULL default now(), + deleted_at timestamp, + + tenant_id INT8 NOT NULL, + user_id INT8 NOT NULL, + + hash_id VARCHAR(128) NOT NULL, + title VARCHAR(128) NOT NULL, + description VARCHAR(256) NOT NULL, + poster VARCHAR(128) NOT NULL, + content TEXT NOT NULL, + stage INT2 NOT NULL default 0, + status INT2 NOT NULL default 0, + price INT8 NOT NULL default 0, + discount INT2 NOT NULL default 100, + views INT8 NOT NULL default 0, + likes INT8 NOT NULL default 0, + meta jsonb default '{}'::jsonb, + assets jsonb default '{}'::jsonb + ); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE posts; +-- +goose StatementEnd diff --git a/backend/database/models/.gitkeep b/backend/database/models/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend/database/models/qvyun_v2/public/enum/river_job_state.go b/backend/database/models/qvyun_v2/public/enum/river_job_state.go new file mode 100644 index 0000000..97fa72d --- /dev/null +++ b/backend/database/models/qvyun_v2/public/enum/river_job_state.go @@ -0,0 +1,30 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var RiverJobState = &struct { + Available postgres.StringExpression + Cancelled postgres.StringExpression + Completed postgres.StringExpression + Discarded postgres.StringExpression + Pending postgres.StringExpression + Retryable postgres.StringExpression + Running postgres.StringExpression + Scheduled postgres.StringExpression +}{ + Available: postgres.NewEnumValue("available"), + Cancelled: postgres.NewEnumValue("cancelled"), + Completed: postgres.NewEnumValue("completed"), + Discarded: postgres.NewEnumValue("discarded"), + Pending: postgres.NewEnumValue("pending"), + Retryable: postgres.NewEnumValue("retryable"), + Running: postgres.NewEnumValue("running"), + Scheduled: postgres.NewEnumValue("scheduled"), +} diff --git a/backend/database/models/qvyun_v2/public/model/orders.go b/backend/database/models/qvyun_v2/public/model/orders.go new file mode 100644 index 0000000..ca9b1d2 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/orders.go @@ -0,0 +1,31 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Orders struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` + TenantID int64 `json:"tenant_id"` + UserID int64 `json:"user_id"` + Type int16 `json:"type"` + Status int16 `json:"status"` + OrderSerial string `json:"order_serial"` + RemoteOrderSerial string `json:"remote_order_serial"` + RefundSerial string `json:"refund_serial"` + RemoteRefundSerial string `json:"remote_refund_serial"` + Amount int64 `json:"amount"` + Currency string `json:"currency"` + Description *string `json:"description"` + Meta *string `json:"meta"` +} diff --git a/backend/database/models/qvyun_v2/public/model/posts.go b/backend/database/models/qvyun_v2/public/model/posts.go new file mode 100644 index 0000000..caf73d4 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/posts.go @@ -0,0 +1,34 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Posts struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` + TenantID int64 `json:"tenant_id"` + UserID int64 `json:"user_id"` + HashID string `json:"hash_id"` + Title string `json:"title"` + Description string `json:"description"` + Poster string `json:"poster"` + Content string `json:"content"` + Stage int16 `json:"stage"` + Status int16 `json:"status"` + Price int64 `json:"price"` + Discount int16 `json:"discount"` + Views int64 `json:"views"` + Likes int64 `json:"likes"` + Meta *string `json:"meta"` + Assets *string `json:"assets"` +} diff --git a/backend/database/models/qvyun_v2/public/model/river_job_state.go b/backend/database/models/qvyun_v2/public/model/river_job_state.go new file mode 100644 index 0000000..809604b --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/river_job_state.go @@ -0,0 +1,73 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import "errors" + +type RiverJobState string + +const ( + RiverJobState_Available RiverJobState = "available" + RiverJobState_Cancelled RiverJobState = "cancelled" + RiverJobState_Completed RiverJobState = "completed" + RiverJobState_Discarded RiverJobState = "discarded" + RiverJobState_Pending RiverJobState = "pending" + RiverJobState_Retryable RiverJobState = "retryable" + RiverJobState_Running RiverJobState = "running" + RiverJobState_Scheduled RiverJobState = "scheduled" +) + +var RiverJobStateAllValues = []RiverJobState{ + RiverJobState_Available, + RiverJobState_Cancelled, + RiverJobState_Completed, + RiverJobState_Discarded, + RiverJobState_Pending, + RiverJobState_Retryable, + RiverJobState_Running, + RiverJobState_Scheduled, +} + +func (e *RiverJobState) Scan(value interface{}) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("jet: Invalid scan value for AllTypesEnum enum. 
Enum value has to be of type string or []byte") + } + + switch enumValue { + case "available": + *e = RiverJobState_Available + case "cancelled": + *e = RiverJobState_Cancelled + case "completed": + *e = RiverJobState_Completed + case "discarded": + *e = RiverJobState_Discarded + case "pending": + *e = RiverJobState_Pending + case "retryable": + *e = RiverJobState_Retryable + case "running": + *e = RiverJobState_Running + case "scheduled": + *e = RiverJobState_Scheduled + default: + return errors.New("jet: Invalid scan value '" + enumValue + "' for RiverJobState enum") + } + + return nil +} + +func (e RiverJobState) String() string { + return string(e) +} diff --git a/backend/database/models/qvyun_v2/public/model/tenant_user_balances.go b/backend/database/models/qvyun_v2/public/model/tenant_user_balances.go new file mode 100644 index 0000000..4a327c4 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/tenant_user_balances.go @@ -0,0 +1,21 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type TenantUserBalances struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + TenantID int64 `json:"tenant_id"` + UserID int64 `json:"user_id"` + Balance int64 `json:"balance"` +} diff --git a/backend/database/models/qvyun_v2/public/model/tenant_users.go b/backend/database/models/qvyun_v2/public/model/tenant_users.go new file mode 100644 index 0000000..f797257 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/tenant_users.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type TenantUsers struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` + TenantID int64 `json:"tenant_id"` + UserID int64 `json:"user_id"` + Status int16 `json:"status"` + Role int16 `json:"role"` +} diff --git a/backend/database/models/qvyun_v2/public/model/tenants.go b/backend/database/models/qvyun_v2/public/model/tenants.go new file mode 100644 index 0000000..315ad85 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/tenants.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Tenants struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + ExpiredAt time.Time `json:"expired_at"` + CreatedByUserID int64 `json:"created_by_user_id"` + Name string `json:"name"` + Slug string `json:"slug"` + Description *string `json:"description"` +} diff --git a/backend/database/models/qvyun_v2/public/model/user_oauths.go b/backend/database/models/qvyun_v2/public/model/user_oauths.go new file mode 100644 index 0000000..1c2a3c6 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/user_oauths.go @@ -0,0 +1,28 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type UserOauths struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` + Channel int16 `json:"channel"` + UserID int64 `json:"user_id"` + UnionID *string `json:"union_id"` + OpenID string `json:"open_id"` + AccessKey string `json:"access_key"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpireAt time.Time `json:"expire_at"` + Meta *string `json:"meta"` +} diff --git a/backend/database/models/qvyun_v2/public/model/users.go b/backend/database/models/qvyun_v2/public/model/users.go new file mode 100644 index 0000000..a0cf8f9 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/model/users.go @@ -0,0 +1,28 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Users struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt *time.Time `json:"deleted_at"` + Status int16 `json:"status"` + Email string `json:"email"` + Phone string `json:"phone"` + Username string `json:"username"` + Nickname *string `json:"nickname"` + Password string `json:"password"` + Age int16 `json:"age"` + Sex int16 `json:"sex"` + Avatar *string `json:"avatar"` +} diff --git a/backend/database/models/qvyun_v2/public/table/migrations.go b/backend/database/models/qvyun_v2/public/table/migrations.go new file mode 100644 index 0000000..c4a6b2d --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/migrations.go @@ -0,0 +1,84 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Migrations = newMigrationsTable("public", "migrations", "") + +type migrationsTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MigrationsTable struct { + migrationsTable + + EXCLUDED migrationsTable +} + +// AS creates new MigrationsTable with assigned alias +func (a MigrationsTable) AS(alias string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MigrationsTable with assigned schema name +func (a MigrationsTable) FromSchema(schemaName string) *MigrationsTable { + return newMigrationsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MigrationsTable with assigned table prefix +func (a MigrationsTable) WithPrefix(prefix string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MigrationsTable with assigned table suffix +func (a MigrationsTable) WithSuffix(suffix string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMigrationsTable(schemaName, tableName, alias string) *MigrationsTable { + return &MigrationsTable{ + migrationsTable: newMigrationsTableImpl(schemaName, tableName, alias), + EXCLUDED: newMigrationsTableImpl("", "excluded", ""), + } +} + +func newMigrationsTableImpl(schemaName, tableName, alias string) migrationsTable { + var ( + IDColumn = postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = 
postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + ) + + return migrationsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/orders.go b/backend/database/models/qvyun_v2/public/table/orders.go new file mode 100644 index 0000000..634e9bf --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/orders.go @@ -0,0 +1,120 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Orders = newOrdersTable("public", "orders", "") + +type ordersTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + DeletedAt postgres.ColumnTimestamp + TenantID postgres.ColumnInteger + UserID postgres.ColumnInteger + Type postgres.ColumnInteger + Status postgres.ColumnInteger + OrderSerial postgres.ColumnString + RemoteOrderSerial postgres.ColumnString + RefundSerial postgres.ColumnString + RemoteRefundSerial postgres.ColumnString + Amount postgres.ColumnInteger + Currency postgres.ColumnString + Description postgres.ColumnString + Meta postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type OrdersTable struct { + ordersTable + + EXCLUDED ordersTable +} + +// AS creates new OrdersTable with assigned alias +func (a OrdersTable) AS(alias string) *OrdersTable { + return newOrdersTable(a.SchemaName(), a.TableName(), alias) +} + +// 
Schema creates new OrdersTable with assigned schema name +func (a OrdersTable) FromSchema(schemaName string) *OrdersTable { + return newOrdersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new OrdersTable with assigned table prefix +func (a OrdersTable) WithPrefix(prefix string) *OrdersTable { + return newOrdersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new OrdersTable with assigned table suffix +func (a OrdersTable) WithSuffix(suffix string) *OrdersTable { + return newOrdersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newOrdersTable(schemaName, tableName, alias string) *OrdersTable { + return &OrdersTable{ + ordersTable: newOrdersTableImpl(schemaName, tableName, alias), + EXCLUDED: newOrdersTableImpl("", "excluded", ""), + } +} + +func newOrdersTableImpl(schemaName, tableName, alias string) ordersTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + DeletedAtColumn = postgres.TimestampColumn("deleted_at") + TenantIDColumn = postgres.IntegerColumn("tenant_id") + UserIDColumn = postgres.IntegerColumn("user_id") + TypeColumn = postgres.IntegerColumn("type") + StatusColumn = postgres.IntegerColumn("status") + OrderSerialColumn = postgres.StringColumn("order_serial") + RemoteOrderSerialColumn = postgres.StringColumn("remote_order_serial") + RefundSerialColumn = postgres.StringColumn("refund_serial") + RemoteRefundSerialColumn = postgres.StringColumn("remote_refund_serial") + AmountColumn = postgres.IntegerColumn("amount") + CurrencyColumn = postgres.StringColumn("currency") + DescriptionColumn = postgres.StringColumn("description") + MetaColumn = postgres.StringColumn("meta") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, TypeColumn, StatusColumn, OrderSerialColumn, 
RemoteOrderSerialColumn, RefundSerialColumn, RemoteRefundSerialColumn, AmountColumn, CurrencyColumn, DescriptionColumn, MetaColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, TypeColumn, StatusColumn, OrderSerialColumn, RemoteOrderSerialColumn, RefundSerialColumn, RemoteRefundSerialColumn, AmountColumn, CurrencyColumn, DescriptionColumn, MetaColumn} + ) + + return ordersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + TenantID: TenantIDColumn, + UserID: UserIDColumn, + Type: TypeColumn, + Status: StatusColumn, + OrderSerial: OrderSerialColumn, + RemoteOrderSerial: RemoteOrderSerialColumn, + RefundSerial: RefundSerialColumn, + RemoteRefundSerial: RemoteRefundSerialColumn, + Amount: AmountColumn, + Currency: CurrencyColumn, + Description: DescriptionColumn, + Meta: MetaColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/posts.go b/backend/database/models/qvyun_v2/public/table/posts.go new file mode 100644 index 0000000..22f8b6a --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/posts.go @@ -0,0 +1,129 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Posts = newPostsTable("public", "posts", "") + +type postsTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + DeletedAt postgres.ColumnTimestamp + TenantID postgres.ColumnInteger + UserID postgres.ColumnInteger + HashID postgres.ColumnString + Title postgres.ColumnString + Description postgres.ColumnString + Poster postgres.ColumnString + Content postgres.ColumnString + Stage postgres.ColumnInteger + Status postgres.ColumnInteger + Price postgres.ColumnInteger + Discount postgres.ColumnInteger + Views postgres.ColumnInteger + Likes postgres.ColumnInteger + Meta postgres.ColumnString + Assets postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type PostsTable struct { + postsTable + + EXCLUDED postsTable +} + +// AS creates new PostsTable with assigned alias +func (a PostsTable) AS(alias string) *PostsTable { + return newPostsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new PostsTable with assigned schema name +func (a PostsTable) FromSchema(schemaName string) *PostsTable { + return newPostsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new PostsTable with assigned table prefix +func (a PostsTable) WithPrefix(prefix string) *PostsTable { + return newPostsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new PostsTable with assigned table suffix +func (a PostsTable) WithSuffix(suffix string) *PostsTable { + return newPostsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newPostsTable(schemaName, tableName, alias string) *PostsTable { + return &PostsTable{ + postsTable: newPostsTableImpl(schemaName, tableName, alias), + 
EXCLUDED: newPostsTableImpl("", "excluded", ""), + } +} + +func newPostsTableImpl(schemaName, tableName, alias string) postsTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + DeletedAtColumn = postgres.TimestampColumn("deleted_at") + TenantIDColumn = postgres.IntegerColumn("tenant_id") + UserIDColumn = postgres.IntegerColumn("user_id") + HashIDColumn = postgres.StringColumn("hash_id") + TitleColumn = postgres.StringColumn("title") + DescriptionColumn = postgres.StringColumn("description") + PosterColumn = postgres.StringColumn("poster") + ContentColumn = postgres.StringColumn("content") + StageColumn = postgres.IntegerColumn("stage") + StatusColumn = postgres.IntegerColumn("status") + PriceColumn = postgres.IntegerColumn("price") + DiscountColumn = postgres.IntegerColumn("discount") + ViewsColumn = postgres.IntegerColumn("views") + LikesColumn = postgres.IntegerColumn("likes") + MetaColumn = postgres.StringColumn("meta") + AssetsColumn = postgres.StringColumn("assets") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, HashIDColumn, TitleColumn, DescriptionColumn, PosterColumn, ContentColumn, StageColumn, StatusColumn, PriceColumn, DiscountColumn, ViewsColumn, LikesColumn, MetaColumn, AssetsColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, HashIDColumn, TitleColumn, DescriptionColumn, PosterColumn, ContentColumn, StageColumn, StatusColumn, PriceColumn, DiscountColumn, ViewsColumn, LikesColumn, MetaColumn, AssetsColumn} + ) + + return postsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + TenantID: TenantIDColumn, + UserID: UserIDColumn, + 
HashID: HashIDColumn, + Title: TitleColumn, + Description: DescriptionColumn, + Poster: PosterColumn, + Content: ContentColumn, + Stage: StageColumn, + Status: StatusColumn, + Price: PriceColumn, + Discount: DiscountColumn, + Views: ViewsColumn, + Likes: LikesColumn, + Meta: MetaColumn, + Assets: AssetsColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/river_client.go b/backend/database/models/qvyun_v2/public/table/river_client.go new file mode 100644 index 0000000..ab4ff99 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/river_client.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RiverClient = newRiverClientTable("public", "river_client", "") + +type riverClientTable struct { + postgres.Table + + // Columns + ID postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + Metadata postgres.ColumnString + PausedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type RiverClientTable struct { + riverClientTable + + EXCLUDED riverClientTable +} + +// AS creates new RiverClientTable with assigned alias +func (a RiverClientTable) AS(alias string) *RiverClientTable { + return newRiverClientTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RiverClientTable with assigned schema name +func (a RiverClientTable) FromSchema(schemaName string) *RiverClientTable { + return newRiverClientTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RiverClientTable with assigned table prefix +func (a RiverClientTable) WithPrefix(prefix string) *RiverClientTable { + return newRiverClientTable(a.SchemaName(), prefix+a.TableName(), 
a.TableName()) +} + +// WithSuffix creates new RiverClientTable with assigned table suffix +func (a RiverClientTable) WithSuffix(suffix string) *RiverClientTable { + return newRiverClientTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRiverClientTable(schemaName, tableName, alias string) *RiverClientTable { + return &RiverClientTable{ + riverClientTable: newRiverClientTableImpl(schemaName, tableName, alias), + EXCLUDED: newRiverClientTableImpl("", "excluded", ""), + } +} + +func newRiverClientTableImpl(schemaName, tableName, alias string) riverClientTable { + var ( + IDColumn = postgres.StringColumn("id") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + MetadataColumn = postgres.StringColumn("metadata") + PausedAtColumn = postgres.TimestampzColumn("paused_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, MetadataColumn, PausedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, MetadataColumn, PausedAtColumn, UpdatedAtColumn} + ) + + return riverClientTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + Metadata: MetadataColumn, + PausedAt: PausedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/river_client_queue.go b/backend/database/models/qvyun_v2/public/table/river_client_queue.go new file mode 100644 index 0000000..38bd3bb --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/river_client_queue.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RiverClientQueue = newRiverClientQueueTable("public", "river_client_queue", "") + +type riverClientQueueTable struct { + postgres.Table + + // Columns + RiverClientID postgres.ColumnString + Name postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + MaxWorkers postgres.ColumnInteger + Metadata postgres.ColumnString + NumJobsCompleted postgres.ColumnInteger + NumJobsRunning postgres.ColumnInteger + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type RiverClientQueueTable struct { + riverClientQueueTable + + EXCLUDED riverClientQueueTable +} + +// AS creates new RiverClientQueueTable with assigned alias +func (a RiverClientQueueTable) AS(alias string) *RiverClientQueueTable { + return newRiverClientQueueTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RiverClientQueueTable with assigned schema name +func (a RiverClientQueueTable) FromSchema(schemaName string) *RiverClientQueueTable { + return newRiverClientQueueTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RiverClientQueueTable with assigned table prefix +func (a RiverClientQueueTable) WithPrefix(prefix string) *RiverClientQueueTable { + return newRiverClientQueueTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RiverClientQueueTable with assigned table suffix +func (a RiverClientQueueTable) WithSuffix(suffix string) *RiverClientQueueTable { + return newRiverClientQueueTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRiverClientQueueTable(schemaName, tableName, alias string) *RiverClientQueueTable { + return &RiverClientQueueTable{ + riverClientQueueTable: newRiverClientQueueTableImpl(schemaName, tableName, alias), + EXCLUDED: 
newRiverClientQueueTableImpl("", "excluded", ""), + } +} + +func newRiverClientQueueTableImpl(schemaName, tableName, alias string) riverClientQueueTable { + var ( + RiverClientIDColumn = postgres.StringColumn("river_client_id") + NameColumn = postgres.StringColumn("name") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + MaxWorkersColumn = postgres.IntegerColumn("max_workers") + MetadataColumn = postgres.StringColumn("metadata") + NumJobsCompletedColumn = postgres.IntegerColumn("num_jobs_completed") + NumJobsRunningColumn = postgres.IntegerColumn("num_jobs_running") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{RiverClientIDColumn, NameColumn, CreatedAtColumn, MaxWorkersColumn, MetadataColumn, NumJobsCompletedColumn, NumJobsRunningColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, MaxWorkersColumn, MetadataColumn, NumJobsCompletedColumn, NumJobsRunningColumn, UpdatedAtColumn} + ) + + return riverClientQueueTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + RiverClientID: RiverClientIDColumn, + Name: NameColumn, + CreatedAt: CreatedAtColumn, + MaxWorkers: MaxWorkersColumn, + Metadata: MetadataColumn, + NumJobsCompleted: NumJobsCompletedColumn, + NumJobsRunning: NumJobsRunningColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/river_job.go b/backend/database/models/qvyun_v2/public/table/river_job.go new file mode 100644 index 0000000..feb2daf --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/river_job.go @@ -0,0 +1,126 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RiverJob = newRiverJobTable("public", "river_job", "") + +type riverJobTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + State postgres.ColumnString + Attempt postgres.ColumnInteger + MaxAttempts postgres.ColumnInteger + AttemptedAt postgres.ColumnTimestampz + CreatedAt postgres.ColumnTimestampz + FinalizedAt postgres.ColumnTimestampz + ScheduledAt postgres.ColumnTimestampz + Priority postgres.ColumnInteger + Args postgres.ColumnString + AttemptedBy postgres.ColumnString + Errors postgres.ColumnString + Kind postgres.ColumnString + Metadata postgres.ColumnString + Queue postgres.ColumnString + Tags postgres.ColumnString + UniqueKey postgres.ColumnString + UniqueStates postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type RiverJobTable struct { + riverJobTable + + EXCLUDED riverJobTable +} + +// AS creates new RiverJobTable with assigned alias +func (a RiverJobTable) AS(alias string) *RiverJobTable { + return newRiverJobTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RiverJobTable with assigned schema name +func (a RiverJobTable) FromSchema(schemaName string) *RiverJobTable { + return newRiverJobTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RiverJobTable with assigned table prefix +func (a RiverJobTable) WithPrefix(prefix string) *RiverJobTable { + return newRiverJobTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RiverJobTable with assigned table suffix +func (a RiverJobTable) WithSuffix(suffix string) *RiverJobTable { + return newRiverJobTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRiverJobTable(schemaName, tableName, alias string) *RiverJobTable { + return &RiverJobTable{ 
+ riverJobTable: newRiverJobTableImpl(schemaName, tableName, alias), + EXCLUDED: newRiverJobTableImpl("", "excluded", ""), + } +} + +func newRiverJobTableImpl(schemaName, tableName, alias string) riverJobTable { + var ( + IDColumn = postgres.IntegerColumn("id") + StateColumn = postgres.StringColumn("state") + AttemptColumn = postgres.IntegerColumn("attempt") + MaxAttemptsColumn = postgres.IntegerColumn("max_attempts") + AttemptedAtColumn = postgres.TimestampzColumn("attempted_at") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + FinalizedAtColumn = postgres.TimestampzColumn("finalized_at") + ScheduledAtColumn = postgres.TimestampzColumn("scheduled_at") + PriorityColumn = postgres.IntegerColumn("priority") + ArgsColumn = postgres.StringColumn("args") + AttemptedByColumn = postgres.StringColumn("attempted_by") + ErrorsColumn = postgres.StringColumn("errors") + KindColumn = postgres.StringColumn("kind") + MetadataColumn = postgres.StringColumn("metadata") + QueueColumn = postgres.StringColumn("queue") + TagsColumn = postgres.StringColumn("tags") + UniqueKeyColumn = postgres.StringColumn("unique_key") + UniqueStatesColumn = postgres.StringColumn("unique_states") + allColumns = postgres.ColumnList{IDColumn, StateColumn, AttemptColumn, MaxAttemptsColumn, AttemptedAtColumn, CreatedAtColumn, FinalizedAtColumn, ScheduledAtColumn, PriorityColumn, ArgsColumn, AttemptedByColumn, ErrorsColumn, KindColumn, MetadataColumn, QueueColumn, TagsColumn, UniqueKeyColumn, UniqueStatesColumn} + mutableColumns = postgres.ColumnList{StateColumn, AttemptColumn, MaxAttemptsColumn, AttemptedAtColumn, CreatedAtColumn, FinalizedAtColumn, ScheduledAtColumn, PriorityColumn, ArgsColumn, AttemptedByColumn, ErrorsColumn, KindColumn, MetadataColumn, QueueColumn, TagsColumn, UniqueKeyColumn, UniqueStatesColumn} + ) + + return riverJobTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + State: StateColumn, + Attempt: AttemptColumn, 
+ MaxAttempts: MaxAttemptsColumn, + AttemptedAt: AttemptedAtColumn, + CreatedAt: CreatedAtColumn, + FinalizedAt: FinalizedAtColumn, + ScheduledAt: ScheduledAtColumn, + Priority: PriorityColumn, + Args: ArgsColumn, + AttemptedBy: AttemptedByColumn, + Errors: ErrorsColumn, + Kind: KindColumn, + Metadata: MetadataColumn, + Queue: QueueColumn, + Tags: TagsColumn, + UniqueKey: UniqueKeyColumn, + UniqueStates: UniqueStatesColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/river_leader.go b/backend/database/models/qvyun_v2/public/table/river_leader.go new file mode 100644 index 0000000..b6ba2af --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/river_leader.go @@ -0,0 +1,84 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RiverLeader = newRiverLeaderTable("public", "river_leader", "") + +type riverLeaderTable struct { + postgres.Table + + // Columns + ElectedAt postgres.ColumnTimestampz + ExpiresAt postgres.ColumnTimestampz + LeaderID postgres.ColumnString + Name postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type RiverLeaderTable struct { + riverLeaderTable + + EXCLUDED riverLeaderTable +} + +// AS creates new RiverLeaderTable with assigned alias +func (a RiverLeaderTable) AS(alias string) *RiverLeaderTable { + return newRiverLeaderTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RiverLeaderTable with assigned schema name +func (a RiverLeaderTable) FromSchema(schemaName string) *RiverLeaderTable { + return newRiverLeaderTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RiverLeaderTable with assigned table prefix +func (a RiverLeaderTable) WithPrefix(prefix string) 
*RiverLeaderTable { + return newRiverLeaderTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RiverLeaderTable with assigned table suffix +func (a RiverLeaderTable) WithSuffix(suffix string) *RiverLeaderTable { + return newRiverLeaderTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRiverLeaderTable(schemaName, tableName, alias string) *RiverLeaderTable { + return &RiverLeaderTable{ + riverLeaderTable: newRiverLeaderTableImpl(schemaName, tableName, alias), + EXCLUDED: newRiverLeaderTableImpl("", "excluded", ""), + } +} + +func newRiverLeaderTableImpl(schemaName, tableName, alias string) riverLeaderTable { + var ( + ElectedAtColumn = postgres.TimestampzColumn("elected_at") + ExpiresAtColumn = postgres.TimestampzColumn("expires_at") + LeaderIDColumn = postgres.StringColumn("leader_id") + NameColumn = postgres.StringColumn("name") + allColumns = postgres.ColumnList{ElectedAtColumn, ExpiresAtColumn, LeaderIDColumn, NameColumn} + mutableColumns = postgres.ColumnList{ElectedAtColumn, ExpiresAtColumn, LeaderIDColumn} + ) + + return riverLeaderTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ElectedAt: ElectedAtColumn, + ExpiresAt: ExpiresAtColumn, + LeaderID: LeaderIDColumn, + Name: NameColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/river_queue.go b/backend/database/models/qvyun_v2/public/table/river_queue.go new file mode 100644 index 0000000..d9f296e --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/river_queue.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RiverQueue = newRiverQueueTable("public", "river_queue", "") + +type riverQueueTable struct { + postgres.Table + + // Columns + Name postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + Metadata postgres.ColumnString + PausedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type RiverQueueTable struct { + riverQueueTable + + EXCLUDED riverQueueTable +} + +// AS creates new RiverQueueTable with assigned alias +func (a RiverQueueTable) AS(alias string) *RiverQueueTable { + return newRiverQueueTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RiverQueueTable with assigned schema name +func (a RiverQueueTable) FromSchema(schemaName string) *RiverQueueTable { + return newRiverQueueTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RiverQueueTable with assigned table prefix +func (a RiverQueueTable) WithPrefix(prefix string) *RiverQueueTable { + return newRiverQueueTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RiverQueueTable with assigned table suffix +func (a RiverQueueTable) WithSuffix(suffix string) *RiverQueueTable { + return newRiverQueueTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRiverQueueTable(schemaName, tableName, alias string) *RiverQueueTable { + return &RiverQueueTable{ + riverQueueTable: newRiverQueueTableImpl(schemaName, tableName, alias), + EXCLUDED: newRiverQueueTableImpl("", "excluded", ""), + } +} + +func newRiverQueueTableImpl(schemaName, tableName, alias string) riverQueueTable { + var ( + NameColumn = postgres.StringColumn("name") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + MetadataColumn = 
postgres.StringColumn("metadata") + PausedAtColumn = postgres.TimestampzColumn("paused_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{NameColumn, CreatedAtColumn, MetadataColumn, PausedAtColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, MetadataColumn, PausedAtColumn, UpdatedAtColumn} + ) + + return riverQueueTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + Name: NameColumn, + CreatedAt: CreatedAtColumn, + Metadata: MetadataColumn, + PausedAt: PausedAtColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/table_use_schema.go b/backend/database/models/qvyun_v2/public/table/table_use_schema.go new file mode 100644 index 0000000..82be2c5 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/table_use_schema.go @@ -0,0 +1,26 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. 
+func UseSchema(schema string) { + Migrations = Migrations.FromSchema(schema) + Orders = Orders.FromSchema(schema) + Posts = Posts.FromSchema(schema) + RiverClient = RiverClient.FromSchema(schema) + RiverClientQueue = RiverClientQueue.FromSchema(schema) + RiverJob = RiverJob.FromSchema(schema) + RiverLeader = RiverLeader.FromSchema(schema) + RiverQueue = RiverQueue.FromSchema(schema) + TenantUserBalances = TenantUserBalances.FromSchema(schema) + TenantUsers = TenantUsers.FromSchema(schema) + Tenants = Tenants.FromSchema(schema) + UserOauths = UserOauths.FromSchema(schema) + Users = Users.FromSchema(schema) +} diff --git a/backend/database/models/qvyun_v2/public/table/tenant_user_balances.go b/backend/database/models/qvyun_v2/public/table/tenant_user_balances.go new file mode 100644 index 0000000..643cdf5 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/tenant_user_balances.go @@ -0,0 +1,90 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var TenantUserBalances = newTenantUserBalancesTable("public", "tenant_user_balances", "") + +type tenantUserBalancesTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + TenantID postgres.ColumnInteger + UserID postgres.ColumnInteger + Balance postgres.ColumnInteger + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type TenantUserBalancesTable struct { + tenantUserBalancesTable + + EXCLUDED tenantUserBalancesTable +} + +// AS creates new TenantUserBalancesTable with assigned alias +func (a TenantUserBalancesTable) AS(alias string) *TenantUserBalancesTable { + return newTenantUserBalancesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new TenantUserBalancesTable with assigned schema 
name +func (a TenantUserBalancesTable) FromSchema(schemaName string) *TenantUserBalancesTable { + return newTenantUserBalancesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new TenantUserBalancesTable with assigned table prefix +func (a TenantUserBalancesTable) WithPrefix(prefix string) *TenantUserBalancesTable { + return newTenantUserBalancesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new TenantUserBalancesTable with assigned table suffix +func (a TenantUserBalancesTable) WithSuffix(suffix string) *TenantUserBalancesTable { + return newTenantUserBalancesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newTenantUserBalancesTable(schemaName, tableName, alias string) *TenantUserBalancesTable { + return &TenantUserBalancesTable{ + tenantUserBalancesTable: newTenantUserBalancesTableImpl(schemaName, tableName, alias), + EXCLUDED: newTenantUserBalancesTableImpl("", "excluded", ""), + } +} + +func newTenantUserBalancesTableImpl(schemaName, tableName, alias string) tenantUserBalancesTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + TenantIDColumn = postgres.IntegerColumn("tenant_id") + UserIDColumn = postgres.IntegerColumn("user_id") + BalanceColumn = postgres.IntegerColumn("balance") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, TenantIDColumn, UserIDColumn, BalanceColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, TenantIDColumn, UserIDColumn, BalanceColumn} + ) + + return tenantUserBalancesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + TenantID: TenantIDColumn, + UserID: UserIDColumn, + Balance: BalanceColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + 
} +} diff --git a/backend/database/models/qvyun_v2/public/table/tenant_users.go b/backend/database/models/qvyun_v2/public/table/tenant_users.go new file mode 100644 index 0000000..77392ea --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/tenant_users.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var TenantUsers = newTenantUsersTable("public", "tenant_users", "") + +type tenantUsersTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + DeletedAt postgres.ColumnTimestamp + TenantID postgres.ColumnInteger + UserID postgres.ColumnInteger + Status postgres.ColumnInteger + Role postgres.ColumnInteger + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type TenantUsersTable struct { + tenantUsersTable + + EXCLUDED tenantUsersTable +} + +// AS creates new TenantUsersTable with assigned alias +func (a TenantUsersTable) AS(alias string) *TenantUsersTable { + return newTenantUsersTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new TenantUsersTable with assigned schema name +func (a TenantUsersTable) FromSchema(schemaName string) *TenantUsersTable { + return newTenantUsersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new TenantUsersTable with assigned table prefix +func (a TenantUsersTable) WithPrefix(prefix string) *TenantUsersTable { + return newTenantUsersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new TenantUsersTable with assigned table suffix +func (a TenantUsersTable) WithSuffix(suffix string) *TenantUsersTable { + return newTenantUsersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newTenantUsersTable(schemaName, 
tableName, alias string) *TenantUsersTable { + return &TenantUsersTable{ + tenantUsersTable: newTenantUsersTableImpl(schemaName, tableName, alias), + EXCLUDED: newTenantUsersTableImpl("", "excluded", ""), + } +} + +func newTenantUsersTableImpl(schemaName, tableName, alias string) tenantUsersTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + DeletedAtColumn = postgres.TimestampColumn("deleted_at") + TenantIDColumn = postgres.IntegerColumn("tenant_id") + UserIDColumn = postgres.IntegerColumn("user_id") + StatusColumn = postgres.IntegerColumn("status") + RoleColumn = postgres.IntegerColumn("role") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, StatusColumn, RoleColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, TenantIDColumn, UserIDColumn, StatusColumn, RoleColumn} + ) + + return tenantUsersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + TenantID: TenantIDColumn, + UserID: UserIDColumn, + Status: StatusColumn, + Role: RoleColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/tenants.go b/backend/database/models/qvyun_v2/public/table/tenants.go new file mode 100644 index 0000000..4bb5052 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/tenants.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Tenants = newTenantsTable("public", "tenants", "") + +type tenantsTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + ExpiredAt postgres.ColumnTimestamp + CreatedByUserID postgres.ColumnInteger + Name postgres.ColumnString + Slug postgres.ColumnString + Description postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type TenantsTable struct { + tenantsTable + + EXCLUDED tenantsTable +} + +// AS creates new TenantsTable with assigned alias +func (a TenantsTable) AS(alias string) *TenantsTable { + return newTenantsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new TenantsTable with assigned schema name +func (a TenantsTable) FromSchema(schemaName string) *TenantsTable { + return newTenantsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new TenantsTable with assigned table prefix +func (a TenantsTable) WithPrefix(prefix string) *TenantsTable { + return newTenantsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new TenantsTable with assigned table suffix +func (a TenantsTable) WithSuffix(suffix string) *TenantsTable { + return newTenantsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newTenantsTable(schemaName, tableName, alias string) *TenantsTable { + return &TenantsTable{ + tenantsTable: newTenantsTableImpl(schemaName, tableName, alias), + EXCLUDED: newTenantsTableImpl("", "excluded", ""), + } +} + +func newTenantsTableImpl(schemaName, tableName, alias string) tenantsTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = 
postgres.TimestampColumn("updated_at") + ExpiredAtColumn = postgres.TimestampColumn("expired_at") + CreatedByUserIDColumn = postgres.IntegerColumn("created_by_user_id") + NameColumn = postgres.StringColumn("name") + SlugColumn = postgres.StringColumn("slug") + DescriptionColumn = postgres.StringColumn("description") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, ExpiredAtColumn, CreatedByUserIDColumn, NameColumn, SlugColumn, DescriptionColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, ExpiredAtColumn, CreatedByUserIDColumn, NameColumn, SlugColumn, DescriptionColumn} + ) + + return tenantsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + ExpiredAt: ExpiredAtColumn, + CreatedByUserID: CreatedByUserIDColumn, + Name: NameColumn, + Slug: SlugColumn, + Description: DescriptionColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/user_oauths.go b/backend/database/models/qvyun_v2/public/table/user_oauths.go new file mode 100644 index 0000000..76dbc3a --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/user_oauths.go @@ -0,0 +1,111 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var UserOauths = newUserOauthsTable("public", "user_oauths", "") + +type userOauthsTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + DeletedAt postgres.ColumnTimestamp + Channel postgres.ColumnInteger + UserID postgres.ColumnInteger + UnionID postgres.ColumnString + OpenID postgres.ColumnString + AccessKey postgres.ColumnString + AccessToken postgres.ColumnString + RefreshToken postgres.ColumnString + ExpireAt postgres.ColumnTimestamp + Meta postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type UserOauthsTable struct { + userOauthsTable + + EXCLUDED userOauthsTable +} + +// AS creates new UserOauthsTable with assigned alias +func (a UserOauthsTable) AS(alias string) *UserOauthsTable { + return newUserOauthsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new UserOauthsTable with assigned schema name +func (a UserOauthsTable) FromSchema(schemaName string) *UserOauthsTable { + return newUserOauthsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new UserOauthsTable with assigned table prefix +func (a UserOauthsTable) WithPrefix(prefix string) *UserOauthsTable { + return newUserOauthsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new UserOauthsTable with assigned table suffix +func (a UserOauthsTable) WithSuffix(suffix string) *UserOauthsTable { + return newUserOauthsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newUserOauthsTable(schemaName, tableName, alias string) *UserOauthsTable { + return &UserOauthsTable{ + userOauthsTable: newUserOauthsTableImpl(schemaName, tableName, alias), + EXCLUDED: newUserOauthsTableImpl("", 
"excluded", ""), + } +} + +func newUserOauthsTableImpl(schemaName, tableName, alias string) userOauthsTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + DeletedAtColumn = postgres.TimestampColumn("deleted_at") + ChannelColumn = postgres.IntegerColumn("channel") + UserIDColumn = postgres.IntegerColumn("user_id") + UnionIDColumn = postgres.StringColumn("union_id") + OpenIDColumn = postgres.StringColumn("open_id") + AccessKeyColumn = postgres.StringColumn("access_key") + AccessTokenColumn = postgres.StringColumn("access_token") + RefreshTokenColumn = postgres.StringColumn("refresh_token") + ExpireAtColumn = postgres.TimestampColumn("expire_at") + MetaColumn = postgres.StringColumn("meta") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, ChannelColumn, UserIDColumn, UnionIDColumn, OpenIDColumn, AccessKeyColumn, AccessTokenColumn, RefreshTokenColumn, ExpireAtColumn, MetaColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, ChannelColumn, UserIDColumn, UnionIDColumn, OpenIDColumn, AccessKeyColumn, AccessTokenColumn, RefreshTokenColumn, ExpireAtColumn, MetaColumn} + ) + + return userOauthsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + Channel: ChannelColumn, + UserID: UserIDColumn, + UnionID: UnionIDColumn, + OpenID: OpenIDColumn, + AccessKey: AccessKeyColumn, + AccessToken: AccessTokenColumn, + RefreshToken: RefreshTokenColumn, + ExpireAt: ExpireAtColumn, + Meta: MetaColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/models/qvyun_v2/public/table/users.go b/backend/database/models/qvyun_v2/public/table/users.go new file mode 100644 index 
0000000..55857d5 --- /dev/null +++ b/backend/database/models/qvyun_v2/public/table/users.go @@ -0,0 +1,111 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Users = newUsersTable("public", "users", "") + +type usersTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + UpdatedAt postgres.ColumnTimestamp + DeletedAt postgres.ColumnTimestamp + Status postgres.ColumnInteger + Email postgres.ColumnString + Phone postgres.ColumnString + Username postgres.ColumnString + Nickname postgres.ColumnString + Password postgres.ColumnString + Age postgres.ColumnInteger + Sex postgres.ColumnInteger + Avatar postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type UsersTable struct { + usersTable + + EXCLUDED usersTable +} + +// AS creates new UsersTable with assigned alias +func (a UsersTable) AS(alias string) *UsersTable { + return newUsersTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new UsersTable with assigned schema name +func (a UsersTable) FromSchema(schemaName string) *UsersTable { + return newUsersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new UsersTable with assigned table prefix +func (a UsersTable) WithPrefix(prefix string) *UsersTable { + return newUsersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new UsersTable with assigned table suffix +func (a UsersTable) WithSuffix(suffix string) *UsersTable { + return newUsersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newUsersTable(schemaName, tableName, alias string) *UsersTable { + return &UsersTable{ + usersTable: newUsersTableImpl(schemaName, tableName, alias), + EXCLUDED: newUsersTableImpl("", "excluded", 
""), + } +} + +func newUsersTableImpl(schemaName, tableName, alias string) usersTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + UpdatedAtColumn = postgres.TimestampColumn("updated_at") + DeletedAtColumn = postgres.TimestampColumn("deleted_at") + StatusColumn = postgres.IntegerColumn("status") + EmailColumn = postgres.StringColumn("email") + PhoneColumn = postgres.StringColumn("phone") + UsernameColumn = postgres.StringColumn("username") + NicknameColumn = postgres.StringColumn("nickname") + PasswordColumn = postgres.StringColumn("password") + AgeColumn = postgres.IntegerColumn("age") + SexColumn = postgres.IntegerColumn("sex") + AvatarColumn = postgres.StringColumn("avatar") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, StatusColumn, EmailColumn, PhoneColumn, UsernameColumn, NicknameColumn, PasswordColumn, AgeColumn, SexColumn, AvatarColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn, StatusColumn, EmailColumn, PhoneColumn, UsernameColumn, NicknameColumn, PasswordColumn, AgeColumn, SexColumn, AvatarColumn} + ) + + return usersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + Status: StatusColumn, + Email: EmailColumn, + Phone: PhoneColumn, + Username: UsernameColumn, + Nickname: NicknameColumn, + Password: PasswordColumn, + Age: AgeColumn, + Sex: SexColumn, + Avatar: AvatarColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/backend/database/transform.yaml b/backend/database/transform.yaml new file mode 100644 index 0000000..4a46153 --- /dev/null +++ b/backend/database/transform.yaml @@ -0,0 +1,11 @@ +ignores: + - migrations + - river_client + - river_client_queue + - river_job + - river_leader + - river_queue +# 
types: +# users: # table name +# meta: UserMeta +# meta: Json[UserMeta] diff --git a/backend/docs/docs.go b/backend/docs/docs.go new file mode 100644 index 0000000..04f83f0 --- /dev/null +++ b/backend/docs/docs.go @@ -0,0 +1,54 @@ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": {}, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:8080", + BasePath: "/api/v1", + Schemes: []string{}, + Title: "ApiDoc", + Description: "This is a sample server celler server.", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/backend/docs/ember.go b/backend/docs/ember.go new file mode 100644 index 0000000..101b3a8 --- /dev/null +++ b/backend/docs/ember.go @@ -0,0 +1,10 @@ +package docs + +import ( + _ "embed" + + _ "git.ipao.vip/rogeecn/atomctl/pkg/swag" +) + +//go:embed swagger.json +var SwaggerSpec string diff --git a/backend/docs/swagger.json b/backend/docs/swagger.json new file mode 100644 index 0000000..b5640f4 --- /dev/null +++ b/backend/docs/swagger.json @@ -0,0 +1,30 @@ +{ + "swagger": "2.0", + 
"info": { + "description": "This is a sample server celler server.", + "title": "ApiDoc", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "1.0" + }, + "host": "localhost:8080", + "basePath": "/api/v1", + "paths": {}, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +} \ No newline at end of file diff --git a/backend/docs/swagger.yaml b/backend/docs/swagger.yaml new file mode 100644 index 0000000..9f48b7e --- /dev/null +++ b/backend/docs/swagger.yaml @@ -0,0 +1,22 @@ +basePath: /api/v1 +externalDocs: + description: OpenAPI + url: https://swagger.io/resources/open-api/ +host: localhost:8080 +info: + contact: + email: support@swagger.io + name: UserName + url: http://www.swagger.io/support + description: This is a sample server celler server. 
+ license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ + title: ApiDoc + version: "1.0" +paths: {} +securityDefinitions: + BasicAuth: + type: basic +swagger: "2.0" diff --git a/backend/fixtures/.gitkeep b/backend/fixtures/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 0000000..5ae9e10 --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,161 @@ +module backend + +go 1.23.2 + +require ( + git.ipao.vip/rogeecn/atom v1.0.15 + git.ipao.vip/rogeecn/atomctl v0.0.0-20250109030503-bd6d6bc6e82c + github.com/ThreeDotsLabs/watermill v1.4.2 + github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.5 + github.com/ThreeDotsLabs/watermill-redisstream v1.4.2 + github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 + github.com/go-jet/jet/v2 v2.12.0 + github.com/gofiber/fiber/v3 v3.0.0-beta.4 + github.com/gofiber/utils/v2 v2.0.0-beta.7 + github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 + github.com/imroc/req/v3 v3.49.1 + github.com/jackc/pgx/v5 v5.7.2 + github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a + github.com/lib/pq v1.10.9 + github.com/opentracing/opentracing-go v1.2.0 + github.com/pkg/errors v0.9.1 + github.com/redis/go-redis/v9 v9.7.0 + github.com/riverqueue/river v0.15.0 + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0 + github.com/riverqueue/river/rivertype v0.15.0 + github.com/rogeecn/fabfile v1.4.0 + github.com/samber/lo v1.47.0 + github.com/sirupsen/logrus v1.9.3 + github.com/smartystreets/goconvey v1.8.1 + github.com/soheilhy/cmux v0.1.5 + github.com/speps/go-hashids/v2 v2.0.1 + github.com/spf13/cobra v1.8.1 + github.com/swaggo/files/v2 v2.0.2 + github.com/swaggo/swag v1.16.4 + github.com/uber/jaeger-client-go v2.30.0+incompatible + go.opentelemetry.io/contrib/instrumentation/runtime v0.58.0 + go.opentelemetry.io/otel v1.33.0 + 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 + go.opentelemetry.io/otel/metric v1.33.0 + go.opentelemetry.io/otel/sdk v1.33.0 + go.opentelemetry.io/otel/sdk/metric v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 + go.uber.org/dig v1.18.0 + golang.org/x/net v0.34.0 + golang.org/x/sync v0.10.0 + google.golang.org/grpc v1.69.2 + google.golang.org/protobuf v1.35.2 + gopkg.in/retry.v1 v1.0.3 +) + +require ( + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/IBM/sarama v1.43.3 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/Rican7/retry v0.3.1 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.5.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + 
github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gofiber/schema v1.2.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/lithammer/shortuuid/v3 v3.0.7 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + 
github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/refraction-networking/utls v1.6.7 // indirect + github.com/riverqueue/river/riverdriver v0.15.0 // indirect + github.com/riverqueue/river/rivershared v0.15.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/smarty/assertions v1.15.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.19.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.58.0 // indirect + github.com/valyala/tcplisten v1.0.0 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.28.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + 
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 0000000..06abd7a --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,476 @@ +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +git.ipao.vip/rogeecn/atom v1.0.15 h1:2+Mj9WblGqpSMuiIA1ZiOOt+wxarHrpxNQGMTERLHtY= +git.ipao.vip/rogeecn/atom v1.0.15/go.mod h1:lCz4RuZNDjiZe1Z4asBfbkfDrcr2dkjhD1IoQQ66ZAA= +git.ipao.vip/rogeecn/atomctl v0.0.0-20250109030503-bd6d6bc6e82c h1:FJ1J4mI/rwEI8c010FJwlh9brg8cBfWlv2S2Ts9wjNk= +git.ipao.vip/rogeecn/atomctl v0.0.0-20250109030503-bd6d6bc6e82c/go.mod h1:tBI/WbTcMb9SArd7JZeArSfSoZSo02Kj9ci6d1FdgdE= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc 
v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Rican7/retry v0.3.1 h1:scY4IbO8swckzoA/11HgBwaZRJEyY9vaNJshcdhp1Mc= +github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= +github.com/ThreeDotsLabs/watermill v1.4.2 h1:lX/J79HyUipxZ2VetC7vMPqlw29xreHMxzhPlcZnYoQ= +github.com/ThreeDotsLabs/watermill v1.4.2/go.mod h1:lBnrLbxOjeMRgcJbv+UiZr8Ylz8RkJ4m6i/VN/Nk+to= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.5 h1:ud+4txnRgtr3kZXfXZ5+C7kVQEvsLc5HSNUEa0g+X1Q= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.5/go.mod h1:t4o+4A6GB+XC8WL3DandhzPwd265zQuyWMQC/I+WIOU= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.2 h1:FY6tsBcbhbJpKDOssU4bfybstqY0hQHwiZmVq9qyILQ= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.2/go.mod h1:69++855LyB+ckYDe60PiJLBcUrpckfDE2WwyzuVJRCk= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 h1:g4uE5Nm3Z6LVB3m+uMgHlN4ne4bDpwf3RJmXYRgMv94= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0/go.mod h1:G8/otZYWLTCeYL2Ww3ujQ7gQ/3+jw5Bj0UtyKn7bBjA= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= +github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 h1:R2zQhFwSCyyd7L43igYjDrH0wkC/i+QBPELuY0HOu84= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0/go.mod h1:2MqLKYJfjs3UriXXF9Fd0Qmh/lhxi/6tHXkqtXxyIHc= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod 
h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-jet/jet/v2 v2.12.0 h1:z2JfvBAZgsfxlQz6NXBYdZTXc7ep3jhbszTLtETv1JE= +github.com/go-jet/jet/v2 v2.12.0/go.mod h1:ufQVRQeI1mbcO5R8uCEVcVf3Foej9kReBdwDx7YMWUM= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference 
v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gofiber/fiber/v3 v3.0.0-beta.4 h1:KzDSavvhG7m81NIsmnu5l3ZDbVS4feCidl4xlIfu6V0= +github.com/gofiber/fiber/v3 v3.0.0-beta.4/go.mod h1:/WFUoHRkZEsGHyy2+fYcdqi109IVOFbVwxv1n1RU+kk= +github.com/gofiber/schema v1.2.0 h1:j+ZRrNnUa/0ZuWrn/6kAtAufEr4jCJ+JuTURAMxNSZg= +github.com/gofiber/schema v1.2.0/go.mod h1:YYwj01w3hVfaNjhtJzaqetymL56VW642YS3qZPhuE6c= +github.com/gofiber/utils/v2 v2.0.0-beta.7 h1:NnHFrRHvhrufPABdWajcKZejz9HnCWmT/asoxRsiEbQ= +github.com/gofiber/utils/v2 v2.0.0-beta.7/go.mod h1:J/M03s+HMdZdvhAeyh76xT72IfVqBzuz/OJkrMa7cwU= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/imroc/req/v3 v3.49.1 h1:Nvwo02riiPEzh74ozFHeEJrtjakFxnoWNR3YZYuQm9U= +github.com/imroc/req/v3 v3.49.1/go.mod h1:tsOk8K7zI6cU4xu/VWCZVtq9Djw9IWm4MslKzme5woU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= +github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 
v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a h1:45JtCyuNYE+QN9aPuR1ID9++BQU+NMTMudHSuaK0Las= +github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a/go.mod h1:RVHtZuvrpETIepiNUrNlih2OynoFf1eM6DGC6dloXzk= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= +github.com/lithammer/shortuuid/v3 v3.0.7/go.mod 
h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= 
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= +github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= +github.com/refraction-networking/utls v1.6.7 
h1:zVJ7sP1dJx/WtVuITug3qYUq034cDq9B2MR1K67ULZM= +github.com/refraction-networking/utls v1.6.7/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= +github.com/riverqueue/river v0.15.0 h1:5jvE5KEvLvigJRTAtE28R/bvVwIb9GCdXo68IiKF700= +github.com/riverqueue/river v0.15.0/go.mod h1:k4v54wv5HMnnOCUPf+iEi3fs3RiJxXYpppuhXsW9UG8= +github.com/riverqueue/river/riverdriver v0.15.0 h1:Nv88t7tK51HvGfiSIe7ov/2PrAFntY4b3ak4MEF3Dxs= +github.com/riverqueue/river/riverdriver v0.15.0/go.mod h1:UERKTvUg0M7qWLuQLmHiEM/hbJEMP3+qcNDhvIx7R4s= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.15.0 h1:4OKdSQVJ3OQ2VfPtOPB5OHjzLpWEju4/eafxe/hbn84= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.15.0/go.mod h1:KwoUPnt2zu1UONPk7NchnHyZWcHKMwMM6B95Cx6SI2g= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0 h1:IBNuPdflDav+sxd8EDXomyv93fvMG9IBgEToQPkFWNs= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0/go.mod h1:b8CGkpQlpHacnULdaJk8+4Dnvj4lBeEaIOnd+UT3Ciw= +github.com/riverqueue/river/rivershared v0.15.0 h1:hDClNzZHUJzF9wdg6FgFMjvaMV74zY9FZZPQmBaVVM0= +github.com/riverqueue/river/rivershared v0.15.0/go.mod h1:5pyQTv4W6BVoazOvN1p4EQ3a3jopsSgcHB1NxVRQRgU= +github.com/riverqueue/river/rivertype v0.15.0 h1:+TXRnvQv1ulV24uQnsuZmbb3yJdmbpizKQf0b0SM+f0= +github.com/riverqueue/river/rivertype v0.15.0/go.mod h1:4vpt5ZSdZ35mFbRAV4oXgeRdH3Mq5h1pUzQTvaGfCUA= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogeecn/fabfile v1.4.0 h1:Rw7/7OH8cV4aRPw79Oa4hHHFKaC/ol+sNmGcB/usHaQ= +github.com/rogeecn/fabfile v1.4.0/go.mod h1:EPwX7TtVcIWSLJkJAqxSzYjM/aV1Q0wymcaXqnMgzas= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a h1:3QH7VyOaaiUHNrA9Se4YQIRkDTCw1EJls9xTUCaCeRM= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= +github.com/rogpeppe/go-internal v1.13.1 
h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/speps/go-hashids/v2 v2.0.1 h1:ViWOEqWES/pdOSq+C1SLVa8/Tnsd52XC34RY7lt7m4g= +github.com/speps/go-hashids/v2 v2.0.1/go.mod h1:47LKunwvDZki/uRVD6NImtyk712yFzIs3UF3KlHohGw= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod 
h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU= +github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= 
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE= +github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw= +github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/runtime v0.58.0 h1:GrcF8ABgnBHQFgp4zu5/jTSqLkoJ9uiDz2e7eKkjq+w= +go.opentelemetry.io/contrib/instrumentation/runtime v0.58.0/go.mod h1:+kxR5prZLoFAJVXJWZKWO2e4PY2dYyXIRNklBuOyzpM= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 h1:7F29RDmnlqk6B5d+sUqemt8TBfDqxryYW5gX6L74RFA= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0/go.mod h1:ZiGDq7xwDMKmWDrN1XsXAj0iC7hns+2DhxBFSncNHSE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0 h1:bSjzTvsXZbLSWU8hnZXcKmEVaJjjnandxD0PxThhVU8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0/go.mod h1:aj2rilHL8WjXY1I5V+ra+z8FELtk681deydgYT8ikxU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod 
h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/retry.v1 v1.0.3 h1:a9CArYczAVv6Qs6VGoLMio99GEs7kY9UzSF9+LD+iGs= +gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/main.go b/backend/main.go new file mode 100644 index 0000000..bd06a70 --- /dev/null +++ b/backend/main.go @@ -0,0 +1,33 @@ +package main + +import ( + "backend/app/service/http" + + "git.ipao.vip/rogeecn/atom" + log "github.com/sirupsen/logrus" +) + +// @title ApiDoc +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ +// @contact.name UserName +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +// @host localhost:8080 +// @BasePath /api/v1 +// @securityDefinitions.basic BasicAuth +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + opts := []atom.Option{ + atom.Name("backend"), + http.Command(), + } + + if err := atom.Serve(opts...); err != nil { + log.Fatal(err) + } +} diff --git a/backend/main_test.go b/backend/main_test.go new file mode 100644 index 0000000..06ab7d0 --- /dev/null +++ b/backend/main_test.go @@ -0,0 +1 @@ +package main diff --git a/backend/pkg/consts/consts.go b/backend/pkg/consts/consts.go new file mode 100644 index 0000000..85fa520 --- /dev/null +++ b/backend/pkg/consts/consts.go @@ -0,0 +1,8 @@ +package consts + +// Format +// +// // swagger:enum CacheKey +// // ENUM( +// // VerifyCode = "code:__CHANNEL__:%s", +// // ) diff --git a/backend/pkg/f/bind.go b/backend/pkg/f/bind.go new file mode 100644 index 0000000..a0ce329 --- /dev/null +++ b/backend/pkg/f/bind.go @@ -0,0 +1,58 @@ +package f + +import ( + "github.com/gofiber/fiber/v3" + "github.com/pkg/errors" +) + +func Path[T fiber.GenericType](key string) func(fiber.Ctx) (T, error) { + return func(ctx fiber.Ctx) (T, error) { + v := fiber.Params[T](ctx, key) + return v, nil + } +} + +func URI[T any](name string) func(fiber.Ctx) (*T, error) { + return func(ctx fiber.Ctx) (*T, error) { + p := new(T) + if err := ctx.Bind().URI(p); err != nil { + return nil, errors.Wrapf(err, "uri: %s", name) + } + + return p, nil + } +} + +func Body[T any](name string) func(fiber.Ctx) (*T, error) { + return func(ctx fiber.Ctx) (*T, error) { + p := new(T) + if err := ctx.Bind().Body(p); err != nil { + return nil, errors.Wrapf(err, "body: %s", name) + } + + return p, nil + } +} + +func Query[T 
any](name string) func(fiber.Ctx) (*T, error) { + return func(ctx fiber.Ctx) (*T, error) { + p := new(T) + if err := ctx.Bind().Query(p); err != nil { + return nil, errors.Wrapf(err, "query: %s", name) + } + + return p, nil + } +} + +func Header[T any](name string) func(fiber.Ctx) (*T, error) { + return func(ctx fiber.Ctx) (*T, error) { + p := new(T) + err := ctx.Bind().Header(p) + if err != nil { + return nil, errors.Wrapf(err, "header: %s", name) + } + + return p, nil + } +} diff --git a/backend/pkg/f/func.go b/backend/pkg/f/func.go new file mode 100644 index 0000000..cee78bb --- /dev/null +++ b/backend/pkg/f/func.go @@ -0,0 +1,372 @@ +package f + +import ( + "github.com/gofiber/fiber/v3" +) + +func Func(f fiber.Handler) fiber.Handler { + return f +} + +func Func1[P1 any]( + f func(fiber.Ctx, P1) error, + pf1 func(fiber.Ctx) (P1, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p, err := pf1(ctx) + if err != nil { + return err + } + + return f(ctx, p) + } +} + +func Func2[P1, P2 any]( + f func(fiber.Ctx, P1, P2) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + + p2, err := pf2(ctx) + if err != nil { + return err + } + + return f(ctx, p1, p2) + } +} + +func Func3[P1, P2, P3 any]( + f func(fiber.Ctx, P1, P2, P3) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3) + } +} + +func Func4[P1, P2, P3, P4 any]( + f func(fiber.Ctx, P1, P2, P3, P4) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), +) 
fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + + p2, err := pf2(ctx) + if err != nil { + return err + } + + p3, err := pf3(ctx) + if err != nil { + return err + } + + p4, err := pf4(ctx) + if err != nil { + return err + } + + return f(ctx, p1, p2, p3, p4) + } +} + +func Func5[P1, P2, P3, P4, P5 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5) + } +} + +func Func6[P1, P2, P3, P4, P5, P6 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5, p6) + } +} + +func Func7[P1, P2, P3, P4, P5, P6, P7 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), 
+ pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5, p6, p7) + } +} + +func Func8[P1, P2, P3, P4, P5, P6, P7, P8 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) (P8, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5, p6, p7, p8) + } +} + +func Func9[P1, P2, P3, P4, P5, P6, P7, P8, P9 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8, P9) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) (P8, error), + pf9 func(fiber.Ctx) (P9, error), +) fiber.Handler { + return 
func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + p9, err := pf9(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5, p6, p7, p8, p9) + } +} + +func Func10[P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) error, + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) (P8, error), + pf9 func(fiber.Ctx) (P9, error), + pf10 func(fiber.Ctx) (P10, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + p9, err := pf9(ctx) + if err != nil { + return err + } + p10, err := pf10(ctx) + if err != nil { + return err + } + return f(ctx, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) + } +} diff --git a/backend/pkg/f/func_data.go b/backend/pkg/f/func_data.go new file mode 100644 index 0000000..ceb515f --- /dev/null +++ b/backend/pkg/f/func_data.go @@ -0,0 +1,413 @@ +package f + +import ( + 
"github.com/gofiber/fiber/v3" +) + +func DataFunc[T any]( + f func(fiber.Ctx) (T, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + data, err := f(ctx) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc1[T, P1 any]( + f func(fiber.Ctx, P1) (T, error), + pf1 func(fiber.Ctx) (P1, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p, err := pf1(ctx) + if err != nil { + return err + } + + data, err := f(ctx, p) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc2[T, P1, P2 any]( + f func(fiber.Ctx, P1, P2) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc3[T, P1, P2, P3 any]( + f func(fiber.Ctx, P1, P2, P3) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc4[T, P1, P2, P3, P4 any]( + f func(fiber.Ctx, P1, P2, P3, P4) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + data, err := f(ctx, 
p1, p2, p3, p4) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc5[T, P1, P2, P3, P4, P5 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc6[T, P1, P2, P3, P4, P5, P6 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5, p6) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc7[T, P1, P2, P3, P4, P5, P6, P7 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), +) 
fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5, p6, p7) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc8[T, P1, P2, P3, P4, P5, P6, P7, P8 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) (P8, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5, p6, p7, p8) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc9[T, P1, P2, P3, P4, P5, P6, P7, P8, P9 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8, P9) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) 
(P8, error), + pf9 func(fiber.Ctx) (P9, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + p9, err := pf9(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5, p6, p7, p8, p9) + if err != nil { + return err + } + return ctx.JSON(data) + } +} + +func DataFunc10[T, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10 any]( + f func(fiber.Ctx, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10) (T, error), + pf1 func(fiber.Ctx) (P1, error), + pf2 func(fiber.Ctx) (P2, error), + pf3 func(fiber.Ctx) (P3, error), + pf4 func(fiber.Ctx) (P4, error), + pf5 func(fiber.Ctx) (P5, error), + pf6 func(fiber.Ctx) (P6, error), + pf7 func(fiber.Ctx) (P7, error), + pf8 func(fiber.Ctx) (P8, error), + pf9 func(fiber.Ctx) (P9, error), + pf10 func(fiber.Ctx) (P10, error), +) fiber.Handler { + return func(ctx fiber.Ctx) error { + p1, err := pf1(ctx) + if err != nil { + return err + } + p2, err := pf2(ctx) + if err != nil { + return err + } + p3, err := pf3(ctx) + if err != nil { + return err + } + p4, err := pf4(ctx) + if err != nil { + return err + } + p5, err := pf5(ctx) + if err != nil { + return err + } + p6, err := pf6(ctx) + if err != nil { + return err + } + p7, err := pf7(ctx) + if err != nil { + return err + } + p8, err := pf8(ctx) + if err != nil { + return err + } + p9, err := pf9(ctx) + if err != nil { + return err + } + p10, err := pf10(ctx) + if err != nil { + return err + } + data, err := f(ctx, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) + if err != nil { + return err + } + return ctx.JSON(data) + } +} diff 
--git a/backend/pkg/proto/user/v1/user.pb.go b/backend/pkg/proto/user/v1/user.pb.go new file mode 100644 index 0000000..da9a534 --- /dev/null +++ b/backend/pkg/proto/user/v1/user.pb.go @@ -0,0 +1,485 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc (unknown) +// source: user/v1/user.proto + +package userv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// User represents a user entity +type User struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + Phone string `protobuf:"bytes,4,opt,name=phone,proto3" json:"phone,omitempty"` + CreateTime string `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + UpdateTime string `protobuf:"bytes,6,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` +} + +func (x *User) Reset() { + *x = User{} + if protoimpl.UnsafeEnabled { + mi := &file_user_v1_user_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. +func (*User) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{0} +} + +func (x *User) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *User) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *User) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *User) GetPhone() string { + if x != nil { + return x.Phone + } + return "" +} + +func (x *User) GetCreateTime() string { + if x != nil { + return x.CreateTime + } + return "" +} + +func (x *User) GetUpdateTime() string { + if x != nil { + return x.UpdateTime + } + return "" +} + +type ListUsersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageNumber int32 `protobuf:"varint,2,opt,name=page_number,json=pageNumber,proto3" json:"page_number,omitempty"` +} + +func (x *ListUsersRequest) Reset() { + *x = ListUsersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_user_v1_user_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersRequest) ProtoMessage() {} + +func (x *ListUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersRequest.ProtoReflect.Descriptor 
instead. +func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUsersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUsersRequest) GetPageNumber() int32 { + if x != nil { + return x.PageNumber + } + return 0 +} + +type ListUsersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + Total int32 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` +} + +func (x *ListUsersResponse) Reset() { + *x = ListUsersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_user_v1_user_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersResponse) ProtoMessage() {} + +func (x *ListUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersResponse.ProtoReflect.Descriptor instead. 
+func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{2} +} + +func (x *ListUsersResponse) GetUsers() []*User { + if x != nil { + return x.Users + } + return nil +} + +func (x *ListUsersResponse) GetTotal() int32 { + if x != nil { + return x.Total + } + return 0 +} + +type GetUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *GetUserRequest) Reset() { + *x = GetUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_user_v1_user_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserRequest) ProtoMessage() {} + +func (x *GetUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserRequest.ProtoReflect.Descriptor instead. 
+func (*GetUserRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{3} +} + +func (x *GetUserRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type GetUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` +} + +func (x *GetUserResponse) Reset() { + *x = GetUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_user_v1_user_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserResponse) ProtoMessage() {} + +func (x *GetUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserResponse.ProtoReflect.Descriptor instead. 
+func (*GetUserResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{4} +} + +func (x *GetUserResponse) GetUser() *User { + if x != nil { + return x.User + } + return nil +} + +var File_user_v1_user_proto protoreflect.FileDescriptor + +var file_user_v1_user_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x22, 0xa0, 0x01, + 0x0a, 0x04, 0x55, 0x73, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x68, 0x6f, 0x6e, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x50, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 
0x72, 0x22, 0x4e, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x22, 0x20, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x32, 0x93, 0x01, 0x0a, 0x0b, 0x55, + 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x3e, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x17, 0x2e, 0x75, 0x73, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x77, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 
0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, + 0x09, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x20, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x75, 0x73, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x73, 0x65, 0x72, 0x76, 0x31, 0xa2, 0x02, + 0x03, 0x55, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x55, 0x73, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, + 0x07, 0x55, 0x73, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x13, 0x55, 0x73, 0x65, 0x72, 0x5c, + 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x08, 0x55, 0x73, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_user_v1_user_proto_rawDescOnce sync.Once + file_user_v1_user_proto_rawDescData = file_user_v1_user_proto_rawDesc +) + +func file_user_v1_user_proto_rawDescGZIP() []byte { + file_user_v1_user_proto_rawDescOnce.Do(func() { + file_user_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(file_user_v1_user_proto_rawDescData) + }) + return file_user_v1_user_proto_rawDescData +} + +var file_user_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_user_v1_user_proto_goTypes = []any{ + (*User)(nil), // 0: user.v1.User + (*ListUsersRequest)(nil), // 1: user.v1.ListUsersRequest + (*ListUsersResponse)(nil), // 2: user.v1.ListUsersResponse + (*GetUserRequest)(nil), // 3: user.v1.GetUserRequest + (*GetUserResponse)(nil), // 4: user.v1.GetUserResponse +} +var file_user_v1_user_proto_depIdxs = []int32{ + 0, // 0: user.v1.ListUsersResponse.users:type_name -> user.v1.User + 0, // 1: user.v1.GetUserResponse.user:type_name -> user.v1.User + 1, // 2: user.v1.UserService.ListUsers:input_type -> user.v1.ListUsersRequest + 3, // 3: user.v1.UserService.GetUser:input_type -> user.v1.GetUserRequest + 2, // 4: user.v1.UserService.ListUsers:output_type -> user.v1.ListUsersResponse + 4, // 5: 
user.v1.UserService.GetUser:output_type -> user.v1.GetUserResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_user_v1_user_proto_init() } +func file_user_v1_user_proto_init() { + if File_user_v1_user_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_user_v1_user_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*User); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_user_v1_user_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ListUsersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_user_v1_user_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ListUsersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_user_v1_user_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_user_v1_user_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*GetUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_user_v1_user_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: 
file_user_v1_user_proto_goTypes, + DependencyIndexes: file_user_v1_user_proto_depIdxs, + MessageInfos: file_user_v1_user_proto_msgTypes, + }.Build() + File_user_v1_user_proto = out.File + file_user_v1_user_proto_rawDesc = nil + file_user_v1_user_proto_goTypes = nil + file_user_v1_user_proto_depIdxs = nil +} diff --git a/backend/pkg/proto/user/v1/user.pb.gw.go b/backend/pkg/proto/user/v1/user.pb.gw.go new file mode 100644 index 0000000..d965c00 --- /dev/null +++ b/backend/pkg/proto/user/v1/user.pb.gw.go @@ -0,0 +1,256 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: user/v1/user.proto + +/* +Package userv1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package userv1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_UserService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, client UserServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListUsersRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListUsers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + 
return msg, metadata, err + +} + +func local_request_UserService_ListUsers_0(ctx context.Context, marshaler runtime.Marshaler, server UserServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListUsersRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ListUsers(ctx, &protoReq) + return msg, metadata, err + +} + +func request_UserService_GetUser_0(ctx context.Context, marshaler runtime.Marshaler, client UserServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetUserRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetUser(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_UserService_GetUser_0(ctx context.Context, marshaler runtime.Marshaler, server UserServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq GetUserRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil 
&& err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.GetUser(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterUserServiceHandlerServer registers the http handlers for service UserService to "mux". +// UnaryRPC :call UserServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterUserServiceHandlerFromEndpoint instead. +func RegisterUserServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server UserServiceServer) error { + + mux.Handle("POST", pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/user.v1.UserService/ListUsers")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UserService_ListUsers_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/user.v1.UserService/GetUser")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_UserService_GetUser_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserService_GetUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterUserServiceHandlerFromEndpoint is same as RegisterUserServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterUserServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterUserServiceHandler(ctx, mux, conn) +} + +// RegisterUserServiceHandler registers the http handlers for service UserService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterUserServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterUserServiceHandlerClient(ctx, mux, NewUserServiceClient(conn)) +} + +// RegisterUserServiceHandlerClient registers the http handlers for service UserService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "UserServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "UserServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "UserServiceClient" to call the correct interceptors. 
+func RegisterUserServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client UserServiceClient) error { + + mux.Handle("POST", pattern_UserService_ListUsers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/ListUsers", runtime.WithHTTPPathPattern("/user.v1.UserService/ListUsers")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UserService_ListUsers_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserService_ListUsers_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_UserService_GetUser_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/user.v1.UserService/GetUser", runtime.WithHTTPPathPattern("/user.v1.UserService/GetUser")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UserService_GetUser_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserService_GetUser_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_UserService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"user.v1.UserService", "ListUsers"}, "")) + + pattern_UserService_GetUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"user.v1.UserService", "GetUser"}, "")) +) + +var ( + forward_UserService_ListUsers_0 = runtime.ForwardResponseMessage + + forward_UserService_GetUser_0 = runtime.ForwardResponseMessage +) diff --git a/backend/pkg/proto/user/v1/user_grpc.pb.go b/backend/pkg/proto/user/v1/user_grpc.pb.go new file mode 100644 index 0000000..0e44a14 --- /dev/null +++ b/backend/pkg/proto/user/v1/user_grpc.pb.go @@ -0,0 +1,141 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+ +package userv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// UserServiceClient is the client API for UserService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type UserServiceClient interface { + // ListUsers returns a list of users with pagination + ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) +} + +type userServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUserServiceClient(cc grpc.ClientConnInterface) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, "/user.v1.UserService/ListUsers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) { + out := new(GetUserResponse) + err := c.cc.Invoke(ctx, "/user.v1.UserService/GetUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. 
+// All implementations must embed UnimplementedUserServiceServer +// for forward compatibility +type UserServiceServer interface { + // ListUsers returns a list of users with pagination + ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) + mustEmbedUnimplementedUserServiceServer() +} + +// UnimplementedUserServiceServer must be embedded to have forward compatible implementations. +type UnimplementedUserServiceServer struct { +} + +func (UnimplementedUserServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUsers not implemented") +} +func (UnimplementedUserServiceServer) GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUser not implemented") +} +func (UnimplementedUserServiceServer) mustEmbedUnimplementedUserServiceServer() {} + +// UnsafeUserServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to UserServiceServer will +// result in compilation errors. 
+type UnsafeUserServiceServer interface { + mustEmbedUnimplementedUserServiceServer() +} + +func RegisterUserServiceServer(s grpc.ServiceRegistrar, srv UserServiceServer) { + s.RegisterService(&UserService_ServiceDesc, srv) +} + +func _UserService_ListUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).ListUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/user.v1.UserService/ListUsers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).ListUsers(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GetUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GetUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/user.v1.UserService/GetUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GetUser(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// UserService_ServiceDesc is the grpc.ServiceDesc for UserService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var UserService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "user.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUsers", + Handler: _UserService_ListUsers_Handler, + }, + { + MethodName: "GetUser", + Handler: _UserService_GetUser_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "user/v1/user.proto", +} diff --git a/backend/pkg/utils/buffer.go b/backend/pkg/utils/buffer.go new file mode 100644 index 0000000..5746d74 --- /dev/null +++ b/backend/pkg/utils/buffer.go @@ -0,0 +1,26 @@ +package utils + +import ( + "bufio" + "io" +) + +// NewLogBuffer creates a buffer that can be used to capture output stream +// and write to a logger in real time +func NewLogBuffer(output func(string)) io.Writer { + reader, writer := io.Pipe() + + go func() { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + output(scanner.Text()) + } + }() + + return writer +} + +// NewCombinedBuffer combines multiple io.Writers +func NewCombinedBuffer(writers ...io.Writer) io.Writer { + return io.MultiWriter(writers...) 
+} diff --git a/backend/proto/user/v1/user.proto b/backend/proto/user/v1/user.proto new file mode 100644 index 0000000..40be68d --- /dev/null +++ b/backend/proto/user/v1/user.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package user.v1; + +// User represents a user entity +message User { + int64 id = 1; + string username = 2; + string email = 3; + string phone = 4; + string create_time = 5; + string update_time = 6; +} + +message ListUsersRequest { + int32 page_size = 1; + int32 page_number = 2; +} + +message ListUsersResponse { + repeated User users = 1; + int32 total = 2; +} + +message GetUserRequest { + int64 id = 1; +} + +message GetUserResponse { + User user = 1; +} + +// UserService provides user-related operations +service UserService { + // ListUsers returns a list of users with pagination + rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) {} + + // GetUser returns detailed information about a specific user + rpc GetUser(GetUserRequest) returns (GetUserResponse) {} +} diff --git a/backend/providers/app/app.go b/backend/providers/app/app.go new file mode 100644 index 0000000..17209eb --- /dev/null +++ b/backend/providers/app/app.go @@ -0,0 +1,18 @@ +package app + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*Config, error) { + return &config, nil + }, o.DiOptions()...) +} diff --git a/backend/providers/app/config.gen.go b/backend/providers/app/config.gen.go new file mode 100644 index 0000000..702160e --- /dev/null +++ b/backend/providers/app/config.gen.go @@ -0,0 +1,179 @@ +// Code generated by go-enum DO NOT EDIT. 
+// Version: - +// Revision: - +// Build Date: - +// Built By: - + +package app + +import ( + "database/sql/driver" + "errors" + "fmt" + "strings" +) + +const ( + // AppModeDevelopment is a AppMode of type development. + AppModeDevelopment AppMode = "development" + // AppModeRelease is a AppMode of type release. + AppModeRelease AppMode = "release" + // AppModeTest is a AppMode of type test. + AppModeTest AppMode = "test" +) + +var ErrInvalidAppMode = fmt.Errorf("not a valid AppMode, try [%s]", strings.Join(_AppModeNames, ", ")) + +var _AppModeNames = []string{ + string(AppModeDevelopment), + string(AppModeRelease), + string(AppModeTest), +} + +// AppModeNames returns a list of possible string values of AppMode. +func AppModeNames() []string { + tmp := make([]string, len(_AppModeNames)) + copy(tmp, _AppModeNames) + return tmp +} + +// AppModeValues returns a list of the values for AppMode +func AppModeValues() []AppMode { + return []AppMode{ + AppModeDevelopment, + AppModeRelease, + AppModeTest, + } +} + +// String implements the Stringer interface. +func (x AppMode) String() string { + return string(x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x AppMode) IsValid() bool { + _, err := ParseAppMode(string(x)) + return err == nil +} + +var _AppModeValue = map[string]AppMode{ + "development": AppModeDevelopment, + "release": AppModeRelease, + "test": AppModeTest, +} + +// ParseAppMode attempts to convert a string to a AppMode. +func ParseAppMode(name string) (AppMode, error) { + if x, ok := _AppModeValue[name]; ok { + return x, nil + } + return AppMode(""), fmt.Errorf("%s is %w", name, ErrInvalidAppMode) +} + +var errAppModeNilPtr = errors.New("value pointer is nil") // one per type for package clashes + +// Scan implements the Scanner interface. 
+func (x *AppMode) Scan(value interface{}) (err error) { + if value == nil { + *x = AppMode("") + return + } + + // A wider range of scannable types. + // driver.Value values at the top of the list for expediency + switch v := value.(type) { + case string: + *x, err = ParseAppMode(v) + case []byte: + *x, err = ParseAppMode(string(v)) + case AppMode: + *x = v + case *AppMode: + if v == nil { + return errAppModeNilPtr + } + *x = *v + case *string: + if v == nil { + return errAppModeNilPtr + } + *x, err = ParseAppMode(*v) + default: + return errors.New("invalid type for AppMode") + } + + return +} + +// Value implements the driver Valuer interface. +func (x AppMode) Value() (driver.Value, error) { + return x.String(), nil +} + +// Set implements the Golang flag.Value interface func. +func (x *AppMode) Set(val string) error { + v, err := ParseAppMode(val) + *x = v + return err +} + +// Get implements the Golang flag.Getter interface func. +func (x *AppMode) Get() interface{} { + return *x +} + +// Type implements the github.com/spf13/pFlag Value interface. +func (x *AppMode) Type() string { + return "AppMode" +} + +type NullAppMode struct { + AppMode AppMode + Valid bool +} + +func NewNullAppMode(val interface{}) (x NullAppMode) { + err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value. + _ = err // make any errcheck linters happy + return +} + +// Scan implements the Scanner interface. +func (x *NullAppMode) Scan(value interface{}) (err error) { + if value == nil { + x.AppMode, x.Valid = AppMode(""), false + return + } + + err = x.AppMode.Scan(value) + x.Valid = (err == nil) + return +} + +// Value implements the driver Valuer interface. +func (x NullAppMode) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + // driver.Value accepts int64 for int values. 
+ return string(x.AppMode), nil +} + +type NullAppModeStr struct { + NullAppMode +} + +func NewNullAppModeStr(val interface{}) (x NullAppModeStr) { + x.Scan(val) // yes, we ignore this error, it will just be an invalid value. + return +} + +// Value implements the driver Valuer interface. +func (x NullAppModeStr) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + return x.AppMode.String(), nil +} diff --git a/backend/providers/app/config.go b/backend/providers/app/config.go new file mode 100644 index 0000000..c4e37d7 --- /dev/null +++ b/backend/providers/app/config.go @@ -0,0 +1,45 @@ +package app + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +const DefaultPrefix = "App" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +// swagger:enum AppMode +// ENUM(development, release, test) +type AppMode string + +type Config struct { + Mode AppMode + Cert *Cert + BaseURI *string +} + +func (c *Config) IsDevMode() bool { + return c.Mode == AppModeDevelopment +} + +func (c *Config) IsReleaseMode() bool { + return c.Mode == AppModeRelease +} + +func (c *Config) IsTestMode() bool { + return c.Mode == AppModeTest +} + +type Cert struct { + CA string + Cert string + Key string +} diff --git a/backend/providers/cmux/config.go b/backend/providers/cmux/config.go new file mode 100644 index 0000000..8850d24 --- /dev/null +++ b/backend/providers/cmux/config.go @@ -0,0 +1,61 @@ +package cmux + +import ( + "fmt" + + "backend/providers/grpc" + "backend/providers/http" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/soheilhy/cmux" + "golang.org/x/sync/errgroup" +) + +const DefaultPrefix = "Cmux" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + 
opt.Prefix(DefaultPrefix),
+ },
+ }
+}
+
+type Config struct {
+ Host *string
+ Port uint
+}
+
+func (h *Config) Address() string {
+ if h.Host == nil {
+ return fmt.Sprintf(":%d", h.Port)
+ }
+ return fmt.Sprintf("%s:%d", *h.Host, h.Port)
+}
+
+type CMux struct {
+ Http *http.Service
+ Grpc *grpc.Grpc
+ Mux cmux.CMux
+}
+
+func (c *CMux) Serve() error {
+ // grpcL := c.Mux.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
+ // httpL := c.Mux.Match(cmux.HTTP1Fast())
+ // httpL := c.Mux.Match(cmux.Any())
+ httpL := c.Mux.Match(cmux.HTTP1Fast()) // HTTP/1.x connections; everything else (HTTP/2 gRPC) falls through to Any below
+ grpcL := c.Mux.Match(cmux.Any())
+
+ var eg errgroup.Group
+ eg.Go(func() error {
+ return c.Grpc.ServeWithListener(grpcL)
+ })
+
+ eg.Go(func() error {
+ return c.Http.Listener(httpL)
+ })
+
+ return c.Mux.Serve() // NOTE(review): eg.Wait() is never called, so errors returned by the gRPC/HTTP goroutines are silently dropped; consider eg.Go(c.Mux.Serve) and returning eg.Wait() instead
+}
diff --git a/backend/providers/cmux/provider.go b/backend/providers/cmux/provider.go
new file mode 100644
index 0000000..5e3b2c8
--- /dev/null
+++ b/backend/providers/cmux/provider.go
@@ -0,0 +1,32 @@
+package cmux
+
+import (
+ "net"
+
+ "backend/providers/grpc"
+ "backend/providers/http"
+
+ "git.ipao.vip/rogeecn/atom/container"
+ "git.ipao.vip/rogeecn/atom/utils/opt"
+ "github.com/soheilhy/cmux"
+)
+
+func Provide(opts ...opt.Option) error {
+ o := opt.New(opts...)
+ var config Config
+ if err := o.UnmarshalConfig(&config); err != nil {
+ return err
+ }
+ return container.Container.Provide(func(http *http.Service, grpc *grpc.Grpc) (*CMux, error) {
+ l, err := net.Listen("tcp", config.Address())
+ if err != nil {
+ return nil, err
+ }
+
+ return &CMux{
+ Http: http,
+ Grpc: grpc,
+ Mux: cmux.New(l),
+ }, nil
+ }, o.DiOptions()...)
+} diff --git a/backend/providers/events/config.go b/backend/providers/events/config.go new file mode 100644 index 0000000..69c79a2 --- /dev/null +++ b/backend/providers/events/config.go @@ -0,0 +1,48 @@ +package events + +import ( + "context" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/ThreeDotsLabs/watermill/message" +) + +const DefaultPrefix = "Events" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + ConsumerGroup string + + Brokers []string +} + +type PubSub struct { + Publisher message.Publisher + Subscriber message.Subscriber + Router *message.Router +} + +func (ps *PubSub) Serve(ctx context.Context) error { + if err := ps.Router.Run(ctx); err != nil { + return err + } + return nil +} + +func (ps *PubSub) Handle( + handlerName string, + consumerTopic string, + publisherTopic string, + handler message.HandlerFunc, +) { + ps.Router.AddHandler(handlerName, consumerTopic, ps.Subscriber, publisherTopic, ps.Publisher, handler) +} diff --git a/backend/providers/events/logrus_adapter.go b/backend/providers/events/logrus_adapter.go new file mode 100644 index 0000000..a47ab64 --- /dev/null +++ b/backend/providers/events/logrus_adapter.go @@ -0,0 +1,60 @@ +package events + +import ( + "github.com/ThreeDotsLabs/watermill" + "github.com/sirupsen/logrus" +) + +// LogrusLoggerAdapter is a watermill logger adapter for logrus. +type LogrusLoggerAdapter struct { + log *logrus.Logger + fields watermill.LogFields +} + +// NewLogrusLogger returns a LogrusLoggerAdapter that sends all logs to +// the passed logrus instance. +func LogrusAdapter() watermill.LoggerAdapter { + return &LogrusLoggerAdapter{log: logrus.StandardLogger()} +} + +// Error logs on level error with err as field and optional fields. 
+func (l *LogrusLoggerAdapter) Error(msg string, err error, fields watermill.LogFields) { + l.createEntry(fields.Add(watermill.LogFields{"err": err})).Error(msg) +} + +// Info logs on level info with optional fields. +func (l *LogrusLoggerAdapter) Info(msg string, fields watermill.LogFields) { + l.createEntry(fields).Info(msg) +} + +// Debug logs on level debug with optional fields. +func (l *LogrusLoggerAdapter) Debug(msg string, fields watermill.LogFields) { + l.createEntry(fields).Debug(msg) +} + +// Trace logs on level trace with optional fields. +func (l *LogrusLoggerAdapter) Trace(msg string, fields watermill.LogFields) { + l.createEntry(fields).Trace(msg) +} + +// With returns a new LogrusLoggerAdapter that includes fields +// to be re-used between logging statements. +func (l *LogrusLoggerAdapter) With(fields watermill.LogFields) watermill.LoggerAdapter { + return &LogrusLoggerAdapter{ + log: l.log, + fields: l.fields.Add(fields), + } +} + +// createEntry is a helper to add fields to a logrus entry if necessary. +func (l *LogrusLoggerAdapter) createEntry(fields watermill.LogFields) *logrus.Entry { + entry := logrus.NewEntry(l.log) + + allFields := fields.Add(l.fields) + + if len(allFields) > 0 { + entry = entry.WithFields(logrus.Fields(allFields)) + } + + return entry +} diff --git a/backend/providers/events/provider.go b/backend/providers/events/provider.go new file mode 100644 index 0000000..c228926 --- /dev/null +++ b/backend/providers/events/provider.go @@ -0,0 +1,32 @@ +package events + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*PubSub, error) { + logger := LogrusAdapter() + + client := gochannel.NewGoChannel(gochannel.Config{}, logger) + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: client, + Subscriber: client, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/backend/providers/events/provider_kafka.go b/backend/providers/events/provider_kafka.go new file mode 100644 index 0000000..6f58f97 --- /dev/null +++ b/backend/providers/events/provider_kafka.go @@ -0,0 +1,48 @@ +package events + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" + "github.com/ThreeDotsLabs/watermill/message" +) + +func ProvideKafka(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*PubSub, error) { + logger := LogrusAdapter() + + publisher, err := kafka.NewPublisher(kafka.PublisherConfig{ + Brokers: config.Brokers, + Marshaler: kafka.DefaultMarshaler{}, + }, logger) + if err != nil { + return nil, err + } + + subscriber, err := kafka.NewSubscriber(kafka.SubscriberConfig{ + Brokers: config.Brokers, + Unmarshaler: kafka.DefaultMarshaler{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) 
+} diff --git a/backend/providers/events/provider_redis.go b/backend/providers/events/provider_redis.go new file mode 100644 index 0000000..0d48bb4 --- /dev/null +++ b/backend/providers/events/provider_redis.go @@ -0,0 +1,49 @@ +package events + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/redis/go-redis/v9" +) + +func ProvideRedis(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func(rdb redis.UniversalClient) (*PubSub, error) { + logger := LogrusAdapter() + + subscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{ + Client: rdb, + Unmarshaller: redisstream.DefaultMarshallerUnmarshaller{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + publisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{ + Client: rdb, + Marshaller: redisstream.DefaultMarshallerUnmarshaller{}, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/backend/providers/events/provider_sql.go b/backend/providers/events/provider_sql.go new file mode 100644 index 0000000..b862313 --- /dev/null +++ b/backend/providers/events/provider_sql.go @@ -0,0 +1,49 @@ +package events + +import ( + sqlDB "database/sql" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/ThreeDotsLabs/watermill-sql/v3/pkg/sql" + "github.com/ThreeDotsLabs/watermill/message" +) + +func ProvideSQL(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func(db *sqlDB.DB) (*PubSub, error) { + logger := LogrusAdapter() + + publisher, err := sql.NewPublisher(db, sql.PublisherConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + AutoInitializeSchema: false, + }, logger) + if err != nil { + return nil, err + } + + subscriber, err := sql.NewSubscriber(db, sql.SubscriberConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/backend/providers/grpc/config.go b/backend/providers/grpc/config.go new file mode 100644 index 0000000..fcb7764 --- /dev/null +++ b/backend/providers/grpc/config.go @@ -0,0 +1,52 @@ +package grpc + +import ( + "fmt" + "net" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "google.golang.org/grpc" +) + +const DefaultPrefix = "Grpc" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Host *string + Port uint +} + +func (h *Config) Address() string { + if h.Host == nil { + return fmt.Sprintf(":%d", h.Port) + } + return fmt.Sprintf("%s:%d", *h.Host, h.Port) +} + +type Grpc struct { + Server *grpc.Server + config *Config +} + +// Serve +func (g *Grpc) Serve() error { + l, err := net.Listen("tcp", g.config.Address()) + if err != nil { + return err + } + + return g.Server.Serve(l) +} + +func (g *Grpc) ServeWithListener(ln net.Listener) error { + return g.Server.Serve(ln) +} diff --git a/backend/providers/grpc/provider.go b/backend/providers/grpc/provider.go 
new file mode 100644 index 0000000..0629103 --- /dev/null +++ b/backend/providers/grpc/provider.go @@ -0,0 +1,26 @@ +package grpc + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "google.golang.org/grpc" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*Grpc, error) { + server := grpc.NewServer() + + grpc := &Grpc{ + Server: server, + config: &config, + } + container.AddCloseAble(grpc.Server.GracefulStop) + + return grpc, nil + }, o.DiOptions()...) +} diff --git a/backend/providers/hashids/config.go b/backend/providers/hashids/config.go new file mode 100644 index 0000000..dd3b45a --- /dev/null +++ b/backend/providers/hashids/config.go @@ -0,0 +1,23 @@ +package hashids + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +const DefaultPrefix = "HashIDs" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Alphabet string + Salt string + MinLength uint +} diff --git a/backend/providers/hashids/hashids.go b/backend/providers/hashids/hashids.go new file mode 100644 index 0000000..c296ad4 --- /dev/null +++ b/backend/providers/hashids/hashids.go @@ -0,0 +1,35 @@ +package hashids + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + + "github.com/speps/go-hashids/v2" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*hashids.HashID, error) { + data := hashids.NewData() + data.MinLength = int(config.MinLength) + if data.MinLength == 0 { + data.MinLength = 10 + } + + data.Salt = config.Salt + if data.Salt == "" { + data.Salt = "default-salt-key" + } + + data.Alphabet = config.Alphabet + if config.Alphabet == "" { + data.Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + } + + return hashids.NewWithData(data) + }, o.DiOptions()...) +} diff --git a/backend/providers/http/config.go b/backend/providers/http/config.go new file mode 100644 index 0000000..611f210 --- /dev/null +++ b/backend/providers/http/config.go @@ -0,0 +1,38 @@ +package http + +import ( + "fmt" +) + +const DefaultPrefix = "Http" + +type Config struct { + StaticPath *string + StaticRoute *string + BaseURI *string + Port uint + Tls *Tls + Cors *Cors +} + +type Tls struct { + Cert string + Key string +} + +type Cors struct { + Mode string + Whitelist []Whitelist +} + +type Whitelist struct { + AllowOrigin string + AllowHeaders string + AllowMethods string + ExposeHeaders string + AllowCredentials bool +} + +func (h *Config) Address() string { + return fmt.Sprintf(":%d", h.Port) +} diff --git a/backend/providers/http/engine.go b/backend/providers/http/engine.go new file mode 100644 index 0000000..15594a0 --- /dev/null +++ b/backend/providers/http/engine.go @@ -0,0 +1,100 @@ +package http + +import ( + "errors" + "fmt" + "net" + "runtime/debug" + "time" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + log "github.com/sirupsen/logrus" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/logger" + "github.com/gofiber/fiber/v3/middleware/recover" +) + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + 
opt.Prefix(DefaultPrefix),
+ },
+ }
+}
+
+type Service struct {
+ conf *Config
+ Engine *fiber.App
+}
+
+func (svc *Service) listenerConfig() fiber.ListenConfig {
+ listenConfig := fiber.ListenConfig{
+ EnablePrintRoutes: true,
+ OnShutdownSuccess: func() {
+ log.Info("http server shutdown success")
+ },
+ OnShutdownError: func(err error) {
+ log.Error("http server shutdown error: ", err)
+ },
+
+ // DisableStartupMessage: true,
+ }
+
+ if svc.conf.Tls != nil {
+ if svc.conf.Tls.Cert == "" || svc.conf.Tls.Key == "" {
+ panic(errors.New("tls cert and key must be set")) // NOTE(review): panics during listener setup; callers cannot recover this as an error
+ }
+ listenConfig.CertFile = svc.conf.Tls.Cert
+ listenConfig.CertKeyFile = svc.conf.Tls.Key
+ }
+ container.AddCloseAble(func() {
+ svc.Engine.Shutdown()
+ })
+ return listenConfig
+}
+
+func (svc *Service) Listener(ln net.Listener) error {
+ return svc.Engine.Listener(ln, svc.listenerConfig())
+}
+
+func (svc *Service) Serve() error {
+ return svc.Engine.Listen(svc.conf.Address(), svc.listenerConfig())
+}
+
+func Provide(opts ...opt.Option) error {
+ o := opt.New(opts...)
+ var config Config
+ if err := o.UnmarshalConfig(&config); err != nil {
+ return err
+ }
+
+ return container.Container.Provide(func() (*Service, error) {
+ engine := fiber.New(fiber.Config{
+ StrictRouting: true,
+ })
+ engine.Use(recover.New(recover.Config{
+ EnableStackTrace: true,
+ StackTraceHandler: func(c fiber.Ctx, e any) {
+ log.Error(fmt.Sprintf("panic: %v\n%s\n", e, debug.Stack()))
+ },
+ }))
+
+ if config.StaticRoute != nil && config.StaticPath != nil {
+ engine.Use(config.StaticRoute, config.StaticPath) // NOTE(review): passes two *string values; fiber v3 Use expects a prefix string and a handler (e.g. engine.Use(*config.StaticRoute, static.New(*config.StaticPath))) — as written this likely panics at startup, verify
+ }
+
+ engine.Use(logger.New(logger.Config{
+ Format: `[${ip}:${port}] - [${time}] - ${method} - ${status} - ${path} ${latency} "${ua}"` + "\n",
+ TimeFormat: time.RFC1123,
+ TimeZone: "Asia/Shanghai",
+ }))
+
+ return &Service{
+ Engine: engine,
+ conf: &config,
+ }, nil
+ }, o.DiOptions()...)
+} diff --git a/backend/providers/http/swagger/config.go b/backend/providers/http/swagger/config.go new file mode 100644 index 0000000..4b535a7 --- /dev/null +++ b/backend/providers/http/swagger/config.go @@ -0,0 +1,317 @@ +package swagger + +import ( + "html/template" +) + +// Config stores SwaggerUI configuration variables +type Config struct { + // This parameter can be used to name different swagger document instances. + // default: "" + InstanceName string `json:"-"` + + // Title pointing to title of HTML page. + // default: "Swagger UI" + Title string `json:"-"` + + // URL to fetch external configuration document from. + // default: "" + ConfigURL string `json:"configUrl,omitempty"` + + // The URL pointing to API definition (normally swagger.json or swagger.yaml). + // default: "doc.json" + URL string `json:"url,omitempty"` + + // Enables overriding configuration parameters via URL search params. + // default: false + QueryConfigEnabled bool `json:"queryConfigEnabled,omitempty"` + + // The name of a component available via the plugin system to use as the top-level layout for Swagger UI. + // default: "StandaloneLayout" + Layout string `json:"layout,omitempty"` + + // An array of plugin functions to use in Swagger UI. + // default: [SwaggerUIBundle.plugins.DownloadUrl] + Plugins []template.JS `json:"-"` + + // An array of presets to use in Swagger UI. Usually, you'll want to include ApisPreset if you use this option. + // default: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset] + Presets []template.JS `json:"-"` + + // If set to true, enables deep linking for tags and operations. + // default: true + DeepLinking bool `json:"deepLinking"` + + // Controls the display of operationId in operations list. + // default: false + DisplayOperationId bool `json:"displayOperationId,omitempty"` + + // The default expansion depth for models (set to -1 completely hide the models). 
+ // default: 1 + DefaultModelsExpandDepth int `json:"defaultModelsExpandDepth,omitempty"` + + // The default expansion depth for the model on the model-example section. + // default: 1 + DefaultModelExpandDepth int `json:"defaultModelExpandDepth,omitempty"` + + // Controls how the model is shown when the API is first rendered. + // The user can always switch the rendering for a given model by clicking the 'Model' and 'Example Value' links. + // default: "example" + DefaultModelRendering string `json:"defaultModelRendering,omitempty"` + + // Controls the display of the request duration (in milliseconds) for "Try it out" requests. + // default: false + DisplayRequestDuration bool `json:"displayRequestDuration,omitempty"` + + // Controls the default expansion setting for the operations and tags. + // 'list' (default, expands only the tags), + // 'full' (expands the tags and operations), + // 'none' (expands nothing) + DocExpansion string `json:"docExpansion,omitempty"` + + // If set, enables filtering. The top bar will show an edit box that you can use to filter the tagged operations that are shown. + // Can be Boolean to enable or disable, or a string, in which case filtering will be enabled using that string as the filter expression. + // Filtering is case sensitive matching the filter expression anywhere inside the tag. + // default: false + Filter FilterConfig `json:"-"` + + // If set, limits the number of tagged operations displayed to at most this many. The default is to show all operations. + // default: 0 + MaxDisplayedTags int `json:"maxDisplayedTags,omitempty"` + + // Controls the display of vendor extension (x-) fields and values for Operations, Parameters, Responses, and Schema. + // default: false + ShowExtensions bool `json:"showExtensions,omitempty"` + + // Controls the display of extensions (pattern, maxLength, minLength, maximum, minimum) fields and values for Parameters. 
+ // default: false + ShowCommonExtensions bool `json:"showCommonExtensions,omitempty"` + + // Apply a sort to the tag list of each API. It can be 'alpha' (sort by paths alphanumerically) or a function (see Array.prototype.sort(). + // to learn how to write a sort function). Two tag name strings are passed to the sorter for each pass. + // default: "" -> Default is the order determined by Swagger UI. + TagsSorter template.JS `json:"-"` + + // Provides a mechanism to be notified when Swagger UI has finished rendering a newly provided definition. + // default: "" -> Function=NOOP + OnComplete template.JS `json:"-"` + + // An object with the activate and theme properties. + SyntaxHighlight *SyntaxHighlightConfig `json:"-"` + + // Controls whether the "Try it out" section should be enabled by default. + // default: false + TryItOutEnabled bool `json:"tryItOutEnabled,omitempty"` + + // Enables the request snippet section. When disabled, the legacy curl snippet will be used. + // default: false + RequestSnippetsEnabled bool `json:"requestSnippetsEnabled,omitempty"` + + // OAuth redirect URL. + // default: "" + OAuth2RedirectUrl string `json:"oauth2RedirectUrl,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 requests. + // Accepts one argument requestInterceptor(request) and must return the modified request, or a Promise that resolves to the modified request. + // default: "" + RequestInterceptor template.JS `json:"-"` + + // If set, MUST be an array of command line options available to the curl command. This can be set on the mutated request in the requestInterceptor function. + // For example request.curlOptions = ["-g", "--limit-rate 20k"] + // default: nil + RequestCurlOptions []string `json:"request.curlOptions,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 responses. 
+ // Accepts one argument responseInterceptor(response) and must return the modified response, or a Promise that resolves to the modified response. + // default: "" + ResponseInterceptor template.JS `json:"-"` + + // If set to true, uses the mutated request returned from a requestInterceptor to produce the curl command in the UI, + // otherwise the request before the requestInterceptor was applied is used. + // default: true + ShowMutatedRequest bool `json:"showMutatedRequest"` + + // List of HTTP methods that have the "Try it out" feature enabled. An empty array disables "Try it out" for all operations. + // This does not filter the operations from the display. + // Possible values are ["get", "put", "post", "delete", "options", "head", "patch", "trace"] + // default: nil + SupportedSubmitMethods []string `json:"supportedSubmitMethods,omitempty"` + + // By default, Swagger UI attempts to validate specs against swagger.io's online validator. You can use this parameter to set a different validator URL. + // For example for locally deployed validators (https://github.com/swagger-api/validator-badge). + // Setting it to either none, 127.0.0.1 or localhost will disable validation. + // default: "" + ValidatorUrl string `json:"validatorUrl,omitempty"` + + // If set to true, enables passing credentials, as defined in the Fetch standard, in CORS requests that are sent by the browser. + // Note that Swagger UI cannot currently set cookies cross-domain (see https://github.com/swagger-api/swagger-js/issues/1163). + // as a result, you will have to rely on browser-supplied cookies (which this setting enables sending) that Swagger UI cannot control. + // default: false + WithCredentials bool `json:"withCredentials,omitempty"` + + // Function to set default values to each property in model. Accepts one argument modelPropertyMacro(property), property is immutable. + // default: "" + ModelPropertyMacro template.JS `json:"-"` + + // Function to set default value to parameters. 
Accepts two arguments parameterMacro(operation, parameter). + // Operation and parameter are objects passed for context, both remain immutable. + // default: "" + ParameterMacro template.JS `json:"-"` + + // If set to true, it persists authorization data and it would not be lost on browser close/refresh. + // default: false + PersistAuthorization bool `json:"persistAuthorization,omitempty"` + + // Configuration information for OAuth2, optional if using OAuth2 + OAuth *OAuthConfig `json:"-"` + + // (authDefinitionKey, username, password) => action + // Programmatically set values for a Basic authorization scheme. + // default: "" + PreauthorizeBasic template.JS `json:"-"` + + // (authDefinitionKey, apiKeyValue) => action + // Programmatically set values for an API key or Bearer authorization scheme. + // In case of OpenAPI 3.0 Bearer scheme, apiKeyValue must contain just the token itself without the Bearer prefix. + // default: "" + PreauthorizeApiKey template.JS `json:"-"` + + // Applies custom CSS styles. + // default: "" + CustomStyle template.CSS `json:"-"` + + // Applies custom JavaScript scripts. + // default "" + CustomScript template.JS `json:"-"` +} + +type FilterConfig struct { + Enabled bool + Expression string +} + +func (fc FilterConfig) Value() interface{} { + if fc.Expression != "" { + return fc.Expression + } + return fc.Enabled +} + +type SyntaxHighlightConfig struct { + // Whether syntax highlighting should be activated or not. + // default: true + Activate bool `json:"activate"` + // Highlight.js syntax coloring theme to use. + // Possible values are ["agate", "arta", "monokai", "nord", "obsidian", "tomorrow-night"] + // default: "agate" + Theme string `json:"theme,omitempty"` +} + +func (shc SyntaxHighlightConfig) Value() interface{} { + if shc.Activate { + return shc + } + return false +} + +type OAuthConfig struct { + // ID of the client sent to the OAuth2 provider. 
+ // default: ""
+ ClientId string `json:"clientId,omitempty"`
+
+ // Never use this parameter in your production environment.
+ // It exposes crucial security information. This feature is intended for dev/test environments only.
+ // Secret of the client sent to the OAuth2 provider.
+ // default: ""
+ ClientSecret string `json:"clientSecret,omitempty"`
+
+ // Application name, displayed in authorization popup.
+ // default: ""
+ AppName string `json:"appName,omitempty"`
+
+ // Realm query parameter (for oauth1) added to authorizationUrl and tokenUrl.
+ // default: ""
+ Realm string `json:"realm,omitempty"`
+
+ // String array of initially selected oauth scopes
+ // default: nil
+ Scopes []string `json:"scopes,omitempty"`
+
+ // Additional query parameters added to authorizationUrl and tokenUrl.
+ // default: nil
+ AdditionalQueryStringParams map[string]string `json:"additionalQueryStringParams,omitempty"`
+
+ // Unavailable Only activated for the accessCode flow.
+ // During the authorization_code request to the tokenUrl, pass the Client Password using the HTTP Basic Authentication scheme
+ // (Authorization header with Basic base64encode(client_id + client_secret)).
+ // default: false
+ UseBasicAuthenticationWithAccessCodeGrant bool `json:"useBasicAuthenticationWithAccessCodeGrant,omitempty"`
+
+ // Only applies to authorizationCode flows.
+ // Proof Key for Code Exchange brings enhanced security for OAuth public clients.
+ // default: false + UsePkceWithAuthorizationCodeGrant bool `json:"usePkceWithAuthorizationCodeGrant,omitempty"` +} + +var ConfigDefault = Config{ + Title: "Swagger UI", + Layout: "StandaloneLayout", + Plugins: []template.JS{ + template.JS("SwaggerUIBundle.plugins.DownloadUrl"), + }, + Presets: []template.JS{ + template.JS("SwaggerUIBundle.presets.apis"), + template.JS("SwaggerUIStandalonePreset"), + }, + DeepLinking: true, + DefaultModelsExpandDepth: 1, + DefaultModelExpandDepth: 1, + DefaultModelRendering: "example", + DocExpansion: "list", + SyntaxHighlight: &SyntaxHighlightConfig{ + Activate: true, + Theme: "agate", + }, + ShowMutatedRequest: true, +} + +// Helper function to set default values +func configDefault(config ...Config) Config { + // Return default config if nothing provided + if len(config) < 1 { + return ConfigDefault + } + + // Override default config + cfg := config[0] + + if cfg.Title == "" { + cfg.Title = ConfigDefault.Title + } + + if cfg.Layout == "" { + cfg.Layout = ConfigDefault.Layout + } + + if cfg.DefaultModelRendering == "" { + cfg.DefaultModelRendering = ConfigDefault.DefaultModelRendering + } + + if cfg.DocExpansion == "" { + cfg.DocExpansion = ConfigDefault.DocExpansion + } + + if cfg.Plugins == nil { + cfg.Plugins = ConfigDefault.Plugins + } + + if cfg.Presets == nil { + cfg.Presets = ConfigDefault.Presets + } + + if cfg.SyntaxHighlight == nil { + cfg.SyntaxHighlight = ConfigDefault.SyntaxHighlight + } + + return cfg +} diff --git a/backend/providers/http/swagger/swagger.go b/backend/providers/http/swagger/swagger.go new file mode 100644 index 0000000..2eaf9ed --- /dev/null +++ b/backend/providers/http/swagger/swagger.go @@ -0,0 +1,103 @@ +package swagger + +import ( + "fmt" + "html/template" + "path" + "strings" + "sync" + + "git.ipao.vip/rogeecn/atomctl/pkg/swag" + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/static" + "github.com/gofiber/utils/v2" + swaggerFiles "github.com/swaggo/files/v2" +) + 
+const ( + defaultDocURL = "doc.json" + defaultIndex = "index.html" +) + +var HandlerDefault = New() + +// New returns custom handler +func New(config ...Config) fiber.Handler { + cfg := configDefault(config...) + + index, err := template.New("swagger_index.html").Parse(indexTmpl) + if err != nil { + panic(fmt.Errorf("fiber: swagger middleware error -> %w", err)) + } + + var ( + prefix string + once sync.Once + ) + + return func(c fiber.Ctx) error { + // Set prefix + once.Do( + func() { + prefix = strings.ReplaceAll(c.Route().Path, "*", "") + + forwardedPrefix := getForwardedPrefix(c) + if forwardedPrefix != "" { + prefix = forwardedPrefix + prefix + } + + // Set doc url + if len(cfg.URL) == 0 { + cfg.URL = path.Join(prefix, defaultDocURL) + } + }, + ) + + p := c.Path(utils.CopyString(c.Params("*"))) + + switch p { + case defaultIndex: + c.Type("html") + return index.Execute(c, cfg) + case defaultDocURL: + var doc string + if doc, err = swag.ReadDoc(cfg.InstanceName); err != nil { + return err + } + return c.Type("json").SendString(doc) + case "", "/": + return c.Redirect().To(path.Join(prefix, defaultIndex)) + default: + // return fs(c) + return static.New("/swagger", static.Config{ + FS: swaggerFiles.FS, + Browse: true, + })(c) + } + } +} + +func getForwardedPrefix(c fiber.Ctx) string { + header := c.GetReqHeaders()["X-Forwarded-Prefix"] + + if len(header) == 0 { + return "" + } + + prefix := "" + + for _, rawPrefix := range header { + endIndex := len(rawPrefix) + for endIndex > 1 && rawPrefix[endIndex-1] == '/' { + endIndex-- + } + + if endIndex != len(rawPrefix) { + prefix += rawPrefix[:endIndex] + } else { + prefix += rawPrefix + } + } + + return prefix +} diff --git a/backend/providers/http/swagger/template.go b/backend/providers/http/swagger/template.go new file mode 100644 index 0000000..d90607f --- /dev/null +++ b/backend/providers/http/swagger/template.go @@ -0,0 +1,107 @@ +package swagger + +const indexTmpl string = ` + + + + + + {{.Title}} + + + + + {{- 
if .CustomStyle}} + + {{- end}} + {{- if .CustomScript}} + + {{- end}} + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +` diff --git a/backend/providers/job/config.go b/backend/providers/job/config.go new file mode 100644 index 0000000..e5ed4b0 --- /dev/null +++ b/backend/providers/job/config.go @@ -0,0 +1,33 @@ +package job + +import ( + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/riverqueue/river" +) + +const DefaultPrefix = "Job" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct{} + +const ( + PriorityDefault = river.PriorityDefault + PriorityLow = 2 + PriorityMiddle = 3 + PriorityHigh = 3 +) + +const ( + QueueHigh = "high" + QueueDefault = river.QueueDefault + QueueLow = "low" +) diff --git a/backend/providers/job/provider.go b/backend/providers/job/provider.go new file mode 100644 index 0000000..a5739d7 --- /dev/null +++ b/backend/providers/job/provider.go @@ -0,0 +1,85 @@ +package job + +import ( + "context" + "sync" + + "backend/providers/postgres" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/riverqueue/river" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + log "github.com/sirupsen/logrus" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func(ctx context.Context, dbConf *postgres.Config) (*Job, error) { + workers := river.NewWorkers() + + dbPoolConfig, err := pgxpool.ParseConfig(dbConf.DSN()) + if err != nil { + return nil, err + } + + dbPool, err := pgxpool.NewWithConfig(ctx, dbPoolConfig) + if err != nil { + return nil, err + } + container.AddCloseAble(dbPool.Close) + pool := riverpgxv5.New(dbPool) + + queue := &Job{Workers: workers, Driver: pool, ctx: ctx} + container.AddCloseAble(queue.Close) + + return queue, nil + }, o.DiOptions()...) +} + +type Job struct { + ctx context.Context + Workers *river.Workers + Driver *riverpgxv5.Driver + + l sync.Mutex + client *river.Client[pgx.Tx] +} + +func (q *Job) Close() { + if q.client == nil { + return + } + + if err := q.client.StopAndCancel(q.ctx); err != nil { + log.Errorf("Failed to stop and cancel client: %s", err) + } +} + +func (q *Job) Client() (*river.Client[pgx.Tx], error) { + q.l.Lock() + defer q.l.Unlock() + + if q.client == nil { + var err error + q.client, err = river.NewClient(q.Driver, &river.Config{ + Workers: q.Workers, + Queues: map[string]river.QueueConfig{ + QueueHigh: {MaxWorkers: 10}, + QueueDefault: {MaxWorkers: 10}, + QueueLow: {MaxWorkers: 10}, + }, + }) + if err != nil { + return nil, err + } + } + + return q.client, nil +} diff --git a/backend/providers/jwt/config.go b/backend/providers/jwt/config.go new file mode 100644 index 0000000..2689a31 --- /dev/null +++ b/backend/providers/jwt/config.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "time" + + log "github.com/sirupsen/logrus" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +const DefaultPrefix = "JWT" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + 
SigningKey string // jwt签名 + ExpiresTime string // 过期时间 + Issuer string // 签发者 +} + +func (c *Config) ExpiresTimeDuration() time.Duration { + d, err := time.ParseDuration(c.ExpiresTime) + if err != nil { + log.Fatal(err) + } + return d +} diff --git a/backend/providers/jwt/jwt.go b/backend/providers/jwt/jwt.go new file mode 100644 index 0000000..ee39a95 --- /dev/null +++ b/backend/providers/jwt/jwt.go @@ -0,0 +1,118 @@ +package jwt + +import ( + "errors" + "strings" + "time" + + "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" + + jwt "github.com/golang-jwt/jwt/v4" + "golang.org/x/sync/singleflight" +) + +const ( + CtxKey = "claims" + HttpHeader = "Authorization" +) + +type BaseClaims struct { + OpenID string `json:"open_id,omitempty"` + Tenant string `json:"tenant,omitempty"` + UserID int64 `json:"user_id,omitempty"` + TenantID int64 `json:"tenant_id,omitempty"` +} + +// Custom claims structure +type Claims struct { + BaseClaims + jwt.RegisteredClaims +} + +const TokenPrefix = "Bearer " + +type JWT struct { + singleflight *singleflight.Group + config *Config + SigningKey []byte +} + +var ( + TokenExpired = errors.New("Token is expired") + TokenNotValidYet = errors.New("Token not active yet") + TokenMalformed = errors.New("That's not even a token") + TokenInvalid = errors.New("Couldn't handle this token:") +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*JWT, error) { + return &JWT{ + singleflight: &singleflight.Group{}, + config: &config, + SigningKey: []byte(config.SigningKey), + }, nil + }, o.DiOptions()...) 
+} + +func (j *JWT) CreateClaims(baseClaims BaseClaims) *Claims { + ep, _ := time.ParseDuration(j.config.ExpiresTime) + claims := Claims{ + BaseClaims: baseClaims, + RegisteredClaims: jwt.RegisteredClaims{ + NotBefore: jwt.NewNumericDate(time.Now().Add(-time.Second * 10)), // 签名生效时间 + ExpiresAt: jwt.NewNumericDate(time.Now().Add(ep)), // 过期时间 7天 配置文件 + Issuer: j.config.Issuer, // 签名的发行者 + }, + } + return &claims +} + +// 创建一个token +func (j *JWT) CreateToken(claims *Claims) (string, error) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString(j.SigningKey) +} + +// CreateTokenByOldToken 旧token 换新token 使用归并回源避免并发问题 +func (j *JWT) CreateTokenByOldToken(oldToken string, claims *Claims) (string, error) { + v, err, _ := j.singleflight.Do("JWT:"+oldToken, func() (interface{}, error) { + return j.CreateToken(claims) + }) + return v.(string), err +} + +// 解析 token +func (j *JWT) Parse(tokenString string) (*Claims, error) { + tokenString = strings.TrimPrefix(tokenString, TokenPrefix) + token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (i interface{}, e error) { + return j.SigningKey, nil + }) + if err != nil { + if ve, ok := err.(*jwt.ValidationError); ok { + if ve.Errors&jwt.ValidationErrorMalformed != 0 { + return nil, TokenMalformed + } else if ve.Errors&jwt.ValidationErrorExpired != 0 { + // Token is expired + return nil, TokenExpired + } else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 { + return nil, TokenNotValidYet + } else { + return nil, TokenInvalid + } + } + } + if token != nil { + if claims, ok := token.Claims.(*Claims); ok && token.Valid { + return claims, nil + } + return nil, TokenInvalid + } else { + return nil, TokenInvalid + } +} diff --git a/backend/providers/otel/config.go b/backend/providers/otel/config.go new file mode 100644 index 0000000..afd2782 --- /dev/null +++ b/backend/providers/otel/config.go @@ -0,0 +1,54 @@ +package otel + +import ( + "os" + + "git.ipao.vip/rogeecn/atom" 
+ "git.ipao.vip/rogeecn/atom/container" + "git.ipao.vip/rogeecn/atom/utils/opt" +) + +const DefaultPrefix = "OTEL" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + opt.Group(atom.GroupInitialName), + }, + } +} + +type Config struct { + ServiceName string + Version string + Env string + + EndpointGRPC string + EndpointHTTP string + Token string +} + +func (c *Config) format() { + if c.ServiceName == "" { + c.ServiceName = os.Getenv("SERVICE_NAME") + if c.ServiceName == "" { + c.ServiceName = "unknown" + } + } + + if c.Version == "" { + c.Version = os.Getenv("SERVICE_VERSION") + if c.Version == "" { + c.Version = "unknown" + } + } + + if c.Env == "" { + c.Env = os.Getenv("DEPLOY_ENVIRONMENT") + if c.Env == "" { + c.Env = "unknown" + } + } +} diff --git a/backend/providers/otel/docker/.env b/backend/providers/otel/docker/.env new file mode 100644 index 0000000..7a96191 --- /dev/null +++ b/backend/providers/otel/docker/.env @@ -0,0 +1,30 @@ +# Dependent images +GRAFANA_IMAGE=docker.hub.ipao.vip/grafana/grafana:11.4.0 +JAEGERTRACING_IMAGE=docker.hub.ipao.vip/jaegertracing/all-in-one:1.64.0 +OPENSEARCH_IMAGE=docker.hub.ipao.vip/opensearchproject/opensearch:2.18.0 +COLLECTOR_CONTRIB_IMAGE=docker-ghcr.hub.ipao.vip/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.116.1 +PROMETHEUS_IMAGE=docker-quay.hub.ipao.vip/prometheus/prometheus:v3.0.1 + +# OpenTelemetry Collector +HOST_FILESYSTEM=/ +DOCKER_SOCK=/var/run/docker.sock +OTEL_COLLECTOR_HOST=otel-collector +OTEL_COLLECTOR_PORT_GRPC=4317 +OTEL_COLLECTOR_PORT_HTTP=4318 +OTEL_COLLECTOR_CONFIG=./otel-collector/otelcol-config.yml +OTEL_COLLECTOR_CONFIG_EXTRAS=./otel-collector/otelcol-config-extras.yml +OTEL_EXPORTER_OTLP_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:${OTEL_COLLECTOR_PORT_GRPC} +PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:8080/otlp-http/v1/traces + +# 
Grafana +GRAFANA_SERVICE_PORT=3000 +GRAFANA_SERVICE_HOST=grafana + +# Jaeger +JAEGER_SERVICE_PORT=16686 +JAEGER_SERVICE_HOST=jaeger + +# Prometheus +PROMETHEUS_SERVICE_PORT=9090 +PROMETHEUS_SERVICE_HOST=prometheus +PROMETHEUS_ADDR=${PROMETHEUS_SERVICE_HOST}:${PROMETHEUS_SERVICE_PORT} diff --git a/backend/providers/otel/docker/docker-compose.yaml b/backend/providers/otel/docker/docker-compose.yaml new file mode 100644 index 0000000..cdbe983 --- /dev/null +++ b/backend/providers/otel/docker/docker-compose.yaml @@ -0,0 +1,153 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +x-default-logging: &logging + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + tag: "{{.Name}}" + +networks: + default: + name: opentelemetry-demo + driver: bridge + +services: + # ******************** + # Telemetry Components + # ******************** + # Jaeger + jaeger: + image: ${JAEGERTRACING_IMAGE} + container_name: jaeger + command: + - "--memory.max-traces=5000" + - "--query.base-path=/jaeger/ui" + - "--prometheus.server-url=http://${PROMETHEUS_ADDR}" + - "--prometheus.query.normalize-calls=true" + - "--prometheus.query.normalize-duration=true" + deploy: + resources: + limits: + memory: 400M + restart: unless-stopped + ports: + - "${JAEGER_SERVICE_PORT}:${JAEGER_SERVICE_PORT}" # Jaeger UI + # - "${OTEL_COLLECTOR_PORT_GRPC}" + environment: + - METRICS_STORAGE_TYPE=prometheus + logging: *logging + + # Grafana + grafana: + image: ${GRAFANA_IMAGE} + container_name: grafana + deploy: + resources: + limits: + memory: 100M + restart: unless-stopped + environment: + - "GF_INSTALL_PLUGINS=grafana-opensearch-datasource" + volumes: + - ./grafana/grafana.ini:/etc/grafana/grafana.ini + - ./grafana/provisioning/:/etc/grafana/provisioning/ + ports: + - "${GRAFANA_SERVICE_PORT}:${GRAFANA_SERVICE_PORT}" + logging: *logging + + # OpenTelemetry Collector + otel-collector: + image: ${COLLECTOR_CONTRIB_IMAGE} + container_name: otel-collector + deploy: + 
resources: + limits: + memory: 200M + restart: unless-stopped + command: + [ + "--config=/etc/otelcol-config.yml", + "--config=/etc/otelcol-config-extras.yml", + ] + user: 0:0 + volumes: + - ${HOST_FILESYSTEM}:/hostfs:ro + - ${DOCKER_SOCK}:/var/run/docker.sock:ro + - ${OTEL_COLLECTOR_CONFIG}:/etc/otelcol-config.yml + - ${OTEL_COLLECTOR_CONFIG_EXTRAS}:/etc/otelcol-config-extras.yml + ports: + - "${OTEL_COLLECTOR_PORT_GRPC}:${OTEL_COLLECTOR_PORT_GRPC}" + - "${OTEL_COLLECTOR_PORT_HTTP}:${OTEL_COLLECTOR_PORT_HTTP}" + depends_on: + jaeger: + condition: service_started + opensearch: + condition: service_healthy + logging: *logging + environment: + - ENVOY_PORT + - HOST_FILESYSTEM + - OTEL_COLLECTOR_HOST + - OTEL_COLLECTOR_PORT_GRPC + - OTEL_COLLECTOR_PORT_HTTP + + # Prometheus + prometheus: + image: ${PROMETHEUS_IMAGE} + container_name: prometheus + command: + - --web.console.templates=/etc/prometheus/consoles + - --web.console.libraries=/etc/prometheus/console_libraries + - --storage.tsdb.retention.time=1h + - --config.file=/etc/prometheus/prometheus-config.yaml + - --storage.tsdb.path=/prometheus + - --web.enable-lifecycle + - --web.route-prefix=/ + - --web.enable-otlp-receiver + - --enable-feature=exemplar-storage + volumes: + - ./prometheus/prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml + deploy: + resources: + limits: + memory: 300M + restart: unless-stopped + ports: + - "${PROMETHEUS_SERVICE_PORT}:${PROMETHEUS_SERVICE_PORT}" + logging: *logging + + # OpenSearch + opensearch: + image: ${OPENSEARCH_IMAGE} + container_name: opensearch + deploy: + resources: + limits: + memory: 1G + restart: unless-stopped + environment: + - cluster.name=demo-cluster + - node.name=demo-node + - bootstrap.memory_lock=true + - discovery.type=single-node + - OPENSEARCH_JAVA_OPTS=-Xms300m -Xmx300m + - DISABLE_INSTALL_DEMO_CONFIG=true + - DISABLE_SECURITY_PLUGIN=true + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + ports: + - "9200:9200" + 
healthcheck: + test: curl -s http://localhost:9200/_cluster/health | grep -E '"status":"(green|yellow)"' + start_period: 10s + interval: 5s + timeout: 10s + retries: 10 + logging: *logging diff --git a/backend/providers/otel/docker/grafana/grafana.ini b/backend/providers/otel/docker/grafana/grafana.ini new file mode 100644 index 0000000..c21262f --- /dev/null +++ b/backend/providers/otel/docker/grafana/grafana.ini @@ -0,0 +1,1170 @@ +##################### Grafana Configuration Example ##################### +# +# Everything has defaults so you only need to uncomment things you want to +# change + +# possible values : production, development +;app_mode = production + +# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty +;instance_name = ${HOSTNAME} + +# force migration will run migrations that might cause dataloss +;force_migration = false + +#################################### Paths #################################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +;data = /var/lib/grafana + +# Temporary files in `data` directory older than given duration will be removed +;temp_data_lifetime = 24h + +# Directory where grafana can store logs +;logs = /var/log/grafana + +# Directory where grafana will automatically scan and look for plugins +;plugins = /var/lib/grafana/plugins + +# folder that contains provisioning config files that grafana will apply on startup and while running. 
+provisioning = /etc/grafana/provisioning + +#################################### Server #################################### +[server] +# Protocol (http, https, h2, socket) +protocol = http + +# The ip address to bind to, empty will bind to all interfaces +;http_addr = + +# The http port to use +http_port = 3000 + +# The public facing domain name used to access grafana from a browser +domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +;enforce_domain = false + +# The full public facing url you use in browser, used for redirects and emails +# If you use reverse proxy and sub path specify full url (with sub path) +root_url = %(protocol)s://%(domain)s:3000/grafana/ + +# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons. +serve_from_sub_path = true + +# Log web requests +;router_logging = false + +# the path relative working path +;static_root_path = public + +# enable gzip +;enable_gzip = false + +# https certs & key file +;cert_file = +;cert_key = + +# Unix socket path +;socket = + +# CDN Url +;cdn_url = + +# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections. +# `0` means there is no timeout for reading the request. +;read_timeout = 0 + +#################################### Database #################################### +[database] +# You can configure the database connection by specifying type, host, name, user and password +# as separate properties or as on string using the url properties. + +# Either "mysql", "postgres" or "sqlite3", it's your choice +;type = sqlite3 +;host = 127.0.0.1:3306 +;name = grafana +;user = root +# If the password contains # or ; you have to wrap it with triple quotes. 
Ex """#password;""" +;password = + +# Use either URL or the previous fields to configure the database +# Example: mysql://user:secret@host:port/database +;url = + +# For "postgres" only, either "disable", "require" or "verify-full" +;ssl_mode = disable + +# Database drivers may support different transaction isolation levels. +# Currently, only "mysql" driver supports isolation levels. +# If the value is empty - driver's default isolation level is applied. +# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE". +;isolation_level = + +;ca_cert_path = +;client_key_path = +;client_cert_path = +;server_cert_name = + +# For "sqlite3" only, path relative to data_path setting +;path = grafana.db + +# Max idle conn setting default is 2 +;max_idle_conn = 2 + +# Max conn setting default is 0 (mean not set) +;max_open_conn = + +# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours) +;conn_max_lifetime = 14400 + +# Set to true to log the sql calls and execution times. +;log_queries = + +# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared) +;cache_mode = private + +# For "mysql" only if lockingMigration feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0. +;locking_attempt_timeout_sec = 0 + +################################### Data sources ######################### +[datasources] +# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API. +;datasource_limit = 5000 + +#################################### Cache server ############################# +[remote_cache] +# Either "redis", "memcached" or "database" default is "database" +;type = database + +# cache connectionstring options +# database: will use Grafana primary database. +# redis: config like redis server e.g. 
`addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'. +# memcache: 127.0.0.1:11211 +;connstr = + +#################################### Data proxy ########################### +[dataproxy] + +# This enables data proxy logging, default is false +;logging = false + +# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds. +# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set. +;timeout = 30 + +# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds. +;dialTimeout = 10 + +# How many seconds the data proxy waits before sending a keepalive probe request. +;keep_alive_seconds = 30 + +# How many seconds the data proxy waits for a successful TLS Handshake before timing out. +;tls_handshake_timeout_seconds = 10 + +# How many seconds the data proxy will wait for a server's first response headers after +# fully writing the request headers if the request has an "Expect: 100-continue" +# header. A value of 0 will result in the body being sent immediately, without +# waiting for the server to approve. +;expect_continue_timeout_seconds = 1 + +# Optionally limits the total number of connections per host, including connections in the dialing, +# active, and idle states. On limit violation, dials will block. +# A value of zero (0) means no limit. +;max_conns_per_host = 0 + +# The maximum number of idle connections that Grafana will keep alive. +;max_idle_connections = 100 + +# How many seconds the data proxy keeps an idle connection open before timing out. +;idle_conn_timeout_seconds = 90 + +# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false. +;send_user_header = false + +# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests. 
+;response_limit = 0 + +# Limits the number of rows that Grafana will process from SQL data sources. +;row_limit = 1000000 + +#################################### Analytics #################################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. +;reporting_enabled = true + +# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs +;reporting_distributor = grafana-labs + +# Set to false to disable all checks to https://grafana.com +# for new versions of grafana. The check is used +# in some UI views to notify that a grafana update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version. +;check_for_updates = true + +# Set to false to disable all checks to https://grafana.com +# for new versions of plugins. The check is used +# in some UI views to notify that a plugin update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://grafana.com to get the latest versions. 
+;check_for_plugin_updates = true + +# Google Analytics universal tracking code, only enabled if you specify an id here +;google_analytics_ua_id = + +# Google Tag Manager ID, only enabled if you specify an id here +;google_tag_manager_id = + +# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set +;rudderstack_write_key = + +# Rudderstack data plane url, enabled only if rudderstack_write_key is also set +;rudderstack_data_plane_url = + +# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set +;rudderstack_sdk_url = + +# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config +;rudderstack_config_url = + +# Controls if the UI contains any links to user feedback forms +;feedback_links_enabled = true + +#################################### Security #################################### +[security] +# disable creation of admin user on first start of grafana +;disable_initial_admin_creation = false + +# default admin user, created on startup +;admin_user = admin + +# default admin password, can be changed before first start of grafana, or in profile settings +;admin_password = admin + +# used for signing +;secret_key = SW2YcwTIb9zpOOhoPsMm + +# current key provider used for envelope encryption, default to static value specified by secret_key +;encryption_provider = secretKey.v1 + +# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1 +;available_encryption_providers = + +# disable gravatar profile images +;disable_gravatar = false + +# data source proxy whitelist (ip_or_domain:port separated by spaces) +;data_source_proxy_whitelist = + +# disable protection against brute force login attempts +;disable_brute_force_login_protection = false + +# set to true if you host Grafana behind HTTPS. default is false. +;cookie_secure = false + +# set cookie SameSite attribute. defaults to `lax`. 
can be set to "lax", "strict", "none" and "disabled" +;cookie_samesite = lax + +# set to true if you want to allow browsers to render Grafana in a ,