commit 306491168cb14e9d77a6f6311100a9d193d1c65c Author: rogeecn Date: Sat Mar 22 16:39:52 2025 +0800 init diff --git a/.air.toml b/.air.toml new file mode 100644 index 0000000..f0dcfc1 --- /dev/null +++ b/.air.toml @@ -0,0 +1,52 @@ +root = "." +testdata_dir = "testdata" +tmp_dir = "tmp" + +[build] +args_bin = [] +bin = "./tmp/main serve" +cmd = "go build -o ./tmp/main ." +delay = 1000 +exclude_dir = ["assets", "tmp", "vendor", "testdata"] +exclude_file = [] +exclude_regex = ["_test.go"] +exclude_unchanged = false +follow_symlink = false +full_bin = "" +include_dir = [] +include_ext = ["go", "tpl", "tmpl", "html"] +include_file = [] +kill_delay = "0s" +log = "build-errors.log" +poll = false +poll_interval = 0 +post_cmd = [] +pre_cmd = [] +rerun = false +rerun_delay = 500 +send_interrupt = false +stop_on_error = false + +[color] +app = "" +build = "yellow" +main = "magenta" +runner = "green" +watcher = "cyan" + +[log] +main_only = false +silent = false +time = false + +[misc] +clean_on_exit = false + +[proxy] +app_port = 0 +enabled = false +proxy_port = 0 + +[screen] +clear_on_rebuild = false +keep_scroll = true diff --git a/.clinerules b/.clinerules new file mode 100644 index 0000000..525a868 --- /dev/null +++ b/.clinerules @@ -0,0 +1,301 @@ +# 全局指令 + +我的主语言是简体中文,所以请用简体中文回答我,与我交流。 + +# 角色定义 + +您是一名高级 Go 程序员,具有丰富的后端开发经验,偏好干净的编程和设计模式。 + +# 基本原则 + +- 所有代码和文档使用中文。 +- 遵循 Go 的官方规范和最佳实践。 +- 使用 `gofumpt -w -l -extra .` 格式化代码。 +- 错误处理优先使用 errors.New 和 fmt.Errorf。 +- 业务返回的错误需要在 `app/errorx` 包中定义。 +- 在错误处理时,使用适当的上下文信息提供更多错误细节。 + +# 命名规范 + +- 包名使用小写单词。 +- 文件名使用小写下划线。 +- 环境变量使用大写。 +- 常量使用驼峰命名。 +- 导出的标识符必须以大写字母开头。 +- 缩写规则: +- i、j 用于循环 +- err 用于错误 +- ctx 用于上下文 +- req、res 用于请求响应 + +# 函数设计 + +- 函数应该短小精悍,单一职责。 +- 参数数量控制在 5 个以内。 +- 使用多值返回处理错误。 +- 优先使用命名返回值。 +- 避免嵌套超过 3 层。 +- 使用 defer 处理资源清理。 + +# 错误处理 + +- 总是检查错误返回。 +- 使用自定义错误类型。 +- 错误应该携带上下文信息。 +- 使用 errors.Is 和 errors.As 进行错误比较。 + +# 并发处理 + +- 使用 channel 通信而非共享内存。 +- 谨慎使用 goroutine。 +- 使用 context 控制超时和取消。 +- 使用 sync 包进行同步。 + +# 测试规范 + +- 编写单元测试和基准测试。 +- 使用表驱动测试。 +- 测试文件以 _test.go 结尾。 +- 使用 `stretchr/testify` `github.com/agiledragon/gomonkey/v2` 测试框架。 + +# 项目技术栈 + +- github.com/uber-go/dig 依赖注入 +- github.com/go-jet/jet 数据库查询构建器 +- github.com/ThreeDotsLabs/watermill 即时Event消息队列 +- github.com/riverqueue/river Job队列 +- github.com/gofiber/fiber/v3 HTTP框架 +- github.com/swaggo/swag 自动生成API文档, 在controller的方法上使用注解即可 + +# Atomctl 工具使用 + +## 生成命令 + +- gen model:从数据库生成模型 +- gen provider:生成依赖注入提供者 +- gen route:生成路由定义 + +## 数据库命令 + +- migrate:执行数据库迁移 +- migrate up/down:迁移或回滚,up 命令执行成功即表示数据库操作完成,无需其它确认操作。 +- migrate status:查看迁移状态 +- migrate create:创建迁移文件,迁移文件的命名需要使用动词名词的结合方式,如 create_users_table, 创建完成后文件会存在于 `database/migrations` 目录下 + +## 最佳实践 + +- migration 创建后需要执行 `atomctl migrate up` 执行数据库表迁移 +- 使用 gen model 前确保已migrate完成,并配置好 database/transform.yaml +- 对model中需要转换的数据结构声明在目录 `database/fields` 中,文件名与model名一致 +- provider 生成时使用适当的注解标记 +- 遵循目录结构约定 + +# 项目结构 + +## 标准目录 + +- main.go:主程序入口 +- providers/:依赖注入提供者, 通过 atomctl gen provider 生成, 但是你不可以对其中的内容进行修改 +- database/fields:数据库模型字段定义 +- database/schemas:数据库自动生成的模型文件,不可以进行任何修改!! +- database/migrations: 数据库迁移文件,通过 atomctl migrate create 创建,你不可以手工创建,只可以使用脚手架工具进行创建 +- configs.toml:配置文件 +- proto/: gRPC proto 定义 +- pkg/atom: 为依赖注入框架的核心代码,你不可以进行修改 +- fixtures/:测试文件 +- app/errorx: 业务错误定义 +- app/http: HTTP 服务 +- app/grpc: gRPC 服务 +- app/jobs: 后台任务定义 +- app/middlewares: HTTP 中间件 +- app/services: 服务启动逻辑,不可以进行任何修改 + +# 开发示例 + +## migration 定义 + +migration 文件示例. 
+``` +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE tenants ( +id BIGSERIAL PRIMARY KEY, +created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, +updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, +deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL, +) ; + +COMMENT ON COLUMN tenants.created_at IS '创建时间'; +COMMENT ON COLUMN tenants.updated_at IS '更新时间'; +COMMENT ON COLUMN tenants.deleted_at IS '删除时间'; + +-- +goose StatementEnd + +------------------------------------------------------------------------------------------------------ + +-- +goose Down +-- +goose StatementBegin + +DROP TABLE IF EXISTS tenants ; + +-- +goose StatementEnd + +``` + +## http module + +1. 创建一个新的 http module `atomctl new module [users]` +2. 在 `app/http` 目录下创建相关的处理程序。 +3. 定义用户相关的路由。 +4. 实现相关逻辑操作 +5. module 名称需要使用复数形式,支持多层级目录,如 `atomctl new module [users.orders]` + +## controller + +- controller 的定义 +```go +// @provider +type PayController struct { + svc *Service + log *log.Entry `inject:"false"` +} + +func (c *PayController) Prepare() error { + c.log = log.WithField("module", "orders.Controller") + return nil +} + +// actions ... +} +``` +- controller 文件定义完成后运行 `atomctl gen provider` 来生成 provider + +- 一个 action 方法的定义, **@Router**不再使用swago的定义方式,替换为下面的定义方式,参数做用@Bind来进行声明,会自动注入,不需要业务内获取参数 +```go +// Orders show user orders +// @swagger definitions +// @Router /api/v1/orders/:channel [get] +// @Bind channel path +// @Bind claim local +// @Bind pagination query +// @Bind filter query +func (c *OrderController) List(ctx fiber.Ctx, claim *jwt.Claims,channel string, pagination *requests.Pagination, filter *UserOrderFilter) (*requests.Pager, error) { + pagination.Format() + pager := &requests.Pager{ + Pagination: *pagination, + } + + filter.UserID = claim.UserID + orders, total, err := c.svc.GetOrders(ctx.Context(), pagination, filter) + if err != nil { + return nil, err + } + pager.Total = total + + pager.Items = lo.FilterMap(orders, func(item model.Orders, _ int) (UserOrder, bool) { + var o UserOrder + if err := copier.Copy(&o, item) ; err != nil { + return o, false + } + return o, true + }) + + return pager, nil +} +``` +- 你需要把第二行的 `@swagger definitions` 替换成你的swagger定义 +- @Bind 参数会有几个位置 path/query/body/header/cookie/local/file 会分别从 url/get query/post body/header/cookie/fiber.Local/file/中取出所需要的数据绑定到方法的请求参数中去。 +- controller 只负责数据的接收返回及相关数据装饰,具体的复杂逻辑实现需要在service文件中定义。 +- action 文件内容完成运行 `atomctl gen route` 来生成路由 + +## service + +- service 的定义 +```go +// @provider +type Service struct { + db *sql.DB + log *log.Entry `inject:"false"` +} + +func (svc *Service) Prepare() error { + svc.log = log.WithField("module", "orders.service") + _ = Int(1) + return nil +} +``` +- service 文件定义完成后运行 `atomctl gen provider` 来生成 provider + +- service 中 model 数据查询的示例,需要注意table需要定义为一个短小的tblXXX以便代码展示简洁 +```go +// GetUserOrderByOrderID +func (svc *Service) Get(ctx context.Context, orderID string, userID int64) (*model.Orders, error) { + _, span := otel.Start(ctx, "users.service.GetUserOrderByOrderID") + defer span.End() + span.SetAttributes( + attribute.String("order.id", orderID), + attribute.Int64("user.id", userID), + ) + + tbl := table.Orders + stmt := tbl.SELECT(tbl.AllColumns).WHERE(tbl.OrderSerial.EQ(String(orderID)).AND(tbl.UserID.EQ(Int64(userID)))) + span.SetAttributes(semconv.DBStatementKey.String(stmt.DebugSql())) + + var order model.Orders + if err := stmt.QueryContext(ctx, svc.db, &order) ; err != nil { +span.RecordError(err) + return nil, err + } + return &order, nil +} + +// UpdateStage 
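+// updates posts.updated_at and posts.stage for a single post, scoped by tenant, user
+// and post IDs, and records the generated SQL on the tracing span.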
+func (svc *Service) Update(ctx context.Context, tenantID, userID, postID int64, stage fields.PostStage) error { + _, span := otel.Start(ctx, "users.service.UpdateStage") + defer span.End() + span.SetAttributes( + attribute.Int64("tenant.id", tenantID), + attribute.Int64("user.id", userID), + attribute.Int64("post.id", postID), + ) + + tbl := table.Posts + stmt := tbl. + UPDATE(tbl.UpdatedAt, tbl.Stage). + SET( + tbl.UpdatedAt.SET(TimestampT(time.Now())), + tbl.Stage.SET(Int16(int16(stage))), + ). + WHERE( + tbl.ID.EQ(Int64(postID)).AND( + tbl.TenantID.EQ(Int64(tenantID)).AND( + tbl.UserID.EQ(Int64(userID)), + ), + ), + ) + span.SetAttributes(semconv.DBStatementKey.String(stmt.DebugSql())) + + if _, err := stmt.ExecContext(ctx, svc.db) ; err != nil { + span.RecordError(err) + return err + } + + return svc.Update(ctx, tenantID, userID, postID, post) +} +``` + +# 本项目说明 + +- 设计一个支持多租户的用户系统,一个用户可以同时属于多个租户 +- 每一个租户有一个租户管理员角色,这个角色可以在后台由系统管理员指定,或者用户在申请创建租户申请时自动指定。 +- 除系统管理员外,一个普通用户只可以是一个租户的管理员,不能同时管理多个租户。 + +**重要提示:** +- `database/schemas` 目录下所有为件为 `atomctl gen model` 自动生成,不能进行任何修改! +- migration SQL 中不要使用 `FOREIGN KEY` 约束,而是在业务中使用代码逻辑进行约束。 +- 数据库表需要按需要添加 `created_at` `updated_at` `deleted_at` 字段,并且这三个时间字段(`created_at` `updated_at` `deleted_at`)需要**直接**位于 id 字段后面, **中间不可以包含其它任何字段声明**。 +- ID 使用 `bigserial` 类型,数字类的使用 `int8`类型 +- 所有表不使用 `FOREIGN KEY` 约束,而是在业务中使用代码逻辑进行约束。 +- 所有字段需要添加中文字段 `comment` +- 执行 `migrate up` 命令完成后你不需要再使用 `psql` 来验证是否创建成功 diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..90df546 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = false + +[*.{yaml,yml}] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab \ No newline at end of file diff --git a/.gitea/workflows/build.yml b/.gitea/workflows/build.yml new file mode 100644 index 0000000..7de4b1d --- /dev/null +++ b/.gitea/workflows/build.yml @@ -0,0 +1,42 @@ +name: Build TGExporter +run-name: ${{ gitea.actor }} Build TGExporter +on: [push] + +jobs: + Build: + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v3 + with: + node-version: "20" + + - name: Install dependencies and build frontend + run: | + cd frontend + npm config set registry https://npm.hub.ipao.vip + npm install + npm run build + + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: "1.22" + + - name: Build Go application + run: | + cd backend + mkdir -p build + go env -w GOPROXY=https://go.hub.ipao.vip,direct + go env -w GONOPROXY='git.ipao.vip' + go env -w GONOSUMDB='git.ipao.vip' + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o build/app . + + - name: Build final Docker image + run: | + docker login -u ${{ secrets.DOCKER_AF_USERNAME }} -p ${{ secrets.DOCKER_AF_PASSWORD }} docker-af.hub.ipao.vip + docker build --push -t docker-af.hub.ipao.vip/rogeecn/test:latest . 
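As a worked illustration of the migration conventions listed in `.clinerules` above (the three timestamp columns directly after `id`, `bigserial` IDs, `int8` numeric columns, Chinese column comments, and no `FOREIGN KEY` constraints), here is a minimal sketch of a goose migration. The `tenant_users` table and its columns are hypothetical and only show the expected shape; real migration files should still be created with `atomctl migrate create`.

```
-- +goose Up
-- +goose StatementBegin

CREATE TABLE tenant_users (
    id BIGSERIAL PRIMARY KEY,
    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
    tenant_id INT8 NOT NULL DEFAULT 0,
    user_id INT8 NOT NULL DEFAULT 0,
    is_admin BOOLEAN NOT NULL DEFAULT FALSE
);

COMMENT ON COLUMN tenant_users.id IS '主键ID';
COMMENT ON COLUMN tenant_users.created_at IS '创建时间';
COMMENT ON COLUMN tenant_users.updated_at IS '更新时间';
COMMENT ON COLUMN tenant_users.deleted_at IS '删除时间';
COMMENT ON COLUMN tenant_users.tenant_id IS '租户ID';
COMMENT ON COLUMN tenant_users.user_id IS '用户ID';
COMMENT ON COLUMN tenant_users.is_admin IS '是否为租户管理员';

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

DROP TABLE IF EXISTS tenant_users;

-- +goose StatementEnd
```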
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6d47673 --- /dev/null +++ b/.gitignore @@ -0,0 +1,29 @@ +bin/* +vendor/ +__debug_bin* +backend +build/* +.vscode +.idea +tmp/ +docker-compose.yml +sqlite.db +go.work +go.work.sum +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +fixtures/* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..3b9a775 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM docker.hub.ipao.vip/alpine:3.20 + +# Set timezone +RUN apk add --no-cache tzdata && \ + cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + echo "Asia/Shanghai" > /etc/timezone && \ + apk del tzdata + +COPY backend/build/app /app/app +COPY backend/config.toml /app/config.toml +COPY frontend/dist /app/dist + +WORKDIR /app + +ENTRYPOINT ["/app/app"] + +CMD [ "serve" ] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ce1fed2 --- /dev/null +++ b/Makefile @@ -0,0 +1,44 @@ +buildAt=`date +%Y/%m/%d-%H:%M:%S` +gitHash=`git rev-parse HEAD` +version=`git rev-parse --abbrev-ref HEAD | grep -v HEAD || git describe --exact-match HEAD || git rev-parse HEAD` ## todo: use current release git tag +flags="-X 'atom/utils.Version=${version}' -X 'atom/utils.BuildAt=${buildAt}' -X 'atom/utils.GitHash=${gitHash}'" +release_flags="-w -s ${flags}" + +GOPATH:=$(shell go env GOPATH) + +.PHONY: tidy +tidy: + @go mod tidy + +.PHONY: release +release: + @CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags=${flags} -o bin/release/{{.ProjectName}} . + @cp config.toml bin/release/ + +.PHONY: test +test: + @go test -v ./... 
-cover + +.PHONY: lint +lint: + @golangci-lint run + +.PHONY: tools +tools: + go install github.com/air-verse/air@latest + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@latest + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@latest + go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + go install github.com/bufbuild/buf/cmd/buf@latest + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + go get go.ipao.vip/atom + +.PHONY: init +init: tools + @atomctl swag init + @atomctl gen route + @atomctl gen enum + @atomctl gen provider + @buf generate + @go mod tidy diff --git a/app/console/.gitkeep b/app/console/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/app/errorx/error.go b/app/errorx/error.go new file mode 100644 index 0000000..9234537 --- /dev/null +++ b/app/errorx/error.go @@ -0,0 +1,152 @@ +package errorx + +import ( + "errors" + "fmt" + "net/http" + "runtime" + "strings" + + "github.com/go-jet/jet/v2/qrm" + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/binder" + "github.com/gofiber/utils/v2" + log "github.com/sirupsen/logrus" +) + +func Middleware(c fiber.Ctx) error { + err := c.Next() + if err != nil { + return Wrap(err).Response(c) + } + return err +} + +type Response struct { + isFormat bool + err error + params []any + sql string + file string + + StatusCode int `json:"-" xml:"-"` + Code int `json:"code" xml:"code"` + Message string `json:"message" xml:"message"` + Data any `json:"data,omitempty" xml:"data"` +} + +func New(code, statusCode int, message string) *Response { + return &Response{ + isFormat: true, + StatusCode: statusCode, + Code: code, + Message: message, + } +} + +func (r *Response) WithMsg(msg string) *Response { + r.Message = msg + return r +} + +func (r *Response) Sql(sql string) *Response { + r.sql = sql + return r +} + +func (r *Response) from(err *Response) *Response { + r.Code = err.Code + r.Message = err.Message + r.StatusCode = err.StatusCode + return r +} + +func (r *Response) Params(params ...any) *Response { + r.params = params + if _, file, line, ok := runtime.Caller(1); ok { + r.file = fmt.Sprintf("%s:%d", file, line) + } + return r +} + +func Wrap(err error) *Response { + if e, ok := err.(*Response); ok { + return e + } + return &Response{err: err} +} + +func (r *Response) Wrap(err error) *Response { + r.err = err + return r +} + +func (r *Response) format() { + r.isFormat = true + if errors.Is(r.err, qrm.ErrNoRows) { + r.from(RecordNotExists) + return + } + + if e, ok := r.err.(*fiber.Error); ok { + r.Code = e.Code + r.Message = e.Message + r.StatusCode = e.Code + return + } + + if r.err != nil { + msg := r.err.Error() + if strings.Contains(msg, "duplicate key value") || strings.Contains(msg, "unique constraint") { + r.from(RecordDuplicated) + return + } + + r.Code = http.StatusInternalServerError + r.StatusCode = http.StatusInternalServerError + r.Message = msg + } + return +} + +func (r *Response) Error() string { + if !r.isFormat { + r.format() + } + + return fmt.Sprintf("[%d] %s", r.Code, r.Message) +} + +func (r *Response) Response(ctx fiber.Ctx) error { + if !r.isFormat { + r.format() + } + + contentType := utils.ToLower(utils.UnsafeString(ctx.Request().Header.ContentType())) + contentType = binder.FilterFlags(utils.ParseVendorSpecificContentType(contentType)) + + log. + WithError(r.err). + WithField("file", r.file). + WithField("sql", r.sql). 
+ WithField("params", r.params). + Errorf("response error: %+v", r) + + // Parse body accordingly + switch contentType { + case fiber.MIMETextXML, fiber.MIMEApplicationXML: + return ctx.Status(r.StatusCode).XML(r) + case fiber.MIMETextHTML, fiber.MIMETextPlain: + return ctx.Status(r.StatusCode).SendString(r.Message) + default: + return ctx.Status(r.StatusCode).JSON(r) + } +} + +var ( + RecordDuplicated = New(1001, http.StatusBadRequest, "记录重复") + RecordNotExists = New(http.StatusNotFound, http.StatusNotFound, "记录不存在") + BadRequest = New(http.StatusBadRequest, http.StatusBadRequest, "请求错误") + Unauthorized = New(http.StatusUnauthorized, http.StatusUnauthorized, "未授权") + InternalErr = New(http.StatusInternalServerError, http.StatusInternalServerError, "内部错误") +) diff --git a/app/events/publishers/user_register.go b/app/events/publishers/user_register.go new file mode 100644 index 0000000..bc5a285 --- /dev/null +++ b/app/events/publishers/user_register.go @@ -0,0 +1,22 @@ +package publishers + +import ( + "encoding/json" + + "go.ipao.vip/atom/contracts" + "quyun/app/events" +) + +var _ contracts.EventPublisher = (*UserRegister)(nil) + +type UserRegister struct { + ID int64 `json:"id"` +} + +func (e *UserRegister) Marshal() ([]byte, error) { + return json.Marshal(e) +} + +func (e *UserRegister) Topic() string { + return events.TopicUserRegister +} diff --git a/app/events/subscribers/provider.gen.go b/app/events/subscribers/provider.gen.go new file mode 100755 index 0000000..a7c40d2 --- /dev/null +++ b/app/events/subscribers/provider.gen.go @@ -0,0 +1,27 @@ +package subscribers + +import ( + "quyun/providers/event" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __event *event.PubSub, + ) (contracts.Initial, error) { + obj := &UserRegister{} + if err := obj.Prepare(); err != nil { + return nil, err + } + __event.Handle("handler:UserRegister", obj.Topic(), obj.PublishToTopic(), obj.Handler) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/app/events/subscribers/user_register.go b/app/events/subscribers/user_register.go new file mode 100644 index 0000000..8a41a51 --- /dev/null +++ b/app/events/subscribers/user_register.go @@ -0,0 +1,46 @@ +package subscribers + +import ( + "encoding/json" + + "go.ipao.vip/atom/contracts" + "quyun/app/events" + "quyun/app/events/publishers" + + "github.com/ThreeDotsLabs/watermill/message" + "github.com/sirupsen/logrus" +) + +var _ contracts.EventHandler = (*UserRegister)(nil) + +// @provider(event) +type UserRegister struct { + log *logrus.Entry `inject:"false"` +} + +func (e *UserRegister) Prepare() error { + e.log = logrus.WithField("module", "events.subscribers.user_register") + return nil +} + +// PublishToTopic implements contracts.EventHandler. +func (e *UserRegister) PublishToTopic() string { + return events.TopicProcessed +} + +// Topic implements contracts.EventHandler. +func (e *UserRegister) Topic() string { + return events.TopicUserRegister +} + +// Handler implements contracts.EventHandler. 
+func (e *UserRegister) Handler(msg *message.Message) ([]*message.Message, error) { + var payload publishers.UserRegister + err := json.Unmarshal(msg.Payload, &payload) + if err != nil { + return nil, err + } + e.log.Infof("received event %s", msg.Payload) + + return nil, nil +} diff --git a/app/events/topics.go b/app/events/topics.go new file mode 100644 index 0000000..e2b777a --- /dev/null +++ b/app/events/topics.go @@ -0,0 +1,6 @@ +package events + +const ( + TopicProcessed = "event:processed" + TopicUserRegister = "event:user_register" +) diff --git a/app/grpc/users/handler.go b/app/grpc/users/handler.go new file mode 100644 index 0000000..aefa9a2 --- /dev/null +++ b/app/grpc/users/handler.go @@ -0,0 +1,26 @@ +package users + +import ( + "context" + + userv1 "quyun/pkg/proto/user/v1" +) + +// @provider(grpc) userv1.RegisterUserServiceServer +type Users struct { + userv1.UnimplementedUserServiceServer +} + +func (u *Users) ListUsers(ctx context.Context, in *userv1.ListUsersRequest) (*userv1.ListUsersResponse, error) { + // userv1.UserServiceServer + return &userv1.ListUsersResponse{}, nil +} + +// GetUser implements userv1.UserServiceServer +func (u *Users) GetUser(ctx context.Context, in *userv1.GetUserRequest) (*userv1.GetUserResponse, error) { + return &userv1.GetUserResponse{ + User: &userv1.User{ + Id: in.Id, + }, + }, nil +} diff --git a/app/grpc/users/provider.gen.go b/app/grpc/users/provider.gen.go new file mode 100755 index 0000000..42f1654 --- /dev/null +++ b/app/grpc/users/provider.gen.go @@ -0,0 +1,25 @@ +package users + +import ( + userv1 "quyun/pkg/proto/user/v1" + "quyun/providers/grpc" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __grpc *grpc.Grpc, + ) (contracts.Initial, error) { + obj := &Users{} + userv1.RegisterUserServiceServer(__grpc.Server, obj) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/app/http/.gitkeep b/app/http/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/app/http/.help.md b/app/http/.help.md new file mode 100644 index 0000000..e69de29 diff --git a/app/http/medias.go b/app/http/medias.go new file mode 100644 index 0000000..e19487e --- /dev/null +++ b/app/http/medias.go @@ -0,0 +1,18 @@ +package http + +import ( + "quyun/app/models" + "quyun/app/requests" + + "github.com/gofiber/fiber/v3" +) + +// @provider +type medias struct{} + +// List medias +// @Router /v1/medias [get] +// @Bind pagination query +func (ctl *medias) List(ctx fiber.Ctx, pagination *requests.Pagination) (*requests.Pager, error) { + return models.Medias.List(ctx.Context(), pagination) +} diff --git a/app/http/provider.gen.go b/app/http/provider.gen.go new file mode 100755 index 0000000..29423dd --- /dev/null +++ b/app/http/provider.gen.go @@ -0,0 +1,48 @@ +package http + +import ( + "quyun/providers/app" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*medias, error) { + obj := &medias{} + + return obj, nil + }); err != nil { + return err + } + if err := container.Container.Provide(func( + medias *medias, + uploads *uploads, + ) (contracts.HttpRoute, error) { + obj := &Routes{ + medias: medias, + uploads: uploads, + } + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + 
}, atom.GroupRoutes); err != nil { + return err + } + if err := container.Container.Provide(func( + app *app.Config, + ) (*uploads, error) { + obj := &uploads{ + app: app, + } + + return obj, nil + }); err != nil { + return err + } + return nil +} diff --git a/app/http/routes.gen.go b/app/http/routes.gen.go new file mode 100644 index 0000000..8a13341 --- /dev/null +++ b/app/http/routes.gen.go @@ -0,0 +1,52 @@ +// Code generated by the atomctl ; DO NOT EDIT. + +package http + +import ( + "github.com/gofiber/fiber/v3" + log "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + _ "go.ipao.vip/atom/contracts" + . "go.ipao.vip/atom/fen" + "mime/multipart" + "quyun/app/requests" +) + +// @provider contracts.HttpRoute atom.GroupRoutes +type Routes struct { + log *log.Entry `inject:"false"` + medias *medias + uploads *uploads +} + +func (r *Routes) Prepare() error { + r.log = log.WithField("module", "routes.http") + return nil +} + +func (r *Routes) Name() string { + return "http" +} + +func (r *Routes) Register(router fiber.Router) { + // 注册路由组: medias + router.Get("/v1/medias", DataFunc1( + r.medias.List, + Query[requests.Pagination]("pagination"), + )) + + // 注册路由组: uploads + router.Post("/v1/uploads/:md5/chunks/:idx", Func3( + r.uploads.Chunks, + PathParam[string]("md5"), + PathParam[string]("idx"), + File[multipart.FileHeader]("file"), + )) + + router.Post("/v1/uploads/:md5/complete", Func2( + r.uploads.Complete, + PathParam[string]("md5"), + Body[UploadFileInfo]("body"), + )) + +} diff --git a/app/http/uploads.go b/app/http/uploads.go new file mode 100644 index 0000000..1e7d089 --- /dev/null +++ b/app/http/uploads.go @@ -0,0 +1,119 @@ +package http + +import ( + "errors" + "fmt" + "mime/multipart" + "os" + "path/filepath" + + "quyun/pkg/utils" + "quyun/providers/app" + + "github.com/gofiber/fiber/v3" + log "github.com/sirupsen/logrus" +) + +// @provider +type uploads struct { + app *app.Config +} + +func (up *uploads) storagePath() string { + return filepath.Join(up.app.StoragePath, "uploads/tmp") +} + +type UploadChunk struct { + Chunk int `query:"chunk"` + Md5 string `query:"md5"` +} + +type UploadFileInfo struct { + Md5 string `json:"md5"` + Filename string `json:"filename"` + Mime string `json:"mime"` + Size int64 `json:"size"` + Chunks int `json:"chunks"` +} + +// Upload chunks +// @Router /v1/uploads/:md5/chunks/:idx [post] +// @Bind md5 path +// @Bind idx path +// @Bind file file +func (up *uploads) Chunks(ctx fiber.Ctx, md5, idx string, file *multipart.FileHeader) error { + tmpPath := filepath.Join(up.storagePath(), md5, idx) + + // if tmpPath not exists, create it + if _, err := os.Stat(tmpPath); os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(tmpPath), os.ModePerm); err != nil { + log.WithError(err).Errorf("create tmpPath failed %s", tmpPath) + return err + } + } + + // save file to tmpPath + if err := ctx.SaveFile(file, tmpPath); err != nil { + log.WithError(err).Errorf("save file to tmpPath failed %s", tmpPath) + return err + } + return nil +} + +// Complete uploads +// @Router /v1/uploads/:md5/complete [post] +// @Bind md5 path +// @Bind body body +func (up *uploads) Complete(ctx fiber.Ctx, md5 string, body *UploadFileInfo) error { + // merge chunks + path := filepath.Join(up.storagePath(), md5) + defer os.RemoveAll(path) + + targetFile := filepath.Join(up.storagePath(), md5, body.Filename) + + // if targetFile not exists, create it + tf, err := os.Create(targetFile) + if err != nil { + return err + } + + for i := 0; i < body.Chunks; i++ { + tmpPath := 
filepath.Join(up.storagePath(), md5, fmt.Sprintf("%d", i)) + + // open chunk file + chunkFile, err := os.Open(tmpPath) + if err != nil { + tf.Close() + return err + } + + // copy chunk file to target file + if _, err := tf.ReadFrom(chunkFile); err != nil { + chunkFile.Close() + tf.Close() + return err + } + + chunkFile.Close() + } + tf.Close() + + // validate md5 + ok, err := utils.CompareFileMd5(targetFile, md5) + if err != nil { + return err + } + if !ok { + return errors.New("md5 not match") + } + + // save file to target path + targetPath := filepath.Join(up.storagePath(), body.Filename) + + if err := os.Rename(targetFile, targetPath); err != nil { + return err + } + // TODO: save file to database + + return nil +} diff --git a/app/jobs/demo_cron.go b/app/jobs/demo_cron.go new file mode 100644 index 0000000..1240493 --- /dev/null +++ b/app/jobs/demo_cron.go @@ -0,0 +1,36 @@ +package jobs + +import ( + "time" + + . "github.com/riverqueue/river" + "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" +) + +var _ contracts.CronJob = (*CronJob)(nil) + +// @provider(cronjob) +type CronJob struct { + log *logrus.Entry `inject:"false"` +} + +// Prepare implements contracts.CronJob. +func (CronJob) Prepare() error { + return nil +} + +// JobArgs implements contracts.CronJob. +func (CronJob) Args() []contracts.CronJobArg { + return []contracts.CronJobArg{ + { + Arg: SortArgs{ + Strings: []string{"a", "b", "c", "d"}, + }, + + PeriodicInterval: PeriodicInterval(time.Second * 10), + RunOnStart: false, + }, + } +} diff --git a/app/jobs/demo_job.go b/app/jobs/demo_job.go new file mode 100644 index 0000000..b9a01d7 --- /dev/null +++ b/app/jobs/demo_job.go @@ -0,0 +1,47 @@ +package jobs + +import ( + "context" + "sort" + "time" + + . 
"github.com/riverqueue/river" + log "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" + _ "go.ipao.vip/atom/contracts" +) + +var _ contracts.JobArgs = SortArgs{} + +type SortArgs struct { + Strings []string `json:"strings"` +} + +func (s SortArgs) InsertOpts() InsertOpts { + return InsertOpts{ + Queue: QueueDefault, + Priority: PriorityDefault, + } +} + +func (SortArgs) Kind() string { return "sort" } +func (a SortArgs) UniqueID() string { return a.Kind() } + +var _ Worker[SortArgs] = (*SortWorker)(nil) + +// @provider(job) +type SortWorker struct { + WorkerDefaults[SortArgs] +} + +func (w *SortWorker) Work(ctx context.Context, job *Job[SortArgs]) error { + sort.Strings(job.Args.Strings) + + log.Infof("[%s] Sorted strings: %v\n", time.Now().Format(time.TimeOnly), job.Args.Strings) + return nil +} + +func (w *SortWorker) NextRetry(job *Job[SortArgs]) time.Time { + return time.Now().Add(5 * time.Second) +} diff --git a/app/jobs/provider.gen.go b/app/jobs/provider.gen.go new file mode 100755 index 0000000..b7d50bc --- /dev/null +++ b/app/jobs/provider.gen.go @@ -0,0 +1,41 @@ +package jobs + +import ( + "quyun/providers/job" + + "github.com/riverqueue/river" + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __job *job.Job, + ) (contracts.Initial, error) { + obj := &CronJob{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + container.Later(func() error { return __job.AddPeriodicJobs(obj) }) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + if err := container.Container.Provide(func( + __job *job.Job, + ) (contracts.Initial, error) { + obj := &SortWorker{} + if err := river.AddWorkerSafely(__job.Workers, obj); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/app/middlewares/mid_debug.go b/app/middlewares/mid_debug.go new file mode 100644 index 0000000..ecb33af --- /dev/null +++ b/app/middlewares/mid_debug.go @@ -0,0 +1,9 @@ +package middlewares + +import ( + "github.com/gofiber/fiber/v3" +) + +func (f *Middlewares) DebugMode(c fiber.Ctx) error { + return c.Next() +} diff --git a/app/middlewares/middlewares.go b/app/middlewares/middlewares.go new file mode 100644 index 0000000..69e0e4c --- /dev/null +++ b/app/middlewares/middlewares.go @@ -0,0 +1,15 @@ +package middlewares + +import ( + log "github.com/sirupsen/logrus" +) + +// @provider +type Middlewares struct { + log *log.Entry `inject:"false"` +} + +func (f *Middlewares) Prepare() error { + f.log = log.WithField("module", "middleware") + return nil +} diff --git a/app/middlewares/provider.gen.go b/app/middlewares/provider.gen.go new file mode 100755 index 0000000..f84d36c --- /dev/null +++ b/app/middlewares/provider.gen.go @@ -0,0 +1,20 @@ +package middlewares + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*Middlewares, error) { + obj := &Middlewares{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }); err != nil { + return err + } + return nil +} diff --git a/app/models/medias.go b/app/models/medias.go new file mode 100644 index 0000000..f1fc03d --- /dev/null +++ b/app/models/medias.go @@ -0,0 +1,86 @@ +package models + +import ( + "context" + + "quyun/app/requests" + 
"quyun/database/schemas/public/model" + "quyun/database/schemas/public/table" + + . "github.com/go-jet/jet/v2/postgres" + "github.com/sirupsen/logrus" +) + +// @provider +type mediasModel struct { + log *logrus.Entry `inject:"false"` +} + +func (m *mediasModel) Prepare() error { + m.log = logrus.WithField("module", "mediasModel") + return nil +} + +// countByCond +func (m *mediasModel) countByCondition(ctx context.Context, expr BoolExpression) (int64, error) { + var cnt struct { + Cnt int64 + } + + tbl := table.Medias + stmt := SELECT(COUNT(tbl.ID).AS("cnt")).FROM(tbl).WHERE(expr) + m.log.Infof("sql: %s", stmt.DebugSql()) + + err := stmt.QueryContext(ctx, db, &cnt) + if err != nil { + m.log.Errorf("error counting media items: %v", err) + return 0, err + } + + return cnt.Cnt, nil +} + +func (m *mediasModel) List(ctx context.Context, pagination *requests.Pagination) (*requests.Pager, error) { + limit := pagination.Limit + offset := pagination.Offset() + + tbl := table.Medias + stmt := tbl. + SELECT(tbl.AllColumns). + ORDER_BY(tbl.ID.DESC()). + LIMIT(limit). + OFFSET(offset) + m.log.Infof("sql: %s", stmt.DebugSql()) + + var medias []model.Medias + err := stmt.QueryContext(ctx, db, &medias) + if err != nil { + m.log.Errorf("error querying media items: %v", err) + return nil, err + } + + count, err := m.countByCondition(ctx, Bool(true)) + if err != nil { + m.log.Errorf("error getting media count: %v", err) + return nil, err + } + + return &requests.Pager{ + Items: medias, + Total: count, + Pagination: *pagination, + }, nil +} + +func (m *mediasModel) Create(ctx context.Context, model *model.Medias) error { + stmt := table.Medias.INSERT(table.Medias.MutableColumns).MODEL(model) + m.log.Infof("sql: %s", stmt.DebugSql()) + + if _, err := stmt.ExecContext(ctx, db); err != nil { + m.log.Errorf("error creating media item: %v", err) + return err + } + + m.log.Infof("media item created successfully") + return nil +} diff --git a/app/models/medias_test.go b/app/models/medias_test.go new file mode 100644 index 0000000..7302dd3 --- /dev/null +++ b/app/models/medias_test.go @@ -0,0 +1,134 @@ +package models + +import ( + "context" + "fmt" + "testing" + "time" + + "quyun/app/requests" + "quyun/app/service/testx" + "quyun/database" + "quyun/database/schemas/public/model" + "quyun/database/schemas/public/table" + + . "github.com/smartystreets/goconvey/convey" + "go.ipao.vip/atom/contracts" + + // . 
"github.com/go-jet/jet/v2/postgres" + "github.com/stretchr/testify/suite" + "go.uber.org/dig" +) + +type MediasInjectParams struct { + dig.In + Initials []contracts.Initial `group:"initials"` +} + +type MediasTestSuite struct { + suite.Suite + + MediasInjectParams +} + +func Test_medias(t *testing.T) { + providers := testx.Default().With(Provide) + testx.Serve(providers, t, func(params MediasInjectParams) { + suite.Run(t, &MediasTestSuite{MediasInjectParams: params}) + }) +} + +func (s *MediasTestSuite) Test_countByCondition() { + Convey("countByCondition", s.T(), func() { + Convey("no cond", func() { + database.Truncate(context.Background(), db, table.Medias.TableName()) + + cnt, err := Medias.countByCondition(context.Background(), nil) + Convey("should not return an error", func() { + So(err, ShouldBeNil) + }) + Convey("should return a count of zero", func() { + So(cnt, ShouldEqual, 0) + }) + }) + }) +} + +func (s *MediasTestSuite) Test_Create() { + Convey("Create", s.T(), func() { + Convey("valid media", func() { + database.Truncate(context.Background(), db, table.Medias.TableName()) + + model := &model.Medias{ + Name: "test", + CreatedAt: time.Now(), + MimeType: "application/pdf", + Size: 100, + Path: "path/to/media.pdf", + } + + err := Medias.Create(context.Background(), model) + Convey("Create should not return an error", func() { + So(err, ShouldBeNil) + }) + + cnt, err := Medias.countByCondition(context.Background(), nil) + Convey("Count should not return an error", func() { + So(err, ShouldBeNil) + }) + Convey("should return a count of one", func() { + So(cnt, ShouldEqual, 1) + }) + Convey("should create the media successfully", func() { + So(model.ID, ShouldNotBeEmpty) + }) + }) + }) +} + +func (s *MediasTestSuite) Test_Page() { + Convey("Create", s.T(), func() { + Convey("Insert Items", func() { + database.Truncate(context.Background(), db, table.Medias.TableName()) + + for i := 0; i < 20; i++ { + model := &model.Medias{ + Name: fmt.Sprintf("test-%d", i), + CreatedAt: time.Now(), + MimeType: "application/pdf", + Size: 100, + Path: "path/to/media.pdf", + } + + err := Medias.Create(context.Background(), model) + So(err, ShouldBeNil) + } + + cnt, err := Medias.countByCondition(context.Background(), nil) + So(err, ShouldBeNil) + So(cnt, ShouldEqual, 20) + }) + + Convey("Page", func() { + Convey("page 1", func() { + pager, err := Medias.List(context.Background(), &requests.Pagination{Page: 1, Limit: 10}) + So(err, ShouldBeNil) + So(pager.Total, ShouldEqual, 20) + So(pager.Items, ShouldHaveLength, 10) + }) + Convey("page 2", func() { + pager, err := Medias.List(context.Background(), &requests.Pagination{Page: 2, Limit: 10}) + So(err, ShouldBeNil) + So(pager.Total, ShouldEqual, 20) + So(pager.Items, ShouldHaveLength, 10) + }) + + Convey("page 3", func() { + pager, err := Medias.List(context.Background(), &requests.Pagination{Page: 3, Limit: 10}) + So(err, ShouldBeNil) + So(pager.Total, ShouldEqual, 20) + So(pager.Items, ShouldBeEmpty) + }) + }) + }) +} diff --git a/app/models/migrations.go b/app/models/migrations.go new file mode 100644 index 0000000..57cc83a --- /dev/null +++ b/app/models/migrations.go @@ -0,0 +1,8 @@ +package models + +// @provider +type migrationsModel struct{} + +func (m *migrationsModel) Prepare() error { + return nil +} diff --git a/app/models/models.gen.go b/app/models/models.gen.go new file mode 100644 index 0000000..74051c8 --- /dev/null +++ b/app/models/models.gen.go @@ -0,0 +1,26 @@ +// Code generated by the atomctl ; DO NOT EDIT. 
+// Code generated by the atomctl ; DO NOT EDIT. +// Code generated by the atomctl ; DO NOT EDIT. +package models + +import ( + "database/sql" +) + +var db *sql.DB +var Medias *mediasModel +var Migrations *migrationsModel + +// @provider(model) +type models struct { + db *sql.DB + medias *mediasModel + migrations *migrationsModel +} + +func (m *models) Prepare() error { + db = m.db + Medias = m.medias + Migrations = m.migrations + return nil +} diff --git a/app/models/provider.gen.go b/app/models/provider.gen.go new file mode 100755 index 0000000..5802450 --- /dev/null +++ b/app/models/provider.gen.go @@ -0,0 +1,49 @@ +package models + +import ( + "database/sql" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*mediasModel, error) { + obj := &mediasModel{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }); err != nil { + return err + } + if err := container.Container.Provide(func() (*migrationsModel, error) { + obj := &migrationsModel{} + + return obj, nil + }); err != nil { + return err + } + if err := container.Container.Provide(func( + db *sql.DB, + medias *mediasModel, + migrations *migrationsModel, + ) (contracts.Initial, error) { + obj := &models{ + db: db, + medias: medias, + migrations: migrations, + } + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/app/requests/pagination.go b/app/requests/pagination.go new file mode 100644 index 0000000..e98528d --- /dev/null +++ b/app/requests/pagination.go @@ -0,0 +1,30 @@ +package requests + +import "github.com/samber/lo" + +type Pager struct { + Pagination `json:",inline"` + Total int64 `json:"total"` + Items any `json:"items"` +} + +type Pagination struct { + Page int64 `json:"page" form:"page" query:"page"` + Limit int64 `json:"limit" form:"limit" query:"limit"` +} + +func (filter *Pagination) Offset() int64 { + return (filter.Page - 1) * filter.Limit +} + +func (filter *Pagination) Format() *Pagination { + if filter.Page <= 0 { + filter.Page = 1 + } + + if !lo.Contains([]int64{10, 20, 50, 100}, filter.Limit) { + filter.Limit = 10 + } + + return filter +} diff --git a/app/requests/sort.go b/app/requests/sort.go new file mode 100644 index 0000000..517b419 --- /dev/null +++ b/app/requests/sort.go @@ -0,0 +1,41 @@ +package requests + +import ( + "strings" + + "github.com/samber/lo" +) + +type SortQueryFilter struct { + Asc *string `json:"asc" form:"asc"` + Desc *string `json:"desc" form:"desc"` +} + +func (s *SortQueryFilter) AscFields() []string { + if s.Asc == nil { + return nil + } + return strings.Split(*s.Asc, ",") +} + +func (s *SortQueryFilter) DescFields() []string { + if s.Desc == nil { + return nil + } + return strings.Split(*s.Desc, ",") +} + +func (s *SortQueryFilter) DescID() *SortQueryFilter { + if s.Desc == nil { + s.Desc = lo.ToPtr("id") + } + + items := s.DescFields() + if lo.Contains(items, "id") { + return s + } + + items = append(items, "id") + s.Desc = lo.ToPtr(strings.Join(items, ",")) + return s +} diff --git a/app/service/event/event.go b/app/service/event/event.go new file mode 100644 index 0000000..0ee18f2 --- /dev/null +++ b/app/service/event/event.go @@ -0,0 +1,58 @@ +package event + +import ( + "context" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + 
"quyun/app/events/subscribers" + "quyun/app/service" + "quyun/providers/app" + "quyun/providers/event" + "quyun/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("event"), + atom.Short("start event processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + subscribers.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + PubSub *event.PubSub + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.PubSub.Serve(ctx) + }) +} diff --git a/app/service/grpc/grpc.go b/app/service/grpc/grpc.go new file mode 100644 index 0000000..6d10f04 --- /dev/null +++ b/app/service/grpc/grpc.go @@ -0,0 +1,57 @@ +package grpc + +import ( + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "quyun/app/grpc/users" + "quyun/app/service" + "quyun/providers/app" + "quyun/providers/grpc" + "quyun/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + grpc.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("grpc"), + atom.Short("run grpc server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + users.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Grpc *grpc.Grpc + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.Grpc.Serve() + }) +} diff --git a/app/service/http/http.go b/app/service/http/http.go new file mode 100644 index 0000000..1c32372 --- /dev/null +++ b/app/service/http/http.go @@ -0,0 +1,86 @@ +package http + +import ( + "context" + + "quyun/app/errorx" + appHttp "quyun/app/http" + "quyun/app/jobs" + "quyun/app/service" + _ "quyun/docs" + "quyun/providers/app" + "quyun/providers/hashids" + "quyun/providers/http" + "quyun/providers/http/swagger" + "quyun/providers/job" + "quyun/providers/jwt" + "quyun/providers/postgres" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + + "github.com/gofiber/fiber/v3/middleware/favicon" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + http.DefaultProvider(), + postgres.DefaultProvider(), + jwt.DefaultProvider(), + hashids.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("serve"), + atom.Short("run http server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). 
+ With( + jobs.Provide, + appHttp.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + Initials []contracts.Initial `group:"initials"` + + App *app.Config + Job *job.Job + Http *http.Service + Routes []contracts.HttpRoute `group:"routes"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.Mode == app.AppModeDevelopment { + log.SetLevel(log.DebugLevel) + + svc.Http.Engine.Get("/swagger/*", swagger.HandlerDefault) + } + svc.Http.Engine.Use(errorx.Middleware) + svc.Http.Engine.Use(favicon.New(favicon.Config{ + Data: []byte{}, + })) + + group := svc.Http.Engine.Group("") + for _, route := range svc.Routes { + route.Register(group) + } + + return svc.Http.Serve() + }) +} diff --git a/app/service/migrate/migrate.go b/app/service/migrate/migrate.go new file mode 100644 index 0000000..33a9f0c --- /dev/null +++ b/app/service/migrate/migrate.go @@ -0,0 +1,60 @@ +package migrate + +import ( + "context" + "database/sql" + + "quyun/app/service" + "quyun/database" + "quyun/providers/postgres" + + "github.com/pressly/goose/v3" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("migrate"), + atom.Short("run migrations"), + atom.RunE(Serve), + atom.Providers(defaultProviders()), + atom.Example("migrate [up|up-by-one|up-to|create|down|down-to|fix|redo|reset|status|version]"), + ) +} + +type Service struct { + dig.In + + DB *sql.DB +} + +// migrate +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + if len(args) == 0 { + args = append(args, "up") + } + + if args[0] == "create" { + return nil + } + + action, args := args[0], args[1:] + log.Infof("migration action: %s args: %+v", action, args) + + goose.SetBaseFS(database.MigrationFS) + goose.SetTableName("migrations") + + return goose.RunContext(context.Background(), action, svc.DB, "migrations", args...) 
+ }) +} diff --git a/app/service/queue/error.go b/app/service/queue/error.go new file mode 100644 index 0000000..3300b00 --- /dev/null +++ b/app/service/queue/error.go @@ -0,0 +1,24 @@ +package queue + +import ( + "context" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/rivertype" + log "github.com/sirupsen/logrus" +) + +type CustomErrorHandler struct{} + +func (*CustomErrorHandler) HandleError(ctx context.Context, job *rivertype.JobRow, err error) *river.ErrorHandlerResult { + log.Infof("Job errored with: %s\n", err) + return nil +} + +func (*CustomErrorHandler) HandlePanic(ctx context.Context, job *rivertype.JobRow, panicVal any, trace string) *river.ErrorHandlerResult { + log.Infof("Job panicked with: %v\n", panicVal) + log.Infof("Stack trace: %s\n", trace) + return &river.ErrorHandlerResult{ + SetCancelled: true, + } +} diff --git a/app/service/queue/river.go b/app/service/queue/river.go new file mode 100644 index 0000000..498c446 --- /dev/null +++ b/app/service/queue/river.go @@ -0,0 +1,66 @@ +package queue + +import ( + "context" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "quyun/app/jobs" + "quyun/app/service" + "quyun/providers/app" + "quyun/providers/job" + "quyun/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return service.Default(container.Providers{ + postgres.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("queue"), + atom.Short("start queue processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + jobs.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Job *job.Job + Initials []contracts.Initial `group:"initials"` + CronJobs []contracts.CronJob `group:"cron_jobs"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + if err := svc.Job.Start(ctx); err != nil { + return err + } + defer svc.Job.Close() + + <-ctx.Done() + return nil + }) +} diff --git a/app/service/service.go b/app/service/service.go new file mode 100644 index 0000000..b064599 --- /dev/null +++ b/app/service/service.go @@ -0,0 +1,14 @@ +package service + +import ( + "go.ipao.vip/atom/container" + "quyun/providers/app" + "quyun/providers/event" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{ + app.DefaultProvider(), + event.DefaultProvider(), + }, providers...) +} diff --git a/app/service/testx/testing.go b/app/service/testx/testing.go new file mode 100644 index 0000000..8dcc2ee --- /dev/null +++ b/app/service/testx/testing.go @@ -0,0 +1,36 @@ +package testx + +import ( + "os" + "testing" + + "quyun/providers/app" + "quyun/providers/postgres" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + + "github.com/rogeecn/fabfile" + . "github.com/smartystreets/goconvey/convey" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{ + app.DefaultProvider(), + postgres.DefaultProvider(), + }, providers...) 
+} + +func Serve(providers container.Providers, t *testing.T, invoke any) { + Convey("tests boot up", t, func() { + file := fabfile.MustFind("config.toml") + + localEnv := os.Getenv("ENV_LOCAL") + if localEnv != "" { + file = fabfile.MustFind("config." + localEnv + ".toml") + } + + So(atom.LoadProviders(file, providers), ShouldBeNil) + So(container.Container.Invoke(invoke), ShouldBeNil) + }) +} diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 0000000..084f718 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,23 @@ +version: v2 +inputs: + - directory: proto +managed: + enabled: true + override: + - file_option: go_package_prefix + value: quyun/pkg/proto + +plugins: + - local: protoc-gen-go + out: pkg/proto + opt: paths=source_relative + # - local: protoc-gen-grpc-gateway + # out: pkg/proto + # opt: + # - paths=source_relative + # - generate_unbound_methods=true + - local: protoc-gen-go-grpc + out: pkg/proto + opt: paths=source_relative + # - local: protoc-gen-openapiv2 + # out: docs/proto diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 0000000..06039af --- /dev/null +++ b/buf.yaml @@ -0,0 +1,13 @@ +# For details on buf.yaml configuration, visit https://buf.build/docs/configuration/v2/buf-yaml +version: v2 +modules: + - path: proto +lint: + use: + - STANDARD +breaking: + use: + - FILE +deps: + - buf.build/googleapis/googleapis + - buf.build/grpc-ecosystem/grpc-gateway diff --git a/config.toml b/config.toml new file mode 100644 index 0000000..b305432 --- /dev/null +++ b/config.toml @@ -0,0 +1,24 @@ +[App] +Mode = "development" +BaseURI = "baseURI" + +[Http] +Port = 8088 + +[Database] +Host = "localhost" +Database = "postgres" +Password = "xixi0202" + +[JWT] +ExpiresTime = "168h" +SigningKey = "Key" + +[HashIDs] +Salt = "Salt" + +[Redis] +Host = "" +Port = 6379 +Password = "hello" +DB = 0 diff --git a/config.yh.toml b/config.yh.toml new file mode 100644 index 0000000..02a6dbe --- /dev/null +++ b/config.yh.toml @@ -0,0 +1,25 @@ +[App] +Mode = "development" +BaseURI = "baseURI" +Storage = "/Users/rogee/Projects/self/quyun/fixtures" + +[Http] +Port = 8088 + +[Database] +Host = "localhost" +Database = "postgres" +Password = "xixi0202" + +[JWT] +ExpiresTime = "168h" +SigningKey = "Key" + +[HashIDs] +Salt = "Salt" + +[Redis] +Host = "" +Port = 6379 +Password = "hello" +DB = 0 diff --git a/database/database.go b/database/database.go new file mode 100644 index 0000000..39c3f69 --- /dev/null +++ b/database/database.go @@ -0,0 +1,44 @@ +package database + +import ( + "context" + "database/sql" + "embed" + "fmt" + + "github.com/go-jet/jet/v2/qrm" +) + +//go:embed migrations/* +var MigrationFS embed.FS + +type CtxDB struct{} + +func FromContext(ctx context.Context, db *sql.DB) qrm.DB { + if tx, ok := ctx.Value(CtxDB{}).(*sql.Tx); ok { + return tx + } + return db +} + +func Truncate(ctx context.Context, db *sql.DB, tableName ...string) error { + for _, name := range tableName { + sql := fmt.Sprintf("TRUNCATE TABLE %s RESTART IDENTITY", name) + if _, err := db.ExecContext(ctx, sql); err != nil { + return err + } + } + return nil +} + +func WrapLike(v string) string { + return "%" + v + "%" +} + +func WrapLikeLeft(v string) string { + return "%" + v +} + +func WrapLikeRight(v string) string { + return "%" + v +} diff --git a/database/fields/common.go b/database/fields/common.go new file mode 100644 index 0000000..a078b0f --- /dev/null +++ b/database/fields/common.go @@ -0,0 +1,45 @@ +package fields + +import ( + "database/sql/driver" + "encoding/json" + "errors" +) + +// implement 
sql.Scanner interface +type Json[T any] struct { + Data T `json:",inline"` +} + +func ToJson[T any](data T) Json[T] { + return Json[T]{Data: data} +} + +func (x *Json[T]) Scan(value interface{}) (err error) { + switch v := value.(type) { + case string: + return json.Unmarshal([]byte(v), &x) + case []byte: + return json.Unmarshal(v, &x) + case *string: + return json.Unmarshal([]byte(*v), &x) + } + return errors.New("Unknown type for ") +} + +func (x Json[T]) Value() (driver.Value, error) { + return json.Marshal(x.Data) +} + +func (x Json[T]) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Data) +} + +func (x *Json[T]) UnmarshalJSON(data []byte) error { + var value T + if err := json.Unmarshal(data, &value); err != nil { + return err + } + x.Data = value + return nil +} diff --git a/database/migrations/20140202165500_river_job.sql b/database/migrations/20140202165500_river_job.sql new file mode 100644 index 0000000..8d01ac6 --- /dev/null +++ b/database/migrations/20140202165500_river_job.sql @@ -0,0 +1,408 @@ +-- +goose Up +-- +goose StatementBegin + +-- River migration 002 [up] +CREATE TYPE river_job_state AS ENUM( + 'available', + 'cancelled', + 'completed', + 'discarded', + 'pending', + 'retryable', + 'running', + 'scheduled' +); + +CREATE TABLE river_job( + -- 8 bytes + id bigserial PRIMARY KEY, + + -- 8 bytes (4 bytes + 2 bytes + 2 bytes) + -- + -- `state` is kept near the top of the table for operator convenience -- when + -- looking at jobs with `SELECT *` it'll appear first after ID. The other two + -- fields aren't as important but are kept adjacent to `state` for alignment + -- to get an 8-byte block. + state river_job_state NOT NULL DEFAULT 'available', + attempt smallint NOT NULL DEFAULT 0, + max_attempts smallint NOT NULL, + + -- 8 bytes each (no alignment needed) + attempted_at timestamptz, + created_at timestamptz NOT NULL DEFAULT NOW(), + finalized_at timestamptz, + scheduled_at timestamptz NOT NULL DEFAULT NOW(), + + -- 2 bytes (some wasted padding probably) + priority smallint NOT NULL DEFAULT 1, + + -- types stored out-of-band + args jsonb, + attempted_by text[], + errors jsonb[], + kind text NOT NULL, + metadata jsonb NOT NULL DEFAULT '{}', + queue text NOT NULL DEFAULT 'default', + tags varchar(255)[], + + CONSTRAINT finalized_or_finalized_at_null CHECK ((state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL), + CONSTRAINT max_attempts_is_positive CHECK (max_attempts > 0), + CONSTRAINT priority_in_range CHECK (priority >= 1 AND priority <= 4), + CONSTRAINT queue_length CHECK (char_length(queue) > 0 AND char_length(queue) < 128), + CONSTRAINT kind_length CHECK (char_length(kind) > 0 AND char_length(kind) < 128) +); + +-- We may want to consider adding another property here after `kind` if it seems +-- like it'd be useful for something. 
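+-- Supporting indexes: kind lookups, finalized-state queries, prioritized job fetching,
+-- and GIN indexes for JSONB queries on args and metadata.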
+CREATE INDEX river_job_kind ON river_job USING btree(kind); + +CREATE INDEX river_job_state_and_finalized_at_index ON river_job USING btree(state, finalized_at) WHERE finalized_at IS NOT NULL; + +CREATE INDEX river_job_prioritized_fetching_index ON river_job USING btree(state, queue, priority, scheduled_at, id); + +CREATE INDEX river_job_args_index ON river_job USING GIN(args); + +CREATE INDEX river_job_metadata_index ON river_job USING GIN(metadata); + +CREATE OR REPLACE FUNCTION river_job_notify() + RETURNS TRIGGER + AS $$ +DECLARE + payload json; +BEGIN + IF NEW.state = 'available' THEN + -- Notify will coalesce duplicate notifications within a transaction, so + -- keep these payloads generalized: + payload = json_build_object('queue', NEW.queue); + PERFORM + pg_notify('river_insert', payload::text); + END IF; + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER river_notify + AFTER INSERT ON river_job + FOR EACH ROW + EXECUTE PROCEDURE river_job_notify(); + +CREATE UNLOGGED TABLE river_leader( + -- 8 bytes each (no alignment needed) + elected_at timestamptz NOT NULL, + expires_at timestamptz NOT NULL, + + -- types stored out-of-band + leader_id text NOT NULL, + name text PRIMARY KEY, + + CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), + CONSTRAINT leader_id_length CHECK (char_length(leader_id) > 0 AND char_length(leader_id) < 128) +); + +-- River migration 003 [up] +ALTER TABLE river_job ALTER COLUMN tags SET DEFAULT '{}'; +UPDATE river_job SET tags = '{}' WHERE tags IS NULL; +ALTER TABLE river_job ALTER COLUMN tags SET NOT NULL; + +-- River migration 004 [up] +-- The args column never had a NOT NULL constraint or default value at the +-- database level, though we tried to ensure one at the application level. +ALTER TABLE river_job ALTER COLUMN args SET DEFAULT '{}'; +UPDATE river_job SET args = '{}' WHERE args IS NULL; +ALTER TABLE river_job ALTER COLUMN args SET NOT NULL; +ALTER TABLE river_job ALTER COLUMN args DROP DEFAULT; + +-- The metadata column never had a NOT NULL constraint or default value at the +-- database level, though we tried to ensure one at the application level. +ALTER TABLE river_job ALTER COLUMN metadata SET DEFAULT '{}'; +UPDATE river_job SET metadata = '{}' WHERE metadata IS NULL; +ALTER TABLE river_job ALTER COLUMN metadata SET NOT NULL; + +-- The 'pending' job state will be used for upcoming functionality: +-- ALTER TYPE river_job_state ADD VALUE IF NOT EXISTS 'pending' AFTER 'discarded'; + +ALTER TABLE river_job DROP CONSTRAINT finalized_or_finalized_at_null; +ALTER TABLE river_job ADD CONSTRAINT finalized_or_finalized_at_null CHECK ( + (finalized_at IS NULL AND state NOT IN ('cancelled', 'completed', 'discarded')) OR + (finalized_at IS NOT NULL AND state IN ('cancelled', 'completed', 'discarded')) +); + +DROP TRIGGER river_notify ON river_job; +DROP FUNCTION river_job_notify; + +CREATE TABLE river_queue( + name text PRIMARY KEY NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + metadata jsonb NOT NULL DEFAULT '{}' ::jsonb, + paused_at timestamptz, + updated_at timestamptz NOT NULL +); + +ALTER TABLE river_leader + ALTER COLUMN name SET DEFAULT 'default', + DROP CONSTRAINT name_length, + ADD CONSTRAINT name_length CHECK (name = 'default'); + +-- River migration 005 [up] +-- +-- Rebuild the migration table so it's based on `(line, version)`. +-- + +DO +$body$ +BEGIN + -- Tolerate users who may be using their own migration system rather than + -- River's. 
If they are, they will have skipped version 001 containing + -- `CREATE TABLE river_migration`, so this table won't exist. + IF (SELECT to_regclass('river_migration') IS NOT NULL) THEN + ALTER TABLE river_migration + RENAME TO river_migration_old; + + CREATE TABLE river_migration( + line TEXT NOT NULL, + version bigint NOT NULL, + created_at timestamptz NOT NULL DEFAULT NOW(), + CONSTRAINT line_length CHECK (char_length(line) > 0 AND char_length(line) < 128), + CONSTRAINT version_gte_1 CHECK (version >= 1), + PRIMARY KEY (line, version) + ); + + INSERT INTO river_migration + (created_at, line, version) + SELECT created_at, 'main', version + FROM river_migration_old; + + DROP TABLE river_migration_old; + END IF; +END; +$body$ +LANGUAGE 'plpgsql'; + +-- +-- Add `river_job.unique_key` and bring up an index on it. +-- + +-- These statements use `IF NOT EXISTS` to allow users with a `river_job` table +-- of non-trivial size to build the index `CONCURRENTLY` out of band of this +-- migration, then follow by completing the migration. +ALTER TABLE river_job + ADD COLUMN IF NOT EXISTS unique_key bytea; + +CREATE UNIQUE INDEX IF NOT EXISTS river_job_kind_unique_key_idx ON river_job (kind, unique_key) WHERE unique_key IS NOT NULL; + +-- +-- Create `river_client` and derivative. +-- +-- This feature hasn't quite yet been implemented, but we're taking advantage of +-- the migration to add the schema early so that we can add it later without an +-- additional migration. +-- + +CREATE UNLOGGED TABLE river_client ( + id text PRIMARY KEY NOT NULL, + created_at timestamptz NOT NULL DEFAULT now(), + metadata jsonb NOT NULL DEFAULT '{}', + paused_at timestamptz, + updated_at timestamptz NOT NULL, + CONSTRAINT name_length CHECK (char_length(id) > 0 AND char_length(id) < 128) +); + +-- Differs from `river_queue` in that it tracks the queue state for a particular +-- active client. +CREATE UNLOGGED TABLE river_client_queue ( + river_client_id text NOT NULL REFERENCES river_client (id) ON DELETE CASCADE, + name text NOT NULL, + created_at timestamptz NOT NULL DEFAULT now(), + max_workers bigint NOT NULL DEFAULT 0, + metadata jsonb NOT NULL DEFAULT '{}', + num_jobs_completed bigint NOT NULL DEFAULT 0, + num_jobs_running bigint NOT NULL DEFAULT 0, + updated_at timestamptz NOT NULL, + PRIMARY KEY (river_client_id, name), + CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128), + CONSTRAINT num_jobs_completed_zero_or_positive CHECK (num_jobs_completed >= 0), + CONSTRAINT num_jobs_running_zero_or_positive CHECK (num_jobs_running >= 0) +); + +-- River migration 006 [up] +CREATE OR REPLACE FUNCTION river_job_state_in_bitmask(bitmask BIT(8), state river_job_state) +RETURNS boolean +LANGUAGE SQL +IMMUTABLE +AS $$ + SELECT CASE state + WHEN 'available' THEN get_bit(bitmask, 7) + WHEN 'cancelled' THEN get_bit(bitmask, 6) + WHEN 'completed' THEN get_bit(bitmask, 5) + WHEN 'discarded' THEN get_bit(bitmask, 4) + WHEN 'pending' THEN get_bit(bitmask, 3) + WHEN 'retryable' THEN get_bit(bitmask, 2) + WHEN 'running' THEN get_bit(bitmask, 1) + WHEN 'scheduled' THEN get_bit(bitmask, 0) + ELSE 0 + END = 1; +$$; + +-- +-- Add `river_job.unique_states` and bring up an index on it. +-- +-- This column may exist already if users manually created the column and index +-- as instructed in the changelog so the index could be created `CONCURRENTLY`. 
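+--
+-- Illustrative sketch (not part of the upstream River migration): for a large
+-- table, the manual out-of-band build mentioned above could look roughly like
+-- the following, run outside of a transaction before applying this migration:
+--
+--     ALTER TABLE river_job ADD COLUMN IF NOT EXISTS unique_states BIT(8);
+--     CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS river_job_unique_idx
+--         ON river_job (unique_key)
+--         WHERE unique_key IS NOT NULL
+--           AND unique_states IS NOT NULL
+--           AND river_job_state_in_bitmask(unique_states, state);
+--
+-- As a worked example of the bitmask encoding, assuming get_bit() on a bit
+-- string counts from the leftmost bit as bit 0, a unique_states value of
+-- B'11110101' keeps available, completed, pending, retryable, running and
+-- scheduled in the uniqueness check while excluding cancelled and discarded.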
+--
+ALTER TABLE river_job ADD COLUMN IF NOT EXISTS unique_states BIT(8);
+
+-- This statement uses `IF NOT EXISTS` to allow users with a `river_job` table
+-- of non-trivial size to build the index `CONCURRENTLY` out of band of this
+-- migration, then follow by completing the migration.
+CREATE UNIQUE INDEX IF NOT EXISTS river_job_unique_idx ON river_job (unique_key)
+    WHERE unique_key IS NOT NULL
+      AND unique_states IS NOT NULL
+      AND river_job_state_in_bitmask(unique_states, state);
+
+-- Remove the old unique index. Users who are actively using the unique jobs
+-- feature and who wish to avoid deploy downtime may want to drop this in a
+-- subsequent migration once all jobs using the old unique system have been
+-- completed (i.e. no more rows with non-null unique_key and null
+-- unique_states).
+DROP INDEX river_job_kind_unique_key_idx;
+
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+-- River migration 006 [down]
+--
+-- Drop `river_job.unique_states` and its index.
+--
+
+DROP INDEX river_job_unique_idx;
+
+ALTER TABLE river_job
+    DROP COLUMN unique_states;
+
+CREATE UNIQUE INDEX IF NOT EXISTS river_job_kind_unique_key_idx ON river_job (kind, unique_key) WHERE unique_key IS NOT NULL;
+
+--
+-- Drop `river_job_state_in_bitmask` function.
+--
+DROP FUNCTION river_job_state_in_bitmask;
+
+-- River migration 005 [down]
+--
+-- Revert to migration table based only on `(version)`.
+--
+-- If any non-main migrations are present, 005 is considered irreversible.
+--
+
+DO
+$body$
+BEGIN
+    -- Tolerate users who may be using their own migration system rather than
+    -- River's. If they are, they will have skipped version 001 containing
+    -- `CREATE TABLE river_migration`, so this table won't exist.
+    IF (SELECT to_regclass('river_migration') IS NOT NULL) THEN
+        IF EXISTS (
+            SELECT *
+            FROM river_migration
+            WHERE line <> 'main'
+        ) THEN
+            RAISE EXCEPTION 'Found non-main migration lines in the database; version 005 migration is irreversible because it would result in loss of migration information.';
+        END IF;
+
+        ALTER TABLE river_migration
+            RENAME TO river_migration_old;
+
+        CREATE TABLE river_migration(
+            id bigserial PRIMARY KEY,
+            created_at timestamptz NOT NULL DEFAULT NOW(),
+            version bigint NOT NULL,
+            CONSTRAINT version CHECK (version >= 1)
+        );
+
+        CREATE UNIQUE INDEX ON river_migration USING btree(version);
+
+        INSERT INTO river_migration
+            (created_at, version)
+        SELECT created_at, version
+        FROM river_migration_old;
+
+        DROP TABLE river_migration_old;
+    END IF;
+END;
+$body$
+LANGUAGE 'plpgsql';
+
+--
+-- Drop `river_job.unique_key`.
+--
+
+ALTER TABLE river_job
+    DROP COLUMN unique_key;
+
+--
+-- Drop `river_client` and derivative.
+--
+
+DROP TABLE river_client_queue;
+DROP TABLE river_client;
+
+-- River migration 004 [down]
+ALTER TABLE river_job ALTER COLUMN args DROP NOT NULL;
+
+ALTER TABLE river_job ALTER COLUMN metadata DROP NOT NULL;
+ALTER TABLE river_job ALTER COLUMN metadata DROP DEFAULT;
+
+-- It is not possible to safely remove 'pending' from the river_job_state enum,
+-- so leave it in place.
+ +ALTER TABLE river_job DROP CONSTRAINT finalized_or_finalized_at_null; +ALTER TABLE river_job ADD CONSTRAINT finalized_or_finalized_at_null CHECK ( + (state IN ('cancelled', 'completed', 'discarded') AND finalized_at IS NOT NULL) OR finalized_at IS NULL +); + +CREATE OR REPLACE FUNCTION river_job_notify() + RETURNS TRIGGER + AS $$ +DECLARE + payload json; +BEGIN + IF NEW.state = 'available' THEN + -- Notify will coalesce duplicate notifications within a transaction, so + -- keep these payloads generalized: + payload = json_build_object('queue', NEW.queue); + PERFORM + pg_notify('river_insert', payload::text); + END IF; + RETURN NULL; +END; +$$ +LANGUAGE plpgsql; + +CREATE TRIGGER river_notify + AFTER INSERT ON river_job + FOR EACH ROW + EXECUTE PROCEDURE river_job_notify(); + +DROP TABLE river_queue; + +ALTER TABLE river_leader + ALTER COLUMN name DROP DEFAULT, + DROP CONSTRAINT name_length, + ADD CONSTRAINT name_length CHECK (char_length(name) > 0 AND char_length(name) < 128); + +-- River migration 003 [down] +ALTER TABLE river_job ALTER COLUMN tags DROP NOT NULL, + ALTER COLUMN tags DROP DEFAULT; + +-- River migration 002 [down] +DROP TABLE river_job; +DROP FUNCTION river_job_notify; +DROP TYPE river_job_state; + +DROP TABLE river_leader; + +-- +goose StatementEnd diff --git a/database/migrations/20250321112535_create_medias.sql b/database/migrations/20250321112535_create_medias.sql new file mode 100644 index 0000000..838e9e4 --- /dev/null +++ b/database/migrations/20250321112535_create_medias.sql @@ -0,0 +1,17 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE medias( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + name varchar(255) NOT NULL DEFAULT '', + mime_type varchar(128) NOT NULL DEFAULT '', + size int8 NOT NULL DEFAULT 0, + path varchar(255) NOT NULL DEFAULT '' +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE medias; + +-- +goose StatementEnd diff --git a/database/schemas/public/enum/river_job_state.go b/database/schemas/public/enum/river_job_state.go new file mode 100644 index 0000000..97fa72d --- /dev/null +++ b/database/schemas/public/enum/river_job_state.go @@ -0,0 +1,30 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package enum + +import "github.com/go-jet/jet/v2/postgres" + +var RiverJobState = &struct { + Available postgres.StringExpression + Cancelled postgres.StringExpression + Completed postgres.StringExpression + Discarded postgres.StringExpression + Pending postgres.StringExpression + Retryable postgres.StringExpression + Running postgres.StringExpression + Scheduled postgres.StringExpression +}{ + Available: postgres.NewEnumValue("available"), + Cancelled: postgres.NewEnumValue("cancelled"), + Completed: postgres.NewEnumValue("completed"), + Discarded: postgres.NewEnumValue("discarded"), + Pending: postgres.NewEnumValue("pending"), + Retryable: postgres.NewEnumValue("retryable"), + Running: postgres.NewEnumValue("running"), + Scheduled: postgres.NewEnumValue("scheduled"), +} diff --git a/database/schemas/public/model/medias.go b/database/schemas/public/model/medias.go new file mode 100644 index 0000000..8a38196 --- /dev/null +++ b/database/schemas/public/model/medias.go @@ -0,0 +1,21 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Medias struct { + ID int64 `sql:"primary_key" json:"id"` + CreatedAt time.Time `json:"created_at"` + Name string `json:"name"` + MimeType string `json:"mime_type"` + Size int64 `json:"size"` + Path string `json:"path"` +} diff --git a/database/schemas/public/model/migrations.go b/database/schemas/public/model/migrations.go new file mode 100644 index 0000000..ba622c1 --- /dev/null +++ b/database/schemas/public/model/migrations.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Migrations struct { + ID int32 `sql:"primary_key" json:"id"` + VersionID int64 `json:"version_id"` + IsApplied bool `json:"is_applied"` + Tstamp time.Time `json:"tstamp"` +} diff --git a/database/schemas/public/model/river_job_state.go b/database/schemas/public/model/river_job_state.go new file mode 100644 index 0000000..809604b --- /dev/null +++ b/database/schemas/public/model/river_job_state.go @@ -0,0 +1,73 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import "errors" + +type RiverJobState string + +const ( + RiverJobState_Available RiverJobState = "available" + RiverJobState_Cancelled RiverJobState = "cancelled" + RiverJobState_Completed RiverJobState = "completed" + RiverJobState_Discarded RiverJobState = "discarded" + RiverJobState_Pending RiverJobState = "pending" + RiverJobState_Retryable RiverJobState = "retryable" + RiverJobState_Running RiverJobState = "running" + RiverJobState_Scheduled RiverJobState = "scheduled" +) + +var RiverJobStateAllValues = []RiverJobState{ + RiverJobState_Available, + RiverJobState_Cancelled, + RiverJobState_Completed, + RiverJobState_Discarded, + RiverJobState_Pending, + RiverJobState_Retryable, + RiverJobState_Running, + RiverJobState_Scheduled, +} + +func (e *RiverJobState) Scan(value interface{}) error { + var enumValue string + switch val := value.(type) { + case string: + enumValue = val + case []byte: + enumValue = string(val) + default: + return errors.New("jet: Invalid scan value for AllTypesEnum enum. Enum value has to be of type string or []byte") + } + + switch enumValue { + case "available": + *e = RiverJobState_Available + case "cancelled": + *e = RiverJobState_Cancelled + case "completed": + *e = RiverJobState_Completed + case "discarded": + *e = RiverJobState_Discarded + case "pending": + *e = RiverJobState_Pending + case "retryable": + *e = RiverJobState_Retryable + case "running": + *e = RiverJobState_Running + case "scheduled": + *e = RiverJobState_Scheduled + default: + return errors.New("jet: Invalid scan value '" + enumValue + "' for RiverJobState enum") + } + + return nil +} + +func (e RiverJobState) String() string { + return string(e) +} diff --git a/database/schemas/public/table/medias.go b/database/schemas/public/table/medias.go new file mode 100644 index 0000000..b43c13a --- /dev/null +++ b/database/schemas/public/table/medias.go @@ -0,0 +1,90 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Medias = newMediasTable("public", "medias", "") + +type mediasTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + CreatedAt postgres.ColumnTimestamp + Name postgres.ColumnString + MimeType postgres.ColumnString + Size postgres.ColumnInteger + Path postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MediasTable struct { + mediasTable + + EXCLUDED mediasTable +} + +// AS creates new MediasTable with assigned alias +func (a MediasTable) AS(alias string) *MediasTable { + return newMediasTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MediasTable with assigned schema name +func (a MediasTable) FromSchema(schemaName string) *MediasTable { + return newMediasTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MediasTable with assigned table prefix +func (a MediasTable) WithPrefix(prefix string) *MediasTable { + return newMediasTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MediasTable with assigned table suffix +func (a MediasTable) WithSuffix(suffix string) *MediasTable { + return newMediasTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMediasTable(schemaName, tableName, alias string) *MediasTable { + return &MediasTable{ + mediasTable: newMediasTableImpl(schemaName, tableName, alias), + EXCLUDED: newMediasTableImpl("", "excluded", ""), + } +} + +func newMediasTableImpl(schemaName, tableName, alias string) mediasTable { + var ( + IDColumn = postgres.IntegerColumn("id") + CreatedAtColumn = postgres.TimestampColumn("created_at") + NameColumn = postgres.StringColumn("name") + MimeTypeColumn = postgres.StringColumn("mime_type") + SizeColumn = postgres.IntegerColumn("size") + PathColumn = postgres.StringColumn("path") + allColumns = postgres.ColumnList{IDColumn, CreatedAtColumn, NameColumn, MimeTypeColumn, SizeColumn, PathColumn} + mutableColumns = postgres.ColumnList{CreatedAtColumn, NameColumn, MimeTypeColumn, SizeColumn, PathColumn} + ) + + return mediasTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + CreatedAt: CreatedAtColumn, + Name: NameColumn, + MimeType: MimeTypeColumn, + Size: SizeColumn, + Path: PathColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/database/schemas/public/table/migrations.go b/database/schemas/public/table/migrations.go new file mode 100644 index 0000000..c4a6b2d --- /dev/null +++ b/database/schemas/public/table/migrations.go @@ -0,0 +1,84 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Migrations = newMigrationsTable("public", "migrations", "") + +type migrationsTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList +} + +type MigrationsTable struct { + migrationsTable + + EXCLUDED migrationsTable +} + +// AS creates new MigrationsTable with assigned alias +func (a MigrationsTable) AS(alias string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MigrationsTable with assigned schema name +func (a MigrationsTable) FromSchema(schemaName string) *MigrationsTable { + return newMigrationsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MigrationsTable with assigned table prefix +func (a MigrationsTable) WithPrefix(prefix string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MigrationsTable with assigned table suffix +func (a MigrationsTable) WithSuffix(suffix string) *MigrationsTable { + return newMigrationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMigrationsTable(schemaName, tableName, alias string) *MigrationsTable { + return &MigrationsTable{ + migrationsTable: newMigrationsTableImpl(schemaName, tableName, alias), + EXCLUDED: newMigrationsTableImpl("", "excluded", ""), + } +} + +func newMigrationsTableImpl(schemaName, tableName, alias string) migrationsTable { + var ( + IDColumn = postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + ) + + return migrationsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + } +} diff --git a/database/schemas/public/table/table_use_schema.go b/database/schemas/public/table/table_use_schema.go new file mode 100644 index 0000000..9c5f1a9 --- /dev/null +++ b/database/schemas/public/table/table_use_schema.go @@ -0,0 +1,15 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. 
+func UseSchema(schema string) { + Medias = Medias.FromSchema(schema) + Migrations = Migrations.FromSchema(schema) +} diff --git a/database/transform.yaml b/database/transform.yaml new file mode 100644 index 0000000..8c65047 --- /dev/null +++ b/database/transform.yaml @@ -0,0 +1,11 @@ +ignores: + - migrations + - river_leader + - river_job + - river_client + - river_client_queue + - river_queue +# types: +# users: # table name +# meta: UserMeta +# meta: Json[UserMeta] diff --git a/docs/docs.go b/docs/docs.go new file mode 100644 index 0000000..ea53106 --- /dev/null +++ b/docs/docs.go @@ -0,0 +1,54 @@ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/rogeecn/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": {}, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:8080", + BasePath: "/api/v1", + Schemes: []string{}, + Title: "ApiDoc", + Description: "This is a sample server celler server.", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/docs/ember.go b/docs/ember.go new file mode 100644 index 0000000..ae898ec --- /dev/null +++ b/docs/ember.go @@ -0,0 +1,10 @@ +package docs + +import ( + _ "embed" + + _ "github.com/rogeecn/swag" +) + +//go:embed swagger.json +var SwaggerSpec string diff --git a/docs/swagger.json b/docs/swagger.json new file mode 100644 index 0000000..b5640f4 --- /dev/null +++ b/docs/swagger.json @@ -0,0 +1,30 @@ +{ + "swagger": "2.0", + "info": { + "description": "This is a sample server celler server.", + "title": "ApiDoc", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "1.0" + }, + "host": "localhost:8080", + "basePath": "/api/v1", + "paths": {}, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +} \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml new file mode 100644 index 0000000..9f48b7e --- /dev/null +++ b/docs/swagger.yaml @@ -0,0 +1,22 @@ +basePath: /api/v1 +externalDocs: + description: OpenAPI + url: https://swagger.io/resources/open-api/ +host: localhost:8080 +info: + contact: + email: support@swagger.io + name: UserName + url: http://www.swagger.io/support + description: This is a sample server celler server. 
+ license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ + title: ApiDoc + version: "1.0" +paths: {} +securityDefinitions: + BasicAuth: + type: basic +swagger: "2.0" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..29022b5 --- /dev/null +++ b/go.mod @@ -0,0 +1,157 @@ +module quyun + +go 1.23.0 + +toolchain go1.24.0 + +require ( + github.com/ThreeDotsLabs/watermill v1.4.3 + github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.6 + github.com/ThreeDotsLabs/watermill-redisstream v1.4.2 + github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 + github.com/go-jet/jet/v2 v2.13.0 + github.com/gofiber/fiber/v3 v3.0.0-beta.4 + github.com/gofiber/utils/v2 v2.0.0-beta.7 + github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/imroc/req/v3 v3.50.0 + github.com/jackc/pgx/v5 v5.7.2 + github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a + github.com/lib/pq v1.10.9 + github.com/opentracing/opentracing-go v1.2.0 + github.com/pkg/errors v0.9.1 + github.com/pressly/goose/v3 v3.24.1 + github.com/redis/go-redis/v9 v9.7.3 + github.com/riverqueue/river v0.15.0 + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0 + github.com/riverqueue/river/rivertype v0.15.0 + github.com/rogeecn/fabfile v1.4.0 + github.com/rogeecn/swag v1.0.1 + github.com/samber/lo v1.49.1 + github.com/sirupsen/logrus v1.9.3 + github.com/smartystreets/goconvey v1.8.1 + github.com/soheilhy/cmux v0.1.5 + github.com/speps/go-hashids/v2 v2.0.1 + github.com/spf13/cobra v1.9.1 + github.com/swaggo/files/v2 v2.0.2 + github.com/uber/jaeger-client-go v2.30.0+incompatible + go.ipao.vip/atom v1.1.8 + go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 + go.opentelemetry.io/otel v1.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 + go.opentelemetry.io/otel/metric v1.35.0 + go.opentelemetry.io/otel/sdk v1.35.0 + go.opentelemetry.io/otel/sdk/metric v1.35.0 + go.opentelemetry.io/otel/trace v1.35.0 + go.uber.org/dig v1.18.1 + golang.org/x/net v0.35.0 + golang.org/x/sync v0.12.0 + google.golang.org/grpc v1.71.0 + google.golang.org/protobuf v1.36.5 + gopkg.in/retry.v1 v1.0.3 +) + +require ( + github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/IBM/sarama v1.43.3 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/Rican7/retry v0.3.1 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.5.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + 
github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/gofiber/schema v1.2.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/lithammer/shortuuid/v3 v3.0.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.22.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/refraction-networking/utls v1.6.7 // indirect + github.com/riverqueue/river/riverdriver v0.15.0 // indirect + github.com/riverqueue/river/rivershared v0.15.0 // indirect + github.com/sagikazarmark/locafero v0.8.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/smarty/assertions v1.15.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.14.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/viper v1.20.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.58.0 // indirect + github.com/valyala/tcplisten v1.0.0 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + 
go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/mock v0.5.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/tools v0.29.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..2994504 --- /dev/null +++ b/go.sum @@ -0,0 +1,472 @@ +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/Rican7/retry v0.3.1 h1:scY4IbO8swckzoA/11HgBwaZRJEyY9vaNJshcdhp1Mc= +github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= +github.com/ThreeDotsLabs/watermill v1.4.3 h1:cRT1v7jlAgoPyEknvz0IFp3EKdSBRD/0Qbtz6KhexG8= +github.com/ThreeDotsLabs/watermill v1.4.3/go.mod h1:lBnrLbxOjeMRgcJbv+UiZr8Ylz8RkJ4m6i/VN/Nk+to= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.6 h1:xK+VLDjYvBrRZDaFZ7WSqiNmZ9lcDG5RIilFVDZOVyQ= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.0.6/go.mod h1:o1GcoF/1CSJ9JSmQzUkULvpZeO635pZe+WWrYNFlJNk= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.2 h1:FY6tsBcbhbJpKDOssU4bfybstqY0hQHwiZmVq9qyILQ= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.2/go.mod h1:69++855LyB+ckYDe60PiJLBcUrpckfDE2WwyzuVJRCk= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 h1:g4uE5Nm3Z6LVB3m+uMgHlN4ne4bDpwf3RJmXYRgMv94= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0/go.mod h1:G8/otZYWLTCeYL2Ww3ujQ7gQ/3+jw5Bj0UtyKn7bBjA= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= +github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 h1:R2zQhFwSCyyd7L43igYjDrH0wkC/i+QBPELuY0HOu84= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0/go.mod h1:2MqLKYJfjs3UriXXF9Fd0Qmh/lhxi/6tHXkqtXxyIHc= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-jet/jet/v2 v2.13.0 h1:DcD2IJRGos+4X40IQRV6S6q9onoOfZY/GPdvU6ImZcQ= +github.com/go-jet/jet/v2 v2.13.0/go.mod h1:YhT75U1FoYAxFOObbQliHmXVYQeffkBKWT7ZilZ3zPc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo= +github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gofiber/fiber/v3 v3.0.0-beta.4 h1:KzDSavvhG7m81NIsmnu5l3ZDbVS4feCidl4xlIfu6V0= +github.com/gofiber/fiber/v3 v3.0.0-beta.4/go.mod h1:/WFUoHRkZEsGHyy2+fYcdqi109IVOFbVwxv1n1RU+kk= +github.com/gofiber/schema v1.2.0 h1:j+ZRrNnUa/0ZuWrn/6kAtAufEr4jCJ+JuTURAMxNSZg= +github.com/gofiber/schema v1.2.0/go.mod h1:YYwj01w3hVfaNjhtJzaqetymL56VW642YS3qZPhuE6c= +github.com/gofiber/utils/v2 v2.0.0-beta.7 h1:NnHFrRHvhrufPABdWajcKZejz9HnCWmT/asoxRsiEbQ= +github.com/gofiber/utils/v2 v2.0.0-beta.7/go.mod h1:J/M03s+HMdZdvhAeyh76xT72IfVqBzuz/OJkrMa7cwU= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod 
h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/imroc/req/v3 v3.50.0 h1:n3BVnZiTRpvkN5T1IB79LC/THhFU9iXksNRMH4ZNVaY= +github.com/imroc/req/v3 v3.50.0/go.mod h1:tsOk8K7zI6cU4xu/VWCZVtq9Djw9IWm4MslKzme5woU= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa h1:s+4MhCQ6YrzisK6hFJUX53drDT4UsSW3DEhKn0ifuHw= +github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v1.14.4 
h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.7.2 h1:mLoDLV6sonKlvjIEsV56SkWNCnuNv531l94GaIzO+XI= +github.com/jackc/pgx/v5 v5.7.2/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a h1:45JtCyuNYE+QN9aPuR1ID9++BQU+NMTMudHSuaK0Las= +github.com/juju/go4 v0.0.0-20160222163258-40d72ab9641a/go.mod h1:RVHtZuvrpETIepiNUrNlih2OynoFf1eM6DGC6dloXzk= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= +github.com/lithammer/shortuuid/v3 v3.0.7/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= +github.com/mailru/easyjson 
v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pressly/goose/v3 v3.24.1 h1:bZmxRco2uy5uu5Ng1MMVEfYsFlrMJI+e/VMXHQ3C4LY= +github.com/pressly/goose/v3 v3.24.1/go.mod h1:rEWreU9uVtt0DHCyLzF9gRcWiiTF/V+528DV+4DORug= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 
v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= +github.com/refraction-networking/utls v1.6.7 h1:zVJ7sP1dJx/WtVuITug3qYUq034cDq9B2MR1K67ULZM= +github.com/refraction-networking/utls v1.6.7/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/riverqueue/river v0.15.0 h1:5jvE5KEvLvigJRTAtE28R/bvVwIb9GCdXo68IiKF700= +github.com/riverqueue/river v0.15.0/go.mod h1:k4v54wv5HMnnOCUPf+iEi3fs3RiJxXYpppuhXsW9UG8= +github.com/riverqueue/river/riverdriver v0.15.0 h1:Nv88t7tK51HvGfiSIe7ov/2PrAFntY4b3ak4MEF3Dxs= +github.com/riverqueue/river/riverdriver v0.15.0/go.mod h1:UERKTvUg0M7qWLuQLmHiEM/hbJEMP3+qcNDhvIx7R4s= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.15.0 h1:4OKdSQVJ3OQ2VfPtOPB5OHjzLpWEju4/eafxe/hbn84= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.15.0/go.mod h1:KwoUPnt2zu1UONPk7NchnHyZWcHKMwMM6B95Cx6SI2g= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0 h1:IBNuPdflDav+sxd8EDXomyv93fvMG9IBgEToQPkFWNs= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.15.0/go.mod h1:b8CGkpQlpHacnULdaJk8+4Dnvj4lBeEaIOnd+UT3Ciw= +github.com/riverqueue/river/rivershared v0.15.0 h1:hDClNzZHUJzF9wdg6FgFMjvaMV74zY9FZZPQmBaVVM0= +github.com/riverqueue/river/rivershared v0.15.0/go.mod h1:5pyQTv4W6BVoazOvN1p4EQ3a3jopsSgcHB1NxVRQRgU= +github.com/riverqueue/river/rivertype v0.15.0 h1:+TXRnvQv1ulV24uQnsuZmbb3yJdmbpizKQf0b0SM+f0= +github.com/riverqueue/river/rivertype v0.15.0/go.mod h1:4vpt5ZSdZ35mFbRAV4oXgeRdH3Mq5h1pUzQTvaGfCUA= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogeecn/fabfile v1.4.0 h1:Rw7/7OH8cV4aRPw79Oa4hHHFKaC/ol+sNmGcB/usHaQ= +github.com/rogeecn/fabfile v1.4.0/go.mod h1:EPwX7TtVcIWSLJkJAqxSzYjM/aV1Q0wymcaXqnMgzas= +github.com/rogeecn/swag v1.0.1 h1:s1yxLgopqO1m8sqGjVmt6ocMBRubMPIh2JtIPG4xjQE= +github.com/rogeecn/swag v1.0.1/go.mod h1:flG2NXERPxlRl2VdpU2VXTO8iBnQiERyowOXSkZVMOc= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a h1:3QH7VyOaaiUHNrA9Se4YQIRkDTCw1EJls9xTUCaCeRM= +github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.8.0 h1:mXaMVw7IqxNBxfv3LdWt9MDmcWDQ1fagDH918lOdVaQ= +github.com/sagikazarmark/locafero v0.8.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= +github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew= +github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/speps/go-hashids/v2 v2.0.1 h1:ViWOEqWES/pdOSq+C1SLVa8/Tnsd52XC34RY7lt7m4g= +github.com/speps/go-hashids/v2 v2.0.1/go.mod h1:47LKunwvDZki/uRVD6NImtyk712yFzIs3UF3KlHohGw= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= +github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU= +github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 
h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE= +github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw= +github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.ipao.vip/atom v1.1.8 h1:xiEpK2RPv/kxwxvN9tzJ8uClsmVskLEjnyZKoGfm04U= +go.ipao.vip/atom v1.1.8/go.mod h1:woAv+rZf0xd+7mEtKWv4PyazQARFLnrV/qA4qlAK008= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0 h1:0NgN/3SYkqYJ9NBlDfl/2lzVlwos/YQLvi8sUrzJRBE= +go.opentelemetry.io/contrib/instrumentation/runtime v0.60.0/go.mod h1:oxpUfhTkhgQaYIjtBt3T3w135dLoxq//qo3WPlPIKkE= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod 
h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/dig v1.18.1 h1:rLww6NuajVjeQn+49u5NcezUJEGwd5uXmyoCKW2g5Es= +go.uber.org/dig v1.18.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/retry.v1 v1.0.3 h1:a9CArYczAVv6Qs6VGoLMio99GEs7kY9UzSF9+LD+iGs= +gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/sqlite v1.34.1 h1:u3Yi6M0N8t9yKRDwhXcyp1eS5/ErhPTBggxWFuR6Hfk= +modernc.org/sqlite v1.34.1/go.mod h1:pXV2xHxhzXZsgT/RtTFAPY6JJDEvOTcTdwADQCCWD4k= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/main.go b/main.go new file mode 100644 index 0000000..9ddb6af --- /dev/null +++ b/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "quyun/app/service/http" + "quyun/app/service/migrate" + + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom" +) + +// @title ApiDoc +// @version 1.0 +// @description This is a sample server celler server. 
+// @termsOfService http://swagger.io/terms/ +// @contact.name UserName +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +// @host localhost:8080 +// @BasePath /api/v1 +// @securityDefinitions.basic BasicAuth +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ +func main() { + opts := []atom.Option{ + atom.Name("quyun"), + http.Command(), + migrate.Command(), + } + + if err := atom.Serve(opts...); err != nil { + log.Fatal(err) + } +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..06ab7d0 --- /dev/null +++ b/main_test.go @@ -0,0 +1 @@ +package main diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go new file mode 100644 index 0000000..85fa520 --- /dev/null +++ b/pkg/consts/consts.go @@ -0,0 +1,8 @@ +package consts + +// Format +// +// // swagger:enum CacheKey +// // ENUM( +// // VerifyCode = "code:__CHANNEL__:%s", +// // ) diff --git a/pkg/proto/user/v1/user.pb.go b/pkg/proto/user/v1/user.pb.go new file mode 100644 index 0000000..005d56b --- /dev/null +++ b/pkg/proto/user/v1/user.pb.go @@ -0,0 +1,407 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: user/v1/user.proto + +package userv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// User represents a user entity +type User struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + Phone string `protobuf:"bytes,4,opt,name=phone,proto3" json:"phone,omitempty"` + CreateTime string `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + UpdateTime string `protobuf:"bytes,6,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *User) Reset() { + *x = User{} + mi := &file_user_v1_user_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. 
+func (*User) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{0} +} + +func (x *User) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *User) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *User) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *User) GetPhone() string { + if x != nil { + return x.Phone + } + return "" +} + +func (x *User) GetCreateTime() string { + if x != nil { + return x.CreateTime + } + return "" +} + +func (x *User) GetUpdateTime() string { + if x != nil { + return x.UpdateTime + } + return "" +} + +type ListUsersRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageNumber int32 `protobuf:"varint,2,opt,name=page_number,json=pageNumber,proto3" json:"page_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListUsersRequest) Reset() { + *x = ListUsersRequest{} + mi := &file_user_v1_user_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersRequest) ProtoMessage() {} + +func (x *ListUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersRequest.ProtoReflect.Descriptor instead. +func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUsersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUsersRequest) GetPageNumber() int32 { + if x != nil { + return x.PageNumber + } + return 0 +} + +type ListUsersResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + Total int32 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListUsersResponse) Reset() { + *x = ListUsersResponse{} + mi := &file_user_v1_user_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersResponse) ProtoMessage() {} + +func (x *ListUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersResponse.ProtoReflect.Descriptor instead. 
+func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{2} +} + +func (x *ListUsersResponse) GetUsers() []*User { + if x != nil { + return x.Users + } + return nil +} + +func (x *ListUsersResponse) GetTotal() int32 { + if x != nil { + return x.Total + } + return 0 +} + +type GetUserRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetUserRequest) Reset() { + *x = GetUserRequest{} + mi := &file_user_v1_user_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserRequest) ProtoMessage() {} + +func (x *GetUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserRequest.ProtoReflect.Descriptor instead. +func (*GetUserRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{3} +} + +func (x *GetUserRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type GetUserResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetUserResponse) Reset() { + *x = GetUserResponse{} + mi := &file_user_v1_user_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserResponse) ProtoMessage() {} + +func (x *GetUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserResponse.ProtoReflect.Descriptor instead. 
+func (*GetUserResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{4} +} + +func (x *GetUserResponse) GetUser() *User { + if x != nil { + return x.User + } + return nil +} + +var File_user_v1_user_proto protoreflect.FileDescriptor + +var file_user_v1_user_proto_rawDesc = string([]byte{ + 0x0a, 0x12, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x22, 0xa0, 0x01, + 0x0a, 0x04, 0x55, 0x73, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x68, 0x6f, 0x6e, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x70, 0x68, 0x6f, 0x6e, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x22, 0x50, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x4e, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x22, 0x20, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x32, 0x93, 0x01, 0x0a, 0x0b, 0x55, + 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x09, 0x4c, 0x69, + 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x00, + 0x12, 0x3e, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x17, 0x2e, 0x75, 0x73, + 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x75, 0x0a, 0x0b, 0x63, 0x6f, 0x6d, 0x2e, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, + 0x09, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x1e, 0x71, 0x75, + 0x79, 0x75, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x75, 0x73, + 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x73, 0x65, 0x72, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x55, + 0x58, 0x58, 0xaa, 0x02, 0x07, 0x55, 0x73, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x07, 0x55, + 0x73, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x13, 0x55, 0x73, 0x65, 0x72, 0x5c, 0x56, 0x31, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x08, 0x55, + 0x73, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) + +var ( + file_user_v1_user_proto_rawDescOnce sync.Once + file_user_v1_user_proto_rawDescData []byte +) + +func file_user_v1_user_proto_rawDescGZIP() []byte { + file_user_v1_user_proto_rawDescOnce.Do(func() { + file_user_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc))) + }) + return file_user_v1_user_proto_rawDescData +} + +var file_user_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_user_v1_user_proto_goTypes = []any{ + (*User)(nil), // 0: user.v1.User + (*ListUsersRequest)(nil), // 1: user.v1.ListUsersRequest + (*ListUsersResponse)(nil), // 2: user.v1.ListUsersResponse + (*GetUserRequest)(nil), // 3: user.v1.GetUserRequest + (*GetUserResponse)(nil), // 4: user.v1.GetUserResponse +} +var file_user_v1_user_proto_depIdxs = []int32{ + 0, // 0: user.v1.ListUsersResponse.users:type_name -> user.v1.User + 0, // 1: user.v1.GetUserResponse.user:type_name -> user.v1.User + 1, // 2: user.v1.UserService.ListUsers:input_type -> user.v1.ListUsersRequest + 3, // 3: user.v1.UserService.GetUser:input_type -> user.v1.GetUserRequest + 2, // 4: user.v1.UserService.ListUsers:output_type -> user.v1.ListUsersResponse + 4, // 5: user.v1.UserService.GetUser:output_type -> user.v1.GetUserResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_user_v1_user_proto_init() } +func file_user_v1_user_proto_init() { + if File_user_v1_user_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_user_v1_user_proto_goTypes, + DependencyIndexes: file_user_v1_user_proto_depIdxs, + MessageInfos: file_user_v1_user_proto_msgTypes, + }.Build() + File_user_v1_user_proto = out.File + file_user_v1_user_proto_goTypes = nil + file_user_v1_user_proto_depIdxs = nil +} diff --git 
a/pkg/proto/user/v1/user_grpc.pb.go b/pkg/proto/user/v1/user_grpc.pb.go new file mode 100644 index 0000000..f4a71a0 --- /dev/null +++ b/pkg/proto/user/v1/user_grpc.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: user/v1/user.proto + +package userv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + UserService_ListUsers_FullMethodName = "/user.v1.UserService/ListUsers" + UserService_GetUser_FullMethodName = "/user.v1.UserService/GetUser" +) + +// UserServiceClient is the client API for UserService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// UserService provides user-related operations +type UserServiceClient interface { + // ListUsers returns a list of users with pagination + ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) +} + +type userServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUserServiceClient(cc grpc.ClientConnInterface) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, UserService_ListUsers_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetUserResponse) + err := c.cc.Invoke(ctx, UserService_GetUser_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. +// All implementations must embed UnimplementedUserServiceServer +// for forward compatibility. +// +// UserService provides user-related operations +type UserServiceServer interface { + // ListUsers returns a list of users with pagination + ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) + mustEmbedUnimplementedUserServiceServer() +} + +// UnimplementedUserServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedUserServiceServer struct{} + +func (UnimplementedUserServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListUsers not implemented") +} +func (UnimplementedUserServiceServer) GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetUser not implemented") +} +func (UnimplementedUserServiceServer) mustEmbedUnimplementedUserServiceServer() {} +func (UnimplementedUserServiceServer) testEmbeddedByValue() {} + +// UnsafeUserServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to UserServiceServer will +// result in compilation errors. +type UnsafeUserServiceServer interface { + mustEmbedUnimplementedUserServiceServer() +} + +func RegisterUserServiceServer(s grpc.ServiceRegistrar, srv UserServiceServer) { + // If the following call pancis, it indicates UnimplementedUserServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&UserService_ServiceDesc, srv) +} + +func _UserService_ListUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).ListUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UserService_ListUsers_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).ListUsers(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GetUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GetUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UserService_GetUser_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GetUser(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// UserService_ServiceDesc is the grpc.ServiceDesc for UserService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var UserService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "user.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUsers", + Handler: _UserService_ListUsers_Handler, + }, + { + MethodName: "GetUser", + Handler: _UserService_GetUser_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "user/v1/user.proto", +} diff --git a/pkg/utils/buffer.go b/pkg/utils/buffer.go new file mode 100644 index 0000000..5746d74 --- /dev/null +++ b/pkg/utils/buffer.go @@ -0,0 +1,26 @@ +package utils + +import ( + "bufio" + "io" +) + +// NewLogBuffer creates a buffer that can be used to capture output stream +// and write to a logger in real time +func NewLogBuffer(output func(string)) io.Writer { + reader, writer := io.Pipe() + + go func() { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + output(scanner.Text()) + } + }() + + return writer +} + +// NewCombinedBuffer combines multiple io.Writers +func NewCombinedBuffer(writers ...io.Writer) io.Writer { + return io.MultiWriter(writers...) +} diff --git a/pkg/utils/md5.go b/pkg/utils/md5.go new file mode 100644 index 0000000..55550d9 --- /dev/null +++ b/pkg/utils/md5.go @@ -0,0 +1,33 @@ +package utils + +import ( + "crypto/md5" + "fmt" + "io" + "os" +) + +// Compare file md5 +func CompareFileMd5(file, md5 string) (bool, error) { + fileMd5, err := GetFileMd5(file) + if err != nil { + return false, err + } + return fileMd5 == md5, nil +} + +// GetFileMd5 +func GetFileMd5(file string) (string, error) { + f, err := os.Open(file) + if err != nil { + return "", err + } + defer f.Close() + + h := md5.New() + if _, err := io.Copy(h, f); err != nil { + return "", err + } + + return fmt.Sprintf("%x", h.Sum(nil)), nil +} diff --git a/proto/user/v1/user.proto b/proto/user/v1/user.proto new file mode 100644 index 0000000..40be68d --- /dev/null +++ b/proto/user/v1/user.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package user.v1; + +// User represents a user entity +message User { + int64 id = 1; + string username = 2; + string email = 3; + string phone = 4; + string create_time = 5; + string update_time = 6; +} + +message ListUsersRequest { + int32 page_size = 1; + int32 page_number = 2; +} + +message ListUsersResponse { + repeated User users = 1; + int32 total = 2; +} + +message GetUserRequest { + int64 id = 1; +} + +message GetUserResponse { + User user = 1; +} + +// UserService provides user-related operations +service UserService { + // ListUsers returns a list of users with pagination + rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) {} + + // GetUser returns detailed information about a specific user + rpc GetUser(GetUserRequest) returns (GetUserResponse) {} +} diff --git a/providers/app/app.go b/providers/app/app.go new file mode 100644 index 0000000..d0a566e --- /dev/null +++ b/providers/app/app.go @@ -0,0 +1,18 @@ +package app + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*Config, error) { + return &config, nil + }, o.DiOptions()...) 
+} diff --git a/providers/app/config.gen.go b/providers/app/config.gen.go new file mode 100644 index 0000000..702160e --- /dev/null +++ b/providers/app/config.gen.go @@ -0,0 +1,179 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: - +// Revision: - +// Build Date: - +// Built By: - + +package app + +import ( + "database/sql/driver" + "errors" + "fmt" + "strings" +) + +const ( + // AppModeDevelopment is a AppMode of type development. + AppModeDevelopment AppMode = "development" + // AppModeRelease is a AppMode of type release. + AppModeRelease AppMode = "release" + // AppModeTest is a AppMode of type test. + AppModeTest AppMode = "test" +) + +var ErrInvalidAppMode = fmt.Errorf("not a valid AppMode, try [%s]", strings.Join(_AppModeNames, ", ")) + +var _AppModeNames = []string{ + string(AppModeDevelopment), + string(AppModeRelease), + string(AppModeTest), +} + +// AppModeNames returns a list of possible string values of AppMode. +func AppModeNames() []string { + tmp := make([]string, len(_AppModeNames)) + copy(tmp, _AppModeNames) + return tmp +} + +// AppModeValues returns a list of the values for AppMode +func AppModeValues() []AppMode { + return []AppMode{ + AppModeDevelopment, + AppModeRelease, + AppModeTest, + } +} + +// String implements the Stringer interface. +func (x AppMode) String() string { + return string(x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x AppMode) IsValid() bool { + _, err := ParseAppMode(string(x)) + return err == nil +} + +var _AppModeValue = map[string]AppMode{ + "development": AppModeDevelopment, + "release": AppModeRelease, + "test": AppModeTest, +} + +// ParseAppMode attempts to convert a string to a AppMode. +func ParseAppMode(name string) (AppMode, error) { + if x, ok := _AppModeValue[name]; ok { + return x, nil + } + return AppMode(""), fmt.Errorf("%s is %w", name, ErrInvalidAppMode) +} + +var errAppModeNilPtr = errors.New("value pointer is nil") // one per type for package clashes + +// Scan implements the Scanner interface. +func (x *AppMode) Scan(value interface{}) (err error) { + if value == nil { + *x = AppMode("") + return + } + + // A wider range of scannable types. + // driver.Value values at the top of the list for expediency + switch v := value.(type) { + case string: + *x, err = ParseAppMode(v) + case []byte: + *x, err = ParseAppMode(string(v)) + case AppMode: + *x = v + case *AppMode: + if v == nil { + return errAppModeNilPtr + } + *x = *v + case *string: + if v == nil { + return errAppModeNilPtr + } + *x, err = ParseAppMode(*v) + default: + return errors.New("invalid type for AppMode") + } + + return +} + +// Value implements the driver Valuer interface. +func (x AppMode) Value() (driver.Value, error) { + return x.String(), nil +} + +// Set implements the Golang flag.Value interface func. +func (x *AppMode) Set(val string) error { + v, err := ParseAppMode(val) + *x = v + return err +} + +// Get implements the Golang flag.Getter interface func. +func (x *AppMode) Get() interface{} { + return *x +} + +// Type implements the github.com/spf13/pFlag Value interface. +func (x *AppMode) Type() string { + return "AppMode" +} + +type NullAppMode struct { + AppMode AppMode + Valid bool +} + +func NewNullAppMode(val interface{}) (x NullAppMode) { + err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value. + _ = err // make any errcheck linters happy + return +} + +// Scan implements the Scanner interface. 
+func (x *NullAppMode) Scan(value interface{}) (err error) { + if value == nil { + x.AppMode, x.Valid = AppMode(""), false + return + } + + err = x.AppMode.Scan(value) + x.Valid = (err == nil) + return +} + +// Value implements the driver Valuer interface. +func (x NullAppMode) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + // driver.Value accepts int64 for int values. + return string(x.AppMode), nil +} + +type NullAppModeStr struct { + NullAppMode +} + +func NewNullAppModeStr(val interface{}) (x NullAppModeStr) { + x.Scan(val) // yes, we ignore this error, it will just be an invalid value. + return +} + +// Value implements the driver Valuer interface. +func (x NullAppModeStr) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + return x.AppMode.String(), nil +} diff --git a/providers/app/config.go b/providers/app/config.go new file mode 100644 index 0000000..b646be7 --- /dev/null +++ b/providers/app/config.go @@ -0,0 +1,46 @@ +package app + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "App" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +// swagger:enum AppMode +// ENUM(development, release, test) +type AppMode string + +type Config struct { + Mode AppMode + Cert *Cert + BaseURI *string + StoragePath string +} + +func (c *Config) IsDevMode() bool { + return c.Mode == AppModeDevelopment +} + +func (c *Config) IsReleaseMode() bool { + return c.Mode == AppModeRelease +} + +func (c *Config) IsTestMode() bool { + return c.Mode == AppModeTest +} + +type Cert struct { + CA string + Cert string + Key string +} diff --git a/providers/cmux/config.go b/providers/cmux/config.go new file mode 100644 index 0000000..74a1254 --- /dev/null +++ b/providers/cmux/config.go @@ -0,0 +1,61 @@ +package cmux + +import ( + "fmt" + + "quyun/providers/grpc" + "quyun/providers/http" + + "github.com/soheilhy/cmux" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + "golang.org/x/sync/errgroup" +) + +const DefaultPrefix = "Cmux" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Host *string + Port uint +} + +func (h *Config) Address() string { + if h.Host == nil { + return fmt.Sprintf(":%d", h.Port) + } + return fmt.Sprintf("%s:%d", *h.Host, h.Port) +} + +type CMux struct { + Http *http.Service + Grpc *grpc.Grpc + Mux cmux.CMux +} + +func (c *CMux) Serve() error { + // grpcL := c.Mux.Match(cmux.HTTP2HeaderField("content-type", "application/grpc")) + // httpL := c.Mux.Match(cmux.HTTP1Fast()) + // httpL := c.Mux.Match(cmux.Any()) + httpL := c.Mux.Match(cmux.HTTP1Fast()) + grpcL := c.Mux.Match(cmux.Any()) + + var eg errgroup.Group + eg.Go(func() error { + return c.Grpc.ServeWithListener(grpcL) + }) + + eg.Go(func() error { + return c.Http.Listener(httpL) + }) + + return c.Mux.Serve() +} diff --git a/providers/cmux/provider.go b/providers/cmux/provider.go new file mode 100644 index 0000000..5a77114 --- /dev/null +++ b/providers/cmux/provider.go @@ -0,0 +1,32 @@ +package cmux + +import ( + "net" + + "quyun/providers/grpc" + "quyun/providers/http" + + "github.com/soheilhy/cmux" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func(http *http.Service, grpc *grpc.Grpc) (*CMux, error) { + l, err := net.Listen("tcp", config.Address()) + if err != nil { + return nil, err + } + + return &CMux{ + Http: http, + Grpc: grpc, + Mux: cmux.New(l), + }, nil + }, o.DiOptions()...) +} diff --git a/providers/event/config.go b/providers/event/config.go new file mode 100644 index 0000000..ac20bd2 --- /dev/null +++ b/providers/event/config.go @@ -0,0 +1,65 @@ +package event + +import ( + "context" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill/message" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "Events" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + ConsumerGroup string + + Brokers []string +} + +type PubSub struct { + Publisher message.Publisher + Subscriber message.Subscriber + Router *message.Router +} + +func (ps *PubSub) Serve(ctx context.Context) error { + if err := ps.Router.Run(ctx); err != nil { + return err + } + return nil +} + +func (ps *PubSub) Handle( + handlerName string, + consumerTopic string, + publisherTopic string, + handler message.HandlerFunc, +) { + ps.Router.AddHandler(handlerName, consumerTopic, ps.Subscriber, publisherTopic, ps.Publisher, handler) +} + +// publish +func (ps *PubSub) Publish(e contracts.EventPublisher) error { + if e == nil { + return nil + } + + payload, err := e.Marshal() + if err != nil { + return err + } + + msg := message.NewMessage(watermill.NewUUID(), payload) + return ps.Publisher.Publish(e.Topic(), msg) +} diff --git a/providers/event/logrus_adapter.go b/providers/event/logrus_adapter.go new file mode 100644 index 0000000..b4cdd41 --- /dev/null +++ b/providers/event/logrus_adapter.go @@ -0,0 +1,60 @@ +package event + +import ( + "github.com/ThreeDotsLabs/watermill" + "github.com/sirupsen/logrus" +) + +// LogrusLoggerAdapter is a watermill logger adapter for logrus. +type LogrusLoggerAdapter struct { + log *logrus.Logger + fields watermill.LogFields +} + +// NewLogrusLogger returns a LogrusLoggerAdapter that sends all logs to +// the passed logrus instance. +func LogrusAdapter() watermill.LoggerAdapter { + return &LogrusLoggerAdapter{log: logrus.StandardLogger()} +} + +// Error logs on level error with err as field and optional fields. +func (l *LogrusLoggerAdapter) Error(msg string, err error, fields watermill.LogFields) { + l.createEntry(fields.Add(watermill.LogFields{"err": err})).Error(msg) +} + +// Info logs on level info with optional fields. +func (l *LogrusLoggerAdapter) Info(msg string, fields watermill.LogFields) { + l.createEntry(fields).Info(msg) +} + +// Debug logs on level debug with optional fields. +func (l *LogrusLoggerAdapter) Debug(msg string, fields watermill.LogFields) { + l.createEntry(fields).Debug(msg) +} + +// Trace logs on level trace with optional fields. +func (l *LogrusLoggerAdapter) Trace(msg string, fields watermill.LogFields) { + l.createEntry(fields).Trace(msg) +} + +// With returns a new LogrusLoggerAdapter that includes fields +// to be re-used between logging statements. 
+func (l *LogrusLoggerAdapter) With(fields watermill.LogFields) watermill.LoggerAdapter { + return &LogrusLoggerAdapter{ + log: l.log, + fields: l.fields.Add(fields), + } +} + +// createEntry is a helper to add fields to a logrus entry if necessary. +func (l *LogrusLoggerAdapter) createEntry(fields watermill.LogFields) *logrus.Entry { + entry := logrus.NewEntry(l.log) + + allFields := fields.Add(l.fields) + + if len(allFields) > 0 { + entry = entry.WithFields(logrus.Fields(allFields)) + } + + return entry +} diff --git a/providers/event/provider.go b/providers/event/provider.go new file mode 100644 index 0000000..eb28d27 --- /dev/null +++ b/providers/event/provider.go @@ -0,0 +1,33 @@ +package event + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*PubSub, error) { + logger := LogrusAdapter() + + client := gochannel.NewGoChannel(gochannel.Config{}, logger) + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: client, + Subscriber: client, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/providers/event/provider_kafka.go b/providers/event/provider_kafka.go new file mode 100644 index 0000000..5d660c4 --- /dev/null +++ b/providers/event/provider_kafka.go @@ -0,0 +1,49 @@ +package event + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" + "github.com/ThreeDotsLabs/watermill/message" +) + +func ProvideKafka(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*PubSub, error) { + logger := LogrusAdapter() + + publisher, err := kafka.NewPublisher(kafka.PublisherConfig{ + Brokers: config.Brokers, + Marshaler: kafka.DefaultMarshaler{}, + }, logger) + if err != nil { + return nil, err + } + + subscriber, err := kafka.NewSubscriber(kafka.SubscriberConfig{ + Brokers: config.Brokers, + Unmarshaler: kafka.DefaultMarshaler{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/providers/event/provider_redis.go b/providers/event/provider_redis.go new file mode 100644 index 0000000..2fc08de --- /dev/null +++ b/providers/event/provider_redis.go @@ -0,0 +1,50 @@ +package event + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/redis/go-redis/v9" +) + +func ProvideRedis(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func(rdb redis.UniversalClient) (*PubSub, error) { + logger := LogrusAdapter() + + subscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{ + Client: rdb, + Unmarshaller: redisstream.DefaultMarshallerUnmarshaller{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + publisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{ + Client: rdb, + Marshaller: redisstream.DefaultMarshallerUnmarshaller{}, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) +} diff --git a/providers/event/provider_sql.go b/providers/event/provider_sql.go new file mode 100644 index 0000000..033e2ef --- /dev/null +++ b/providers/event/provider_sql.go @@ -0,0 +1,50 @@ +package event + +import ( + sqlDB "database/sql" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/ThreeDotsLabs/watermill-sql/v3/pkg/sql" + "github.com/ThreeDotsLabs/watermill/message" +) + +func ProvideSQL(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func(db *sqlDB.DB) (*PubSub, error) { + logger := LogrusAdapter() + + publisher, err := sql.NewPublisher(db, sql.PublisherConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + AutoInitializeSchema: false, + }, logger) + if err != nil { + return nil, err + } + + subscriber, err := sql.NewSubscriber(db, sql.SubscriberConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + ConsumerGroup: config.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{ + Publisher: publisher, + Subscriber: subscriber, + Router: router, + }, nil + }, o.DiOptions()...) 
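
The event providers above all return the same *PubSub wrapper, so handler wiring looks the same regardless of the chosen backend (go-channel, Kafka, Redis stream, or SQL). A hedged usage sketch, not part of this commit, with placeholder topic names:

```go
package example

import (
	"context"

	"quyun/providers/event"

	"github.com/ThreeDotsLabs/watermill/message"
)

// RunEvents registers one handler on the router and then runs it. The topic
// names are placeholders; the handler simply forwards the payload.
func RunEvents(ctx context.Context, ps *event.PubSub) error {
	ps.Handle(
		"orders-paid-handler",
		"orders.paid",     // consume from this topic
		"orders.notified", // publish handler results to this topic
		func(msg *message.Message) ([]*message.Message, error) {
			out := message.NewMessage(msg.UUID, msg.Payload)
			return []*message.Message{out}, nil
		},
	)

	// Serve blocks until ctx is cancelled.
	return ps.Serve(ctx)
}
```
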
+} diff --git a/providers/grpc/config.go b/providers/grpc/config.go new file mode 100644 index 0000000..ffbcb13 --- /dev/null +++ b/providers/grpc/config.go @@ -0,0 +1,53 @@ +package grpc + +import ( + "fmt" + "net" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "google.golang.org/grpc" +) + +const DefaultPrefix = "Grpc" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Host *string + Port uint +} + +func (h *Config) Address() string { + if h.Host == nil { + return fmt.Sprintf(":%d", h.Port) + } + return fmt.Sprintf("%s:%d", *h.Host, h.Port) +} + +type Grpc struct { + Server *grpc.Server + config *Config +} + +// Serve +func (g *Grpc) Serve() error { + l, err := net.Listen("tcp", g.config.Address()) + if err != nil { + return err + } + + return g.Server.Serve(l) +} + +func (g *Grpc) ServeWithListener(ln net.Listener) error { + return g.Server.Serve(ln) +} diff --git a/providers/grpc/provider.go b/providers/grpc/provider.go new file mode 100644 index 0000000..b517f76 --- /dev/null +++ b/providers/grpc/provider.go @@ -0,0 +1,27 @@ +package grpc + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "google.golang.org/grpc" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*Grpc, error) { + server := grpc.NewServer() + + grpc := &Grpc{ + Server: server, + config: &config, + } + container.AddCloseAble(grpc.Server.GracefulStop) + + return grpc, nil + }, o.DiOptions()...) +} diff --git a/providers/hashids/config.go b/providers/hashids/config.go new file mode 100644 index 0000000..f57b9e0 --- /dev/null +++ b/providers/hashids/config.go @@ -0,0 +1,23 @@ +package hashids + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "HashIDs" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Alphabet string + Salt string + MinLength uint +} diff --git a/providers/hashids/hashids.go b/providers/hashids/hashids.go new file mode 100644 index 0000000..1d09fc0 --- /dev/null +++ b/providers/hashids/hashids.go @@ -0,0 +1,35 @@ +package hashids + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/speps/go-hashids/v2" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*hashids.HashID, error) { + data := hashids.NewData() + data.MinLength = int(config.MinLength) + if data.MinLength == 0 { + data.MinLength = 10 + } + + data.Salt = config.Salt + if data.Salt == "" { + data.Salt = "default-salt-key" + } + + data.Alphabet = config.Alphabet + if config.Alphabet == "" { + data.Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + } + + return hashids.NewWithData(data) + }, o.DiOptions()...) 
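The hashids provider falls back to a 10-character minimum length, a built-in salt and the full alphanumeric alphabet whenever the config leaves those fields empty. A small round-trip sketch with the resulting `*hashids.HashID`; the salt and ID values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/speps/go-hashids/v2"
)

func main() {
	// Mirror the provider's fallback settings for a standalone example.
	data := hashids.NewData()
	data.Salt = "default-salt-key"
	data.MinLength = 10
	h, err := hashids.NewWithData(data)
	if err != nil {
		panic(err)
	}

	// Obfuscate a numeric ID into a short string...
	token, _ := h.EncodeInt64([]int64{42})
	fmt.Println(token) // a 10+ character string (value depends on the salt)

	// ...and decode it back.
	ids, err := h.DecodeInt64WithError(token)
	if err != nil {
		panic(err)
	}
	fmt.Println(ids[0]) // 42
}
```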
+} diff --git a/providers/http/config.go b/providers/http/config.go new file mode 100644 index 0000000..611f210 --- /dev/null +++ b/providers/http/config.go @@ -0,0 +1,38 @@ +package http + +import ( + "fmt" +) + +const DefaultPrefix = "Http" + +type Config struct { + StaticPath *string + StaticRoute *string + BaseURI *string + Port uint + Tls *Tls + Cors *Cors +} + +type Tls struct { + Cert string + Key string +} + +type Cors struct { + Mode string + Whitelist []Whitelist +} + +type Whitelist struct { + AllowOrigin string + AllowHeaders string + AllowMethods string + ExposeHeaders string + AllowCredentials bool +} + +func (h *Config) Address() string { + return fmt.Sprintf(":%d", h.Port) +} diff --git a/providers/http/engine.go b/providers/http/engine.go new file mode 100644 index 0000000..d060c5b --- /dev/null +++ b/providers/http/engine.go @@ -0,0 +1,100 @@ +package http + +import ( + "errors" + "fmt" + "net" + "runtime/debug" + "time" + + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/logger" + "github.com/gofiber/fiber/v3/middleware/recover" +) + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Service struct { + conf *Config + Engine *fiber.App +} + +func (svc *Service) listenerConfig() fiber.ListenConfig { + listenConfig := fiber.ListenConfig{ + EnablePrintRoutes: true, + OnShutdownSuccess: func() { + log.Info("http server shutdown success") + }, + OnShutdownError: func(err error) { + log.Error("http server shutdown error: ", err) + }, + + // DisableStartupMessage: true, + } + + if svc.conf.Tls != nil { + if svc.conf.Tls.Cert == "" || svc.conf.Tls.Key == "" { + panic(errors.New("tls cert and key must be set")) + } + listenConfig.CertFile = svc.conf.Tls.Cert + listenConfig.CertKeyFile = svc.conf.Tls.Key + } + container.AddCloseAble(func() { + svc.Engine.Shutdown() + }) + return listenConfig +} + +func (svc *Service) Listener(ln net.Listener) error { + return svc.Engine.Listener(ln, svc.listenerConfig()) +} + +func (svc *Service) Serve() error { + return svc.Engine.Listen(svc.conf.Address(), svc.listenerConfig()) +} + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*Service, error) { + engine := fiber.New(fiber.Config{ + StrictRouting: true, + }) + engine.Use(recover.New(recover.Config{ + EnableStackTrace: true, + StackTraceHandler: func(c fiber.Ctx, e any) { + log.Error(fmt.Sprintf("panic: %v\n%s\n", e, debug.Stack())) + }, + })) + + if config.StaticRoute != nil && config.StaticPath != nil { + engine.Use(config.StaticRoute, config.StaticPath) + } + + engine.Use(logger.New(logger.Config{ + Format: `[${ip}:${port}] - [${time}] - ${method} - ${status} - ${path} ${latency} "${ua}"` + "\n", + TimeFormat: time.RFC1123, + TimeZone: "Asia/Shanghai", + })) + + return &Service{ + Engine: engine, + conf: &config, + }, nil + }, o.DiOptions()...) 
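The HTTP provider builds a strict-routing fiber v3 app with recover and request-logging middleware already attached, and exposes it as `Service.Engine`. A sketch of mounting a route on that engine and starting the listener; the `/healthz` path and the `quyun` module path are assumptions:

```go
package examples

import (
	"quyun/providers/http" // assumed module path

	"github.com/gofiber/fiber/v3"
)

// ServeHTTP is a sketch: it adds one route to the provider-built engine and
// then blocks in Serve, which applies the TLS and shutdown settings from the
// provider config.
func ServeHTTP(svc *http.Service) error {
	svc.Engine.Get("/healthz", func(c fiber.Ctx) error {
		return c.SendString("ok")
	})
	return svc.Serve()
}
```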
+} diff --git a/providers/http/swagger/config.go b/providers/http/swagger/config.go new file mode 100644 index 0000000..4b535a7 --- /dev/null +++ b/providers/http/swagger/config.go @@ -0,0 +1,317 @@ +package swagger + +import ( + "html/template" +) + +// Config stores SwaggerUI configuration variables +type Config struct { + // This parameter can be used to name different swagger document instances. + // default: "" + InstanceName string `json:"-"` + + // Title pointing to title of HTML page. + // default: "Swagger UI" + Title string `json:"-"` + + // URL to fetch external configuration document from. + // default: "" + ConfigURL string `json:"configUrl,omitempty"` + + // The URL pointing to API definition (normally swagger.json or swagger.yaml). + // default: "doc.json" + URL string `json:"url,omitempty"` + + // Enables overriding configuration parameters via URL search params. + // default: false + QueryConfigEnabled bool `json:"queryConfigEnabled,omitempty"` + + // The name of a component available via the plugin system to use as the top-level layout for Swagger UI. + // default: "StandaloneLayout" + Layout string `json:"layout,omitempty"` + + // An array of plugin functions to use in Swagger UI. + // default: [SwaggerUIBundle.plugins.DownloadUrl] + Plugins []template.JS `json:"-"` + + // An array of presets to use in Swagger UI. Usually, you'll want to include ApisPreset if you use this option. + // default: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset] + Presets []template.JS `json:"-"` + + // If set to true, enables deep linking for tags and operations. + // default: true + DeepLinking bool `json:"deepLinking"` + + // Controls the display of operationId in operations list. + // default: false + DisplayOperationId bool `json:"displayOperationId,omitempty"` + + // The default expansion depth for models (set to -1 completely hide the models). + // default: 1 + DefaultModelsExpandDepth int `json:"defaultModelsExpandDepth,omitempty"` + + // The default expansion depth for the model on the model-example section. + // default: 1 + DefaultModelExpandDepth int `json:"defaultModelExpandDepth,omitempty"` + + // Controls how the model is shown when the API is first rendered. + // The user can always switch the rendering for a given model by clicking the 'Model' and 'Example Value' links. + // default: "example" + DefaultModelRendering string `json:"defaultModelRendering,omitempty"` + + // Controls the display of the request duration (in milliseconds) for "Try it out" requests. + // default: false + DisplayRequestDuration bool `json:"displayRequestDuration,omitempty"` + + // Controls the default expansion setting for the operations and tags. + // 'list' (default, expands only the tags), + // 'full' (expands the tags and operations), + // 'none' (expands nothing) + DocExpansion string `json:"docExpansion,omitempty"` + + // If set, enables filtering. The top bar will show an edit box that you can use to filter the tagged operations that are shown. + // Can be Boolean to enable or disable, or a string, in which case filtering will be enabled using that string as the filter expression. + // Filtering is case sensitive matching the filter expression anywhere inside the tag. + // default: false + Filter FilterConfig `json:"-"` + + // If set, limits the number of tagged operations displayed to at most this many. The default is to show all operations. 
+ // default: 0 + MaxDisplayedTags int `json:"maxDisplayedTags,omitempty"` + + // Controls the display of vendor extension (x-) fields and values for Operations, Parameters, Responses, and Schema. + // default: false + ShowExtensions bool `json:"showExtensions,omitempty"` + + // Controls the display of extensions (pattern, maxLength, minLength, maximum, minimum) fields and values for Parameters. + // default: false + ShowCommonExtensions bool `json:"showCommonExtensions,omitempty"` + + // Apply a sort to the tag list of each API. It can be 'alpha' (sort by paths alphanumerically) or a function (see Array.prototype.sort(). + // to learn how to write a sort function). Two tag name strings are passed to the sorter for each pass. + // default: "" -> Default is the order determined by Swagger UI. + TagsSorter template.JS `json:"-"` + + // Provides a mechanism to be notified when Swagger UI has finished rendering a newly provided definition. + // default: "" -> Function=NOOP + OnComplete template.JS `json:"-"` + + // An object with the activate and theme properties. + SyntaxHighlight *SyntaxHighlightConfig `json:"-"` + + // Controls whether the "Try it out" section should be enabled by default. + // default: false + TryItOutEnabled bool `json:"tryItOutEnabled,omitempty"` + + // Enables the request snippet section. When disabled, the legacy curl snippet will be used. + // default: false + RequestSnippetsEnabled bool `json:"requestSnippetsEnabled,omitempty"` + + // OAuth redirect URL. + // default: "" + OAuth2RedirectUrl string `json:"oauth2RedirectUrl,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 requests. + // Accepts one argument requestInterceptor(request) and must return the modified request, or a Promise that resolves to the modified request. + // default: "" + RequestInterceptor template.JS `json:"-"` + + // If set, MUST be an array of command line options available to the curl command. This can be set on the mutated request in the requestInterceptor function. + // For example request.curlOptions = ["-g", "--limit-rate 20k"] + // default: nil + RequestCurlOptions []string `json:"request.curlOptions,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 responses. + // Accepts one argument responseInterceptor(response) and must return the modified response, or a Promise that resolves to the modified response. + // default: "" + ResponseInterceptor template.JS `json:"-"` + + // If set to true, uses the mutated request returned from a requestInterceptor to produce the curl command in the UI, + // otherwise the request before the requestInterceptor was applied is used. + // default: true + ShowMutatedRequest bool `json:"showMutatedRequest"` + + // List of HTTP methods that have the "Try it out" feature enabled. An empty array disables "Try it out" for all operations. + // This does not filter the operations from the display. + // Possible values are ["get", "put", "post", "delete", "options", "head", "patch", "trace"] + // default: nil + SupportedSubmitMethods []string `json:"supportedSubmitMethods,omitempty"` + + // By default, Swagger UI attempts to validate specs against swagger.io's online validator. You can use this parameter to set a different validator URL. + // For example for locally deployed validators (https://github.com/swagger-api/validator-badge). + // Setting it to either none, 127.0.0.1 or localhost will disable validation. 
+ // default: "" + ValidatorUrl string `json:"validatorUrl,omitempty"` + + // If set to true, enables passing credentials, as defined in the Fetch standard, in CORS requests that are sent by the browser. + // Note that Swagger UI cannot currently set cookies cross-domain (see https://github.com/swagger-api/swagger-js/issues/1163). + // as a result, you will have to rely on browser-supplied cookies (which this setting enables sending) that Swagger UI cannot control. + // default: false + WithCredentials bool `json:"withCredentials,omitempty"` + + // Function to set default values to each property in model. Accepts one argument modelPropertyMacro(property), property is immutable. + // default: "" + ModelPropertyMacro template.JS `json:"-"` + + // Function to set default value to parameters. Accepts two arguments parameterMacro(operation, parameter). + // Operation and parameter are objects passed for context, both remain immutable. + // default: "" + ParameterMacro template.JS `json:"-"` + + // If set to true, it persists authorization data and it would not be lost on browser close/refresh. + // default: false + PersistAuthorization bool `json:"persistAuthorization,omitempty"` + + // Configuration information for OAuth2, optional if using OAuth2 + OAuth *OAuthConfig `json:"-"` + + // (authDefinitionKey, username, password) => action + // Programmatically set values for a Basic authorization scheme. + // default: "" + PreauthorizeBasic template.JS `json:"-"` + + // (authDefinitionKey, apiKeyValue) => action + // Programmatically set values for an API key or Bearer authorization scheme. + // In case of OpenAPI 3.0 Bearer scheme, apiKeyValue must contain just the token itself without the Bearer prefix. + // default: "" + PreauthorizeApiKey template.JS `json:"-"` + + // Applies custom CSS styles. + // default: "" + CustomStyle template.CSS `json:"-"` + + // Applies custom JavaScript scripts. + // default "" + CustomScript template.JS `json:"-"` +} + +type FilterConfig struct { + Enabled bool + Expression string +} + +func (fc FilterConfig) Value() interface{} { + if fc.Expression != "" { + return fc.Expression + } + return fc.Enabled +} + +type SyntaxHighlightConfig struct { + // Whether syntax highlighting should be activated or not. + // default: true + Activate bool `json:"activate"` + // Highlight.js syntax coloring theme to use. + // Possible values are ["agate", "arta", "monokai", "nord", "obsidian", "tomorrow-night"] + // default: "agate" + Theme string `json:"theme,omitempty"` +} + +func (shc SyntaxHighlightConfig) Value() interface{} { + if shc.Activate { + return shc + } + return false +} + +type OAuthConfig struct { + // ID of the client sent to the OAuth2 provider. + // default: "" + ClientId string `json:"clientId,omitempty"` + + // Never use this parameter in your production environment. + // It exposes cruicial security information. This feature is intended for dev/test environments only. + // Secret of the client sent to the OAuth2 provider. + // default: "" + ClientSecret string `json:"clientSecret,omitempty"` + + // Application name, displayed in authorization popup. + // default: "" + AppName string `json:"appName,omitempty"` + + // Realm query parameter (for oauth1) added to authorizationUrl and tokenUrl. + // default: "" + Realm string `json:"realm,omitempty"` + + // String array of initially selected oauth scopes + // default: nil + Scopes []string `json:"scopes,omitempty"` + + // Additional query parameters added to authorizationUrl and tokenUrl. 
+ // default: nil + AdditionalQueryStringParams map[string]string `json:"additionalQueryStringParams,omitempty"` + + // Unavailable Only activated for the accessCode flow. + // During the authorization_code request to the tokenUrl, pass the Client Password using the HTTP Basic Authentication scheme + // (Authorization header with Basic base64encode(client_id + client_secret)). + // default: false + UseBasicAuthenticationWithAccessCodeGrant bool `json:"useBasicAuthenticationWithAccessCodeGrant,omitempty"` + + // Only applies to authorizatonCode flows. + // Proof Key for Code Exchange brings enhanced security for OAuth public clients. + // default: false + UsePkceWithAuthorizationCodeGrant bool `json:"usePkceWithAuthorizationCodeGrant,omitempty"` +} + +var ConfigDefault = Config{ + Title: "Swagger UI", + Layout: "StandaloneLayout", + Plugins: []template.JS{ + template.JS("SwaggerUIBundle.plugins.DownloadUrl"), + }, + Presets: []template.JS{ + template.JS("SwaggerUIBundle.presets.apis"), + template.JS("SwaggerUIStandalonePreset"), + }, + DeepLinking: true, + DefaultModelsExpandDepth: 1, + DefaultModelExpandDepth: 1, + DefaultModelRendering: "example", + DocExpansion: "list", + SyntaxHighlight: &SyntaxHighlightConfig{ + Activate: true, + Theme: "agate", + }, + ShowMutatedRequest: true, +} + +// Helper function to set default values +func configDefault(config ...Config) Config { + // Return default config if nothing provided + if len(config) < 1 { + return ConfigDefault + } + + // Override default config + cfg := config[0] + + if cfg.Title == "" { + cfg.Title = ConfigDefault.Title + } + + if cfg.Layout == "" { + cfg.Layout = ConfigDefault.Layout + } + + if cfg.DefaultModelRendering == "" { + cfg.DefaultModelRendering = ConfigDefault.DefaultModelRendering + } + + if cfg.DocExpansion == "" { + cfg.DocExpansion = ConfigDefault.DocExpansion + } + + if cfg.Plugins == nil { + cfg.Plugins = ConfigDefault.Plugins + } + + if cfg.Presets == nil { + cfg.Presets = ConfigDefault.Presets + } + + if cfg.SyntaxHighlight == nil { + cfg.SyntaxHighlight = ConfigDefault.SyntaxHighlight + } + + return cfg +} diff --git a/providers/http/swagger/swagger.go b/providers/http/swagger/swagger.go new file mode 100644 index 0000000..0722e61 --- /dev/null +++ b/providers/http/swagger/swagger.go @@ -0,0 +1,103 @@ +package swagger + +import ( + "fmt" + "html/template" + "path" + "strings" + "sync" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/static" + "github.com/gofiber/utils/v2" + "github.com/rogeecn/swag" + swaggerFiles "github.com/swaggo/files/v2" +) + +const ( + defaultDocURL = "doc.json" + defaultIndex = "index.html" +) + +var HandlerDefault = New() + +// New returns custom handler +func New(config ...Config) fiber.Handler { + cfg := configDefault(config...) 
+ + index, err := template.New("swagger_index.html").Parse(indexTmpl) + if err != nil { + panic(fmt.Errorf("fiber: swagger middleware error -> %w", err)) + } + + var ( + prefix string + once sync.Once + ) + + return func(c fiber.Ctx) error { + // Set prefix + once.Do( + func() { + prefix = strings.ReplaceAll(c.Route().Path, "*", "") + + forwardedPrefix := getForwardedPrefix(c) + if forwardedPrefix != "" { + prefix = forwardedPrefix + prefix + } + + // Set doc url + if len(cfg.URL) == 0 { + cfg.URL = path.Join(prefix, defaultDocURL) + } + }, + ) + + p := c.Path(utils.CopyString(c.Params("*"))) + + switch p { + case defaultIndex: + c.Type("html") + return index.Execute(c, cfg) + case defaultDocURL: + var doc string + if doc, err = swag.ReadDoc(cfg.InstanceName); err != nil { + return err + } + return c.Type("json").SendString(doc) + case "", "/": + return c.Redirect().To(path.Join(prefix, defaultIndex)) + default: + // return fs(c) + return static.New("/swagger", static.Config{ + FS: swaggerFiles.FS, + Browse: true, + })(c) + } + } +} + +func getForwardedPrefix(c fiber.Ctx) string { + header := c.GetReqHeaders()["X-Forwarded-Prefix"] + + if len(header) == 0 { + return "" + } + + prefix := "" + + for _, rawPrefix := range header { + endIndex := len(rawPrefix) + for endIndex > 1 && rawPrefix[endIndex-1] == '/' { + endIndex-- + } + + if endIndex != len(rawPrefix) { + prefix += rawPrefix[:endIndex] + } else { + prefix += rawPrefix + } + } + + return prefix +} diff --git a/providers/http/swagger/template.go b/providers/http/swagger/template.go new file mode 100644 index 0000000..d90607f --- /dev/null +++ b/providers/http/swagger/template.go @@ -0,0 +1,107 @@ +package swagger + +const indexTmpl string = ` + + + + + + {{.Title}} + + + + + {{- if .CustomStyle}} + + {{- end}} + {{- if .CustomScript}} + + {{- end}} + + + + + + + + + + + + + + + + + + + + + + + + + + + +
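The handler above serves the UI index page, the generated `doc.json` and the bundled swagger assets from a single wildcard route. A sketch of mounting it on the fiber engine; the route prefix and the blank `docs` import (the swag-generated package that registers the spec) are assumptions:

```go
package examples

import (
	"quyun/providers/http"         // assumed module path
	"quyun/providers/http/swagger" // the package shown above

	_ "quyun/docs" // hypothetical swag-generated docs package
)

// MountSwagger exposes the Swagger UI under /swagger/*.
func MountSwagger(svc *http.Service) {
	svc.Engine.Get("/swagger/*", swagger.New(swagger.Config{
		Title:       "API Docs",
		DeepLinking: true, // booleans are not re-defaulted by configDefault
	}))
	// Or, with all defaults:
	// svc.Engine.Get("/swagger/*", swagger.HandlerDefault)
}
```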
+ + + + + +` diff --git a/providers/job/config.go b/providers/job/config.go new file mode 100644 index 0000000..77b0f7d --- /dev/null +++ b/providers/job/config.go @@ -0,0 +1,33 @@ +package job + +import ( + "github.com/riverqueue/river" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "Job" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct{} + +const ( + PriorityDefault = river.PriorityDefault + PriorityLow = 2 + PriorityMiddle = 3 + PriorityHigh = 3 +) + +const ( + QueueHigh = "high" + QueueDefault = river.QueueDefault + QueueLow = "low" +) diff --git a/providers/job/provider.go b/providers/job/provider.go new file mode 100644 index 0000000..20a8641 --- /dev/null +++ b/providers/job/provider.go @@ -0,0 +1,187 @@ +package job + +import ( + "context" + "sync" + + "quyun/providers/postgres" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/pkg/errors" + "github.com/riverqueue/river" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func(ctx context.Context, dbConf *postgres.Config) (*Job, error) { + workers := river.NewWorkers() + + dbPoolConfig, err := pgxpool.ParseConfig(dbConf.DSN()) + if err != nil { + return nil, err + } + + dbPool, err := pgxpool.NewWithConfig(ctx, dbPoolConfig) + if err != nil { + return nil, err + } + container.AddCloseAble(dbPool.Close) + pool := riverpgxv5.New(dbPool) + + queue := &Job{Workers: workers, driver: pool, ctx: ctx, periodicJobs: make(map[string]rivertype.PeriodicJobHandle), jobs: make(map[string]*rivertype.JobInsertResult)} + container.AddCloseAble(queue.Close) + + return queue, nil + }, o.DiOptions()...) 
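The job provider only assembles the river plumbing (pgx pool, driver, the `Workers` registry and the queue wrapper); concrete workers still need to be registered on `Job.Workers` before the client starts. A hedged sketch of such a worker; the `EmailArgs` payload is hypothetical, and the extra `UniqueID`/`InsertOpts` methods required by the atom `contracts.JobArgs` interface are omitted here:

```go
package jobs

import (
	"context"

	"github.com/riverqueue/river"
	log "github.com/sirupsen/logrus"
)

// EmailArgs is a hypothetical job payload used only for illustration.
type EmailArgs struct {
	To      string `json:"to"`
	Subject string `json:"subject"`
}

// Kind satisfies river.JobArgs.
func (EmailArgs) Kind() string { return "email.send" }

// EmailWorker processes EmailArgs jobs.
type EmailWorker struct {
	river.WorkerDefaults[EmailArgs]
}

func (w *EmailWorker) Work(ctx context.Context, job *river.Job[EmailArgs]) error {
	log.WithField("to", job.Args.To).Info("sending email (sketch)")
	return nil
}

// RegisterWorkers would typically run before Job.Start so the river client
// knows about the worker; *river.Workers is the registry built by the provider.
func RegisterWorkers(workers *river.Workers) {
	river.AddWorker(workers, &EmailWorker{})
}
```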
+} + +type Job struct { + ctx context.Context + Workers *river.Workers + driver *riverpgxv5.Driver + + l sync.Mutex + client *river.Client[pgx.Tx] + + periodicJobs map[string]rivertype.PeriodicJobHandle + jobs map[string]*rivertype.JobInsertResult +} + +func (q *Job) Close() { + if q.client == nil { + return + } + + if err := q.client.StopAndCancel(q.ctx); err != nil { + log.Errorf("Failed to stop and cancel client: %s", err) + } +} + +func (q *Job) Client() (*river.Client[pgx.Tx], error) { + q.l.Lock() + defer q.l.Unlock() + + if q.client == nil { + var err error + q.client, err = river.NewClient(q.driver, &river.Config{ + Workers: q.Workers, + Queues: map[string]river.QueueConfig{ + QueueHigh: {MaxWorkers: 10}, + QueueDefault: {MaxWorkers: 10}, + QueueLow: {MaxWorkers: 10}, + }, + }) + if err != nil { + return nil, err + } + } + + return q.client, nil +} + +func (q *Job) Start(ctx context.Context) error { + client, err := q.Client() + if err != nil { + return errors.Wrap(err, "get client failed") + } + + if err := client.Start(ctx); err != nil { + return err + } + defer client.StopAndCancel(ctx) + + <-ctx.Done() + + return nil +} + +func (q *Job) StopAndCancel(ctx context.Context) error { + client, err := q.Client() + if err != nil { + return errors.Wrap(err, "get client failed") + } + + return client.StopAndCancel(ctx) +} + +func (q *Job) AddPeriodicJobs(job contracts.CronJob) error { + for _, job := range job.Args() { + if err := q.AddPeriodicJob(job); err != nil { + return err + } + } + return nil +} + +func (q *Job) AddPeriodicJob(job contracts.CronJobArg) error { + client, err := q.Client() + if err != nil { + return err + } + q.l.Lock() + defer q.l.Unlock() + + q.periodicJobs[job.Arg.UniqueID()] = client.PeriodicJobs().Add(river.NewPeriodicJob( + job.PeriodicInterval, + func() (river.JobArgs, *river.InsertOpts) { + return job.Arg, lo.ToPtr(job.Arg.InsertOpts()) + }, + &river.PeriodicJobOpts{ + RunOnStart: job.RunOnStart, + }, + )) + + return nil +} + +func (q *Job) Cancel(id string) error { + client, err := q.Client() + if err != nil { + return err + } + + q.l.Lock() + defer q.l.Unlock() + + if h, ok := q.periodicJobs[id]; ok { + client.PeriodicJobs().Remove(h) + delete(q.periodicJobs, id) + return nil + } + + if r, ok := q.jobs[id]; ok { + _, err = client.JobCancel(q.ctx, r.Job.ID) + if err != nil { + return err + } + delete(q.jobs, id) + return nil + } + + return nil +} + +func (q *Job) Add(job contracts.JobArgs) error { + client, err := q.Client() + if err != nil { + return err + } + + q.l.Lock() + defer q.l.Unlock() + + q.jobs[job.UniqueID()], err = client.Insert(q.ctx, job, lo.ToPtr(job.InsertOpts())) + return err +} diff --git a/providers/jwt/config.go b/providers/jwt/config.go new file mode 100644 index 0000000..dc227d4 --- /dev/null +++ b/providers/jwt/config.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "time" + + log "github.com/sirupsen/logrus" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "JWT" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + SigningKey string // jwt签名 + ExpiresTime string // 过期时间 + Issuer string // 签发者 +} + +func (c *Config) ExpiresTimeDuration() time.Duration { + d, err := time.ParseDuration(c.ExpiresTime) + if err != nil { + log.Fatal(err) + } + return d +} diff --git a/providers/jwt/jwt.go b/providers/jwt/jwt.go new file mode 100644 index 
0000000..dd94465 --- /dev/null +++ b/providers/jwt/jwt.go @@ -0,0 +1,118 @@ +package jwt + +import ( + "errors" + "strings" + "time" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + jwt "github.com/golang-jwt/jwt/v4" + "golang.org/x/sync/singleflight" +) + +const ( + CtxKey = "claims" + HttpHeader = "Authorization" +) + +type BaseClaims struct { + OpenID string `json:"open_id,omitempty"` + Tenant string `json:"tenant,omitempty"` + UserID int64 `json:"user_id,omitempty"` + TenantID int64 `json:"tenant_id,omitempty"` +} + +// Custom claims structure +type Claims struct { + BaseClaims + jwt.RegisteredClaims +} + +const TokenPrefix = "Bearer " + +type JWT struct { + singleflight *singleflight.Group + config *Config + SigningKey []byte +} + +var ( + TokenExpired = errors.New("Token is expired") + TokenNotValidYet = errors.New("Token not active yet") + TokenMalformed = errors.New("That's not even a token") + TokenInvalid = errors.New("Couldn't handle this token:") +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*JWT, error) { + return &JWT{ + singleflight: &singleflight.Group{}, + config: &config, + SigningKey: []byte(config.SigningKey), + }, nil + }, o.DiOptions()...) +} + +func (j *JWT) CreateClaims(baseClaims BaseClaims) *Claims { + ep, _ := time.ParseDuration(j.config.ExpiresTime) + claims := Claims{ + BaseClaims: baseClaims, + RegisteredClaims: jwt.RegisteredClaims{ + NotBefore: jwt.NewNumericDate(time.Now().Add(-time.Second * 10)), // 签名生效时间 + ExpiresAt: jwt.NewNumericDate(time.Now().Add(ep)), // 过期时间 7天 配置文件 + Issuer: j.config.Issuer, // 签名的发行者 + }, + } + return &claims +} + +// 创建一个token +func (j *JWT) CreateToken(claims *Claims) (string, error) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString(j.SigningKey) +} + +// CreateTokenByOldToken 旧token 换新token 使用归并回源避免并发问题 +func (j *JWT) CreateTokenByOldToken(oldToken string, claims *Claims) (string, error) { + v, err, _ := j.singleflight.Do("JWT:"+oldToken, func() (interface{}, error) { + return j.CreateToken(claims) + }) + return v.(string), err +} + +// 解析 token +func (j *JWT) Parse(tokenString string) (*Claims, error) { + tokenString = strings.TrimPrefix(tokenString, TokenPrefix) + token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (i interface{}, e error) { + return j.SigningKey, nil + }) + if err != nil { + if ve, ok := err.(*jwt.ValidationError); ok { + if ve.Errors&jwt.ValidationErrorMalformed != 0 { + return nil, TokenMalformed + } else if ve.Errors&jwt.ValidationErrorExpired != 0 { + // Token is expired + return nil, TokenExpired + } else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 { + return nil, TokenNotValidYet + } else { + return nil, TokenInvalid + } + } + } + if token != nil { + if claims, ok := token.Claims.(*Claims); ok && token.Valid { + return claims, nil + } + return nil, TokenInvalid + } else { + return nil, TokenInvalid + } +} diff --git a/providers/otel/config.go b/providers/otel/config.go new file mode 100644 index 0000000..f09a9a8 --- /dev/null +++ b/providers/otel/config.go @@ -0,0 +1,54 @@ +package otel + +import ( + "os" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "OTEL" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + 
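The JWT wrapper centralises claim construction, signing and parsing. A round-trip sketch, assuming the `*JWT` value comes out of the container and that the claim values are placeholders:

```go
package examples

import (
	"fmt"

	"quyun/providers/jwt" // assumed module path
)

// IssueAndVerify shows the token lifecycle exposed by the wrapper: build the
// claims, sign them, then parse the "Bearer ..." header value back.
func IssueAndVerify(j *jwt.JWT) error {
	claims := j.CreateClaims(jwt.BaseClaims{
		UserID:   1001, // placeholder
		TenantID: 1,    // placeholder
	})

	token, err := j.CreateToken(claims)
	if err != nil {
		return err
	}

	// Parse strips the "Bearer " prefix itself, so the raw header value works.
	parsed, err := j.Parse(jwt.TokenPrefix + token)
	if err != nil {
		return err
	}
	fmt.Println(parsed.UserID) // 1001
	return nil
}
```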
opt.Prefix(DefaultPrefix), + opt.Group(atom.GroupInitialName), + }, + } +} + +type Config struct { + ServiceName string + Version string + Env string + + EndpointGRPC string + EndpointHTTP string + Token string +} + +func (c *Config) format() { + if c.ServiceName == "" { + c.ServiceName = os.Getenv("SERVICE_NAME") + if c.ServiceName == "" { + c.ServiceName = "unknown" + } + } + + if c.Version == "" { + c.Version = os.Getenv("SERVICE_VERSION") + if c.Version == "" { + c.Version = "unknown" + } + } + + if c.Env == "" { + c.Env = os.Getenv("DEPLOY_ENVIRONMENT") + if c.Env == "" { + c.Env = "unknown" + } + } +} diff --git a/providers/otel/docker/.env b/providers/otel/docker/.env new file mode 100644 index 0000000..7a96191 --- /dev/null +++ b/providers/otel/docker/.env @@ -0,0 +1,30 @@ +# Dependent images +GRAFANA_IMAGE=docker.hub.ipao.vip/grafana/grafana:11.4.0 +JAEGERTRACING_IMAGE=docker.hub.ipao.vip/jaegertracing/all-in-one:1.64.0 +OPENSEARCH_IMAGE=docker.hub.ipao.vip/opensearchproject/opensearch:2.18.0 +COLLECTOR_CONTRIB_IMAGE=docker-ghcr.hub.ipao.vip/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.116.1 +PROMETHEUS_IMAGE=docker-quay.hub.ipao.vip/prometheus/prometheus:v3.0.1 + +# OpenTelemetry Collector +HOST_FILESYSTEM=/ +DOCKER_SOCK=/var/run/docker.sock +OTEL_COLLECTOR_HOST=otel-collector +OTEL_COLLECTOR_PORT_GRPC=4317 +OTEL_COLLECTOR_PORT_HTTP=4318 +OTEL_COLLECTOR_CONFIG=./otel-collector/otelcol-config.yml +OTEL_COLLECTOR_CONFIG_EXTRAS=./otel-collector/otelcol-config-extras.yml +OTEL_EXPORTER_OTLP_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:${OTEL_COLLECTOR_PORT_GRPC} +PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:8080/otlp-http/v1/traces + +# Grafana +GRAFANA_SERVICE_PORT=3000 +GRAFANA_SERVICE_HOST=grafana + +# Jaeger +JAEGER_SERVICE_PORT=16686 +JAEGER_SERVICE_HOST=jaeger + +# Prometheus +PROMETHEUS_SERVICE_PORT=9090 +PROMETHEUS_SERVICE_HOST=prometheus +PROMETHEUS_ADDR=${PROMETHEUS_SERVICE_HOST}:${PROMETHEUS_SERVICE_PORT} diff --git a/providers/otel/docker/docker-compose.yaml b/providers/otel/docker/docker-compose.yaml new file mode 100644 index 0000000..cdbe983 --- /dev/null +++ b/providers/otel/docker/docker-compose.yaml @@ -0,0 +1,153 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +x-default-logging: &logging + driver: "json-file" + options: + max-size: "5m" + max-file: "2" + tag: "{{.Name}}" + +networks: + default: + name: opentelemetry-demo + driver: bridge + +services: + # ******************** + # Telemetry Components + # ******************** + # Jaeger + jaeger: + image: ${JAEGERTRACING_IMAGE} + container_name: jaeger + command: + - "--memory.max-traces=5000" + - "--query.base-path=/jaeger/ui" + - "--prometheus.server-url=http://${PROMETHEUS_ADDR}" + - "--prometheus.query.normalize-calls=true" + - "--prometheus.query.normalize-duration=true" + deploy: + resources: + limits: + memory: 400M + restart: unless-stopped + ports: + - "${JAEGER_SERVICE_PORT}:${JAEGER_SERVICE_PORT}" # Jaeger UI + # - "${OTEL_COLLECTOR_PORT_GRPC}" + environment: + - METRICS_STORAGE_TYPE=prometheus + logging: *logging + + # Grafana + grafana: + image: ${GRAFANA_IMAGE} + container_name: grafana + deploy: + resources: + limits: + memory: 100M + restart: unless-stopped + environment: + - "GF_INSTALL_PLUGINS=grafana-opensearch-datasource" + volumes: + - ./grafana/grafana.ini:/etc/grafana/grafana.ini + - ./grafana/provisioning/:/etc/grafana/provisioning/ + ports: + - "${GRAFANA_SERVICE_PORT}:${GRAFANA_SERVICE_PORT}" + logging: *logging 
+ + # OpenTelemetry Collector + otel-collector: + image: ${COLLECTOR_CONTRIB_IMAGE} + container_name: otel-collector + deploy: + resources: + limits: + memory: 200M + restart: unless-stopped + command: + [ + "--config=/etc/otelcol-config.yml", + "--config=/etc/otelcol-config-extras.yml", + ] + user: 0:0 + volumes: + - ${HOST_FILESYSTEM}:/hostfs:ro + - ${DOCKER_SOCK}:/var/run/docker.sock:ro + - ${OTEL_COLLECTOR_CONFIG}:/etc/otelcol-config.yml + - ${OTEL_COLLECTOR_CONFIG_EXTRAS}:/etc/otelcol-config-extras.yml + ports: + - "${OTEL_COLLECTOR_PORT_GRPC}:${OTEL_COLLECTOR_PORT_GRPC}" + - "${OTEL_COLLECTOR_PORT_HTTP}:${OTEL_COLLECTOR_PORT_HTTP}" + depends_on: + jaeger: + condition: service_started + opensearch: + condition: service_healthy + logging: *logging + environment: + - ENVOY_PORT + - HOST_FILESYSTEM + - OTEL_COLLECTOR_HOST + - OTEL_COLLECTOR_PORT_GRPC + - OTEL_COLLECTOR_PORT_HTTP + + # Prometheus + prometheus: + image: ${PROMETHEUS_IMAGE} + container_name: prometheus + command: + - --web.console.templates=/etc/prometheus/consoles + - --web.console.libraries=/etc/prometheus/console_libraries + - --storage.tsdb.retention.time=1h + - --config.file=/etc/prometheus/prometheus-config.yaml + - --storage.tsdb.path=/prometheus + - --web.enable-lifecycle + - --web.route-prefix=/ + - --web.enable-otlp-receiver + - --enable-feature=exemplar-storage + volumes: + - ./prometheus/prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml + deploy: + resources: + limits: + memory: 300M + restart: unless-stopped + ports: + - "${PROMETHEUS_SERVICE_PORT}:${PROMETHEUS_SERVICE_PORT}" + logging: *logging + + # OpenSearch + opensearch: + image: ${OPENSEARCH_IMAGE} + container_name: opensearch + deploy: + resources: + limits: + memory: 1G + restart: unless-stopped + environment: + - cluster.name=demo-cluster + - node.name=demo-node + - bootstrap.memory_lock=true + - discovery.type=single-node + - OPENSEARCH_JAVA_OPTS=-Xms300m -Xmx300m + - DISABLE_INSTALL_DEMO_CONFIG=true + - DISABLE_SECURITY_PLUGIN=true + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + ports: + - "9200:9200" + healthcheck: + test: curl -s http://localhost:9200/_cluster/health | grep -E '"status":"(green|yellow)"' + start_period: 10s + interval: 5s + timeout: 10s + retries: 10 + logging: *logging diff --git a/providers/otel/docker/grafana/grafana.ini b/providers/otel/docker/grafana/grafana.ini new file mode 100644 index 0000000..c21262f --- /dev/null +++ b/providers/otel/docker/grafana/grafana.ini @@ -0,0 +1,1170 @@ +##################### Grafana Configuration Example ##################### +# +# Everything has defaults so you only need to uncomment things you want to +# change + +# possible values : production, development +;app_mode = production + +# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty +;instance_name = ${HOSTNAME} + +# force migration will run migrations that might cause dataloss +;force_migration = false + +#################################### Paths #################################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +;data = /var/lib/grafana + +# Temporary files in `data` directory older than given duration will be removed +;temp_data_lifetime = 24h + +# Directory where grafana can store logs +;logs = /var/log/grafana + +# Directory where grafana will automatically scan and look for plugins +;plugins = /var/lib/grafana/plugins + +# folder that contains provisioning config 
files that grafana will apply on startup and while running. +provisioning = /etc/grafana/provisioning + +#################################### Server #################################### +[server] +# Protocol (http, https, h2, socket) +protocol = http + +# The ip address to bind to, empty will bind to all interfaces +;http_addr = + +# The http port to use +http_port = 3000 + +# The public facing domain name used to access grafana from a browser +domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +;enforce_domain = false + +# The full public facing url you use in browser, used for redirects and emails +# If you use reverse proxy and sub path specify full url (with sub path) +root_url = %(protocol)s://%(domain)s:3000/grafana/ + +# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons. +serve_from_sub_path = true + +# Log web requests +;router_logging = false + +# the path relative working path +;static_root_path = public + +# enable gzip +;enable_gzip = false + +# https certs & key file +;cert_file = +;cert_key = + +# Unix socket path +;socket = + +# CDN Url +;cdn_url = + +# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections. +# `0` means there is no timeout for reading the request. +;read_timeout = 0 + +#################################### Database #################################### +[database] +# You can configure the database connection by specifying type, host, name, user and password +# as separate properties or as on string using the url properties. + +# Either "mysql", "postgres" or "sqlite3", it's your choice +;type = sqlite3 +;host = 127.0.0.1:3306 +;name = grafana +;user = root +# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;""" +;password = + +# Use either URL or the previous fields to configure the database +# Example: mysql://user:secret@host:port/database +;url = + +# For "postgres" only, either "disable", "require" or "verify-full" +;ssl_mode = disable + +# Database drivers may support different transaction isolation levels. +# Currently, only "mysql" driver supports isolation levels. +# If the value is empty - driver's default isolation level is applied. +# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE". +;isolation_level = + +;ca_cert_path = +;client_key_path = +;client_cert_path = +;server_cert_name = + +# For "sqlite3" only, path relative to data_path setting +;path = grafana.db + +# Max idle conn setting default is 2 +;max_idle_conn = 2 + +# Max conn setting default is 0 (mean not set) +;max_open_conn = + +# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours) +;conn_max_lifetime = 14400 + +# Set to true to log the sql calls and execution times. +;log_queries = + +# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared) +;cache_mode = private + +# For "mysql" only if lockingMigration feature toggle is set. How many seconds to wait before failing to lock the database for the migrations, default is 0. +;locking_attempt_timeout_sec = 0 + +################################### Data sources ######################### +[datasources] +# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API. 
+;datasource_limit = 5000 + +#################################### Cache server ############################# +[remote_cache] +# Either "redis", "memcached" or "database" default is "database" +;type = database + +# cache connectionstring options +# database: will use Grafana primary database. +# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'. +# memcache: 127.0.0.1:11211 +;connstr = + +#################################### Data proxy ########################### +[dataproxy] + +# This enables data proxy logging, default is false +;logging = false + +# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds. +# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set. +;timeout = 30 + +# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds. +;dialTimeout = 10 + +# How many seconds the data proxy waits before sending a keepalive probe request. +;keep_alive_seconds = 30 + +# How many seconds the data proxy waits for a successful TLS Handshake before timing out. +;tls_handshake_timeout_seconds = 10 + +# How many seconds the data proxy will wait for a server's first response headers after +# fully writing the request headers if the request has an "Expect: 100-continue" +# header. A value of 0 will result in the body being sent immediately, without +# waiting for the server to approve. +;expect_continue_timeout_seconds = 1 + +# Optionally limits the total number of connections per host, including connections in the dialing, +# active, and idle states. On limit violation, dials will block. +# A value of zero (0) means no limit. +;max_conns_per_host = 0 + +# The maximum number of idle connections that Grafana will keep alive. +;max_idle_connections = 100 + +# How many seconds the data proxy keeps an idle connection open before timing out. +;idle_conn_timeout_seconds = 90 + +# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false. +;send_user_header = false + +# Limit the amount of bytes that will be read/accepted from responses of outgoing HTTP requests. +;response_limit = 0 + +# Limits the number of rows that Grafana will process from SQL data sources. +;row_limit = 1000000 + +#################################### Analytics #################################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. +;reporting_enabled = true + +# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs +;reporting_distributor = grafana-labs + +# Set to false to disable all checks to https://grafana.com +# for new versions of grafana. The check is used +# in some UI views to notify that a grafana update exists. +# This option does not cause any auto updates, nor send any information +# only a GET request to https://raw.githubusercontent.com/grafana/grafana/main/latest.json to get the latest version. +;check_for_updates = true + +# Set to false to disable all checks to https://grafana.com +# for new versions of plugins. The check is used +# in some UI views to notify that a plugin update exists. 
+# This option does not cause any auto updates, nor send any information +# only a GET request to https://grafana.com to get the latest versions. +;check_for_plugin_updates = true + +# Google Analytics universal tracking code, only enabled if you specify an id here +;google_analytics_ua_id = + +# Google Tag Manager ID, only enabled if you specify an id here +;google_tag_manager_id = + +# Rudderstack write key, enabled only if rudderstack_data_plane_url is also set +;rudderstack_write_key = + +# Rudderstack data plane url, enabled only if rudderstack_write_key is also set +;rudderstack_data_plane_url = + +# Rudderstack SDK url, optional, only valid if rudderstack_write_key and rudderstack_data_plane_url is also set +;rudderstack_sdk_url = + +# Rudderstack Config url, optional, used by Rudderstack SDK to fetch source config +;rudderstack_config_url = + +# Controls if the UI contains any links to user feedback forms +;feedback_links_enabled = true + +#################################### Security #################################### +[security] +# disable creation of admin user on first start of grafana +;disable_initial_admin_creation = false + +# default admin user, created on startup +;admin_user = admin + +# default admin password, can be changed before first start of grafana, or in profile settings +;admin_password = admin + +# used for signing +;secret_key = SW2YcwTIb9zpOOhoPsMm + +# current key provider used for envelope encryption, default to static value specified by secret_key +;encryption_provider = secretKey.v1 + +# list of configured key providers, space separated (Enterprise only): e.g., awskms.v1 azurekv.v1 +;available_encryption_providers = + +# disable gravatar profile images +;disable_gravatar = false + +# data source proxy whitelist (ip_or_domain:port separated by spaces) +;data_source_proxy_whitelist = + +# disable protection against brute force login attempts +;disable_brute_force_login_protection = false + +# set to true if you host Grafana behind HTTPS. default is false. +;cookie_secure = false + +# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled" +;cookie_samesite = lax + +# set to true if you want to allow browsers to render Grafana in a ,