From 24bd161df9b0d6924e0b908f84ac000cdfc49772 Mon Sep 17 00:00:00 2001 From: Rogee Date: Fri, 19 Dec 2025 14:46:58 +0800 Subject: [PATCH] feat: add backend_v1 migration --- backend_v1/.air.toml | 40 ++ backend_v1/.editorconfig | 19 + backend_v1/.env.example | 58 ++ backend_v1/.gitignore | 27 + backend_v1/.golangci.yml | 294 ++++++++++ backend_v1/Dockerfile | 82 +++ backend_v1/Dockerfile.dev | 32 ++ backend_v1/Makefile | 64 +++ backend_v1/README.md | 114 ++++ backend_v1/app/commands/event/event.go | 58 ++ backend_v1/app/commands/grpc/grpc.go | 57 ++ backend_v1/app/commands/http/http.go | 79 +++ .../migrate/20251219062731_river_queue.go | 35 ++ backend_v1/app/commands/migrate/migrate.go | 86 +++ backend_v1/app/commands/queue/error.go | 24 + backend_v1/app/commands/queue/river.go | 67 +++ backend_v1/app/commands/service.go | 19 + backend_v1/app/commands/testx/testing.go | 30 + backend_v1/app/console/.gitkeep | 0 backend_v1/app/errorx/app_error.go | 65 +++ backend_v1/app/errorx/codes.go | 90 +++ backend_v1/app/errorx/handler.go | 105 ++++ backend_v1/app/errorx/middleware.go | 38 ++ backend_v1/app/errorx/predefined.go | 105 ++++ backend_v1/app/errorx/response.go | 127 +++++ .../app/events/publishers/user_register.go | 26 + .../app/events/subscribers/provider.gen.go | 27 + .../app/events/subscribers/user_register.go | 45 ++ backend_v1/app/events/subscribers/utils.go | 24 + backend_v1/app/events/topics.go | 6 + backend_v1/app/grpc/users/handler.go | 26 + backend_v1/app/grpc/users/provider.gen.go | 25 + backend_v1/app/http/v1/demo.go | 76 +++ backend_v1/app/http/v1/provider.gen.go | 37 ++ backend_v1/app/http/v1/routes.gen.go | 60 ++ backend_v1/app/http/v1/routes.manual.go | 9 + backend_v1/app/jobs/demo_cron.go | 36 ++ backend_v1/app/jobs/demo_job.go | 53 ++ backend_v1/app/jobs/demo_job_test.go | 53 ++ backend_v1/app/jobs/provider.gen.go | 41 ++ backend_v1/app/middlewares/mid_debug.go | 9 + backend_v1/app/middlewares/middlewares.go | 15 + backend_v1/app/middlewares/provider.gen.go | 20 + backend_v1/app/requests/pagination.go | 30 + backend_v1/app/requests/sort.go | 41 ++ backend_v1/app/services/provider.gen.go | 36 ++ backend_v1/app/services/services.gen.go | 28 + backend_v1/app/services/test.go | 10 + backend_v1/app/services/test_test.go | 41 ++ backend_v1/buf.gen.yaml | 23 + backend_v1/buf.yaml | 13 + backend_v1/config.full.toml | 239 ++++++++ backend_v1/config.prod.toml | 87 +++ backend_v1/config.toml | 89 +++ backend_v1/database/.transform.yaml | 12 + backend_v1/database/database.go | 55 ++ .../20250321112535_create_medias.sql | 19 + .../20250322100215_create_posts.sql | 26 + .../20250322103119_create_users.sql | 22 + .../20250322103243_create_user_posts.sql | 18 + .../20250410130530_create_orders.sql | 26 + .../migrations/20250430014015_alter_user.sql | 18 + .../migrations/20250512113213_alter_user.sql | 11 + backend_v1/database/models/medias.gen.go | 59 ++ .../database/models/medias.query.gen.go | 487 ++++++++++++++++ backend_v1/database/models/orders.gen.go | 66 +++ .../database/models/orders.query.gen.go | 515 +++++++++++++++++ backend_v1/database/models/posts.gen.go | 72 +++ backend_v1/database/models/posts.query.gen.go | 528 ++++++++++++++++++ backend_v1/database/models/query.gen.go | 135 +++++ backend_v1/database/models/user_posts.gen.go | 56 ++ .../database/models/user_posts.query.gen.go | 481 ++++++++++++++++ backend_v1/database/models/users.gen.go | 68 +++ backend_v1/database/models/users.query.gen.go | 512 +++++++++++++++++ backend_v1/docs/docs.go | 141 +++++ backend_v1/docs/ember.go 
| 10 + backend_v1/docs/swagger.json | 117 ++++ backend_v1/docs/swagger.yaml | 75 +++ backend_v1/fixtures/.gitkeep | 0 backend_v1/go.mod | 131 +++++ backend_v1/go.sum | 392 +++++++++++++ backend_v1/llm.txt | 207 +++++++ backend_v1/main.go | 40 ++ backend_v1/main_test.go | 1 + backend_v1/pkg/consts/consts.go | 8 + backend_v1/pkg/proto/user/v1/user.pb.go | 387 +++++++++++++ backend_v1/pkg/proto/user/v1/user_grpc.pb.go | 167 ++++++ backend_v1/pkg/utils/buffer.go | 26 + backend_v1/pkg/utils/build_info.go | 44 ++ backend_v1/proto/user/v1/user.proto | 40 ++ backend_v1/providers/app/app.go | 18 + backend_v1/providers/app/config.gen.go | 179 ++++++ backend_v1/providers/app/config.go | 45 ++ backend_v1/providers/cmux/config.go | 109 ++++ backend_v1/providers/cmux/provider.go | 37 ++ backend_v1/providers/event/channel.go | 30 + backend_v1/providers/event/config.go | 99 ++++ backend_v1/providers/event/logrus_adapter.go | 60 ++ backend_v1/providers/event/provider.go | 109 ++++ backend_v1/providers/grpc/config.go | 145 +++++ backend_v1/providers/grpc/options.md | 513 +++++++++++++++++ backend_v1/providers/grpc/provider.go | 18 + backend_v1/providers/http/config.go | 38 ++ backend_v1/providers/http/engine.go | 203 +++++++ backend_v1/providers/http/swagger/config.go | 317 +++++++++++ backend_v1/providers/http/swagger/swagger.go | 103 ++++ backend_v1/providers/http/swagger/template.go | 107 ++++ backend_v1/providers/job/config.go | 67 +++ backend_v1/providers/job/provider.go | 207 +++++++ backend_v1/providers/jwt/config.go | 35 ++ backend_v1/providers/jwt/jwt.go | 118 ++++ backend_v1/providers/postgres/config.go | 136 +++++ backend_v1/providers/postgres/postgres.go | 91 +++ backend_v1/tests/README.md | 288 ++++++++++ backend_v1/tests/e2e/api_test.go | 419 ++++++++++++++ backend_v1/tests/integration/database_test.go | 364 ++++++++++++ backend_v1/tests/setup_test.go | 161 ++++++ backend_v1/tests/unit/config_test.go | 287 ++++++++++ backend_v1/utils/build_info.go | 44 ++ 119 files changed, 12259 insertions(+) create mode 100644 backend_v1/.air.toml create mode 100644 backend_v1/.editorconfig create mode 100644 backend_v1/.env.example create mode 100644 backend_v1/.gitignore create mode 100644 backend_v1/.golangci.yml create mode 100644 backend_v1/Dockerfile create mode 100644 backend_v1/Dockerfile.dev create mode 100644 backend_v1/Makefile create mode 100644 backend_v1/README.md create mode 100644 backend_v1/app/commands/event/event.go create mode 100644 backend_v1/app/commands/grpc/grpc.go create mode 100644 backend_v1/app/commands/http/http.go create mode 100644 backend_v1/app/commands/migrate/20251219062731_river_queue.go create mode 100644 backend_v1/app/commands/migrate/migrate.go create mode 100644 backend_v1/app/commands/queue/error.go create mode 100644 backend_v1/app/commands/queue/river.go create mode 100644 backend_v1/app/commands/service.go create mode 100644 backend_v1/app/commands/testx/testing.go create mode 100644 backend_v1/app/console/.gitkeep create mode 100644 backend_v1/app/errorx/app_error.go create mode 100644 backend_v1/app/errorx/codes.go create mode 100644 backend_v1/app/errorx/handler.go create mode 100644 backend_v1/app/errorx/middleware.go create mode 100644 backend_v1/app/errorx/predefined.go create mode 100644 backend_v1/app/errorx/response.go create mode 100644 backend_v1/app/events/publishers/user_register.go create mode 100755 backend_v1/app/events/subscribers/provider.gen.go create mode 100644 backend_v1/app/events/subscribers/user_register.go create mode 100644 
backend_v1/app/events/subscribers/utils.go create mode 100644 backend_v1/app/events/topics.go create mode 100644 backend_v1/app/grpc/users/handler.go create mode 100755 backend_v1/app/grpc/users/provider.gen.go create mode 100644 backend_v1/app/http/v1/demo.go create mode 100755 backend_v1/app/http/v1/provider.gen.go create mode 100644 backend_v1/app/http/v1/routes.gen.go create mode 100644 backend_v1/app/http/v1/routes.manual.go create mode 100644 backend_v1/app/jobs/demo_cron.go create mode 100644 backend_v1/app/jobs/demo_job.go create mode 100644 backend_v1/app/jobs/demo_job_test.go create mode 100755 backend_v1/app/jobs/provider.gen.go create mode 100644 backend_v1/app/middlewares/mid_debug.go create mode 100644 backend_v1/app/middlewares/middlewares.go create mode 100755 backend_v1/app/middlewares/provider.gen.go create mode 100644 backend_v1/app/requests/pagination.go create mode 100644 backend_v1/app/requests/sort.go create mode 100755 backend_v1/app/services/provider.gen.go create mode 100644 backend_v1/app/services/services.gen.go create mode 100644 backend_v1/app/services/test.go create mode 100644 backend_v1/app/services/test_test.go create mode 100644 backend_v1/buf.gen.yaml create mode 100644 backend_v1/buf.yaml create mode 100644 backend_v1/config.full.toml create mode 100644 backend_v1/config.prod.toml create mode 100644 backend_v1/config.toml create mode 100644 backend_v1/database/.transform.yaml create mode 100644 backend_v1/database/database.go create mode 100644 backend_v1/database/migrations/20250321112535_create_medias.sql create mode 100644 backend_v1/database/migrations/20250322100215_create_posts.sql create mode 100644 backend_v1/database/migrations/20250322103119_create_users.sql create mode 100644 backend_v1/database/migrations/20250322103243_create_user_posts.sql create mode 100644 backend_v1/database/migrations/20250410130530_create_orders.sql create mode 100644 backend_v1/database/migrations/20250430014015_alter_user.sql create mode 100644 backend_v1/database/migrations/20250512113213_alter_user.sql create mode 100644 backend_v1/database/models/medias.gen.go create mode 100644 backend_v1/database/models/medias.query.gen.go create mode 100644 backend_v1/database/models/orders.gen.go create mode 100644 backend_v1/database/models/orders.query.gen.go create mode 100644 backend_v1/database/models/posts.gen.go create mode 100644 backend_v1/database/models/posts.query.gen.go create mode 100644 backend_v1/database/models/query.gen.go create mode 100644 backend_v1/database/models/user_posts.gen.go create mode 100644 backend_v1/database/models/user_posts.query.gen.go create mode 100644 backend_v1/database/models/users.gen.go create mode 100644 backend_v1/database/models/users.query.gen.go create mode 100644 backend_v1/docs/docs.go create mode 100644 backend_v1/docs/ember.go create mode 100644 backend_v1/docs/swagger.json create mode 100644 backend_v1/docs/swagger.yaml create mode 100644 backend_v1/fixtures/.gitkeep create mode 100644 backend_v1/go.mod create mode 100644 backend_v1/go.sum create mode 100644 backend_v1/llm.txt create mode 100644 backend_v1/main.go create mode 100644 backend_v1/main_test.go create mode 100644 backend_v1/pkg/consts/consts.go create mode 100644 backend_v1/pkg/proto/user/v1/user.pb.go create mode 100644 backend_v1/pkg/proto/user/v1/user_grpc.pb.go create mode 100644 backend_v1/pkg/utils/buffer.go create mode 100644 backend_v1/pkg/utils/build_info.go create mode 100644 backend_v1/proto/user/v1/user.proto create mode 100644 
backend_v1/providers/app/app.go create mode 100644 backend_v1/providers/app/config.gen.go create mode 100644 backend_v1/providers/app/config.go create mode 100644 backend_v1/providers/cmux/config.go create mode 100644 backend_v1/providers/cmux/provider.go create mode 100644 backend_v1/providers/event/channel.go create mode 100644 backend_v1/providers/event/config.go create mode 100644 backend_v1/providers/event/logrus_adapter.go create mode 100644 backend_v1/providers/event/provider.go create mode 100644 backend_v1/providers/grpc/config.go create mode 100644 backend_v1/providers/grpc/options.md create mode 100644 backend_v1/providers/grpc/provider.go create mode 100644 backend_v1/providers/http/config.go create mode 100644 backend_v1/providers/http/engine.go create mode 100644 backend_v1/providers/http/swagger/config.go create mode 100644 backend_v1/providers/http/swagger/swagger.go create mode 100644 backend_v1/providers/http/swagger/template.go create mode 100644 backend_v1/providers/job/config.go create mode 100644 backend_v1/providers/job/provider.go create mode 100644 backend_v1/providers/jwt/config.go create mode 100644 backend_v1/providers/jwt/jwt.go create mode 100644 backend_v1/providers/postgres/config.go create mode 100644 backend_v1/providers/postgres/postgres.go create mode 100644 backend_v1/tests/README.md create mode 100644 backend_v1/tests/e2e/api_test.go create mode 100644 backend_v1/tests/integration/database_test.go create mode 100644 backend_v1/tests/setup_test.go create mode 100644 backend_v1/tests/unit/config_test.go create mode 100644 backend_v1/utils/build_info.go diff --git a/backend_v1/.air.toml b/backend_v1/.air.toml new file mode 100644 index 0000000..f97236a --- /dev/null +++ b/backend_v1/.air.toml @@ -0,0 +1,40 @@ +# .air.toml - Air 热重载配置文件 + +root = "." +testdata_dir = "testdata" +tmp_dir = "tmp" + +[build] + args_bin = [] + bin = "./tmp/main" + cmd = "go build -o ./tmp/main ." 
+ delay = 1000 + exclude_dir = ["assets", "tmp", "vendor", "testdata", "frontend"] + exclude_file = [] + exclude_regex = ["_test.go"] + exclude_unchanged = false + follow_symlink = false + full_bin = "" + include_dir = [] + include_ext = ["go", "tpl", "tmpl", "html", "yaml", "yml", "toml"] + kill_delay = "0s" + log = "build-errors.log" + send_interrupt = false + stop_on_root = false + +[color] + app = "" + build = "yellow" + main = "magenta" + runner = "green" + watcher = "cyan" + +[log] + time = false + +[misc] + clean_on_exit = false + +[screen] + clear_on_rebuild = false + keep_scroll = true \ No newline at end of file diff --git a/backend_v1/.editorconfig b/backend_v1/.editorconfig new file mode 100644 index 0000000..90df546 --- /dev/null +++ b/backend_v1/.editorconfig @@ -0,0 +1,19 @@ +# EditorConfig is awesome: https://EditorConfig.org + +# top-most EditorConfig file +root = true + +[*] +indent_style = space +indent_size = 4 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = false + +[*.{yaml,yml}] +indent_style = space +indent_size = 2 + +[Makefile] +indent_style = tab \ No newline at end of file diff --git a/backend_v1/.env.example b/backend_v1/.env.example new file mode 100644 index 0000000..00c3ede --- /dev/null +++ b/backend_v1/.env.example @@ -0,0 +1,58 @@ +# 应用配置 +APP_MODE=development +APP_BASE_URI=http://localhost:8080 + +# HTTP 服务配置 +HTTP_PORT=8080 +HTTP_HOST=0.0.0.0 + +# 数据库配置 +DB_HOST=localhost +DB_PORT=5432 +DB_NAME={{.ProjectName}} +DB_USER=postgres +DB_PASSWORD=password +DB_SSL_MODE=disable +DB_MAX_CONNECTIONS=25 +DB_MAX_IDLE_CONNECTIONS=5 +DB_CONNECTION_LIFETIME=5m + +# Redis 配置 +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD= +REDIS_DB=0 + +# JWT 配置 +JWT_SECRET_KEY=your-secret-key-here +JWT_EXPIRES_TIME=168h + +# HashIDs 配置 +HASHIDS_SALT=your-salt-here + +# 日志配置 +LOG_LEVEL=info +LOG_FORMAT=json + +# 文件上传配置 +UPLOAD_MAX_SIZE=10MB +UPLOAD_PATH=./uploads + +# 邮件配置 +SMTP_HOST=smtp.gmail.com +SMTP_PORT=587 +SMTP_USER= +SMTP_PASSWORD= + +# 第三方服务配置 +REDIS_URL=redis://localhost:6379/0 +DATABASE_URL=postgres://postgres:password@localhost:5432/{{.ProjectName}}?sslmode=disable + +# 开发配置 +ENABLE_SWAGGER=true +ENABLE_CORS=true +DEBUG_MODE=true + +# 监控配置 +ENABLE_METRICS=false +METRICS_PORT=9090 \ No newline at end of file diff --git a/backend_v1/.gitignore b/backend_v1/.gitignore new file mode 100644 index 0000000..03ac9f4 --- /dev/null +++ b/backend_v1/.gitignore @@ -0,0 +1,27 @@ +bin/* +vendor/ +__debug_bin* +backend +build/* +.vscode +.idea +tmp/ +docker-compose.yml +sqlite.db +go.work +go.work.sum +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/backend_v1/.golangci.yml b/backend_v1/.golangci.yml new file mode 100644 index 0000000..19d4f68 --- /dev/null +++ b/backend_v1/.golangci.yml @@ -0,0 +1,294 @@ +# golangci-lint 配置文件 +# https://golangci-lint.run/usage/configuration/ + +# 运行时配置 +run: + # 默认并行处理器数量 + default-concurrency: 4 + + # 超时时间 + timeout: 5m + + # 退出代码 + issues-exit-code: 1 + + # 测试包含的文件 + tests: true + + # 是否跳过文件 + skip-files: + - "_test\\.go$" + - ".*\\.gen\\.go$" + - ".*\\.pb\\.go$" + + # 是否跳过目录 + skip-dirs: + - "vendor" + - "node_modules" + - ".git" + - "build" + - "dist" + +# 输出配置 +output: + # 输出格式 + format: colored-line-number + + # 打印已使用的 linter + print-issued-lines: true 
+ + # 打印 linter 名称 + print-linter-name: true + + # 唯一性检查 + uniq-by-line: true + +# linter 启用配置 +linters-settings: + # 错误检查 + errcheck: + # 检查类型断言 + check-type-assertions: true + # 检查赋值 + check-blank: true + + # 代码复杂度 + gocyclo: + # 最小复杂度 + min-complexity: 15 + + # 函数参数和返回值 + gocognit: + # 最小认知复杂度 + min-complexity: 20 + + # 函数长度 + funlen: + # 最大行数 + lines: 60 + # 最大语句数 + statements: 40 + + # 代码行长度 + lll: + # 最大行长度 + line-length: 120 + + # 导入顺序 + importas: + # 别名规则 + no-unaliased: true + alias: + - pkg: "github.com/sirupsen/logrus" + alias: "logrus" + - pkg: "github.com/stretchr/testify/assert" + alias: "assert" + - pkg: "github.com/stretchr/testify/suite" + alias: "suite" + + # 重复导入 + dupl: + # 重复代码块的最小 token 数 + threshold: 100 + + # 空值检查 + nilerr: + # 检查返回 nil 的函数 + check-type-assertions: true + check-blank: true + + # 代码格式化 + gofmt: + # 格式化简化 + simplify: true + + # 导入检查 + goimports: + # 本地前缀 + local-prefixes: "{{.ModuleName}}" + + # 静态检查 + staticcheck: + # 检查版本 + go_version: "1.22" + + # 结构体标签 + structtag: + # 检查标签 + required: [] + # 是否允许空标签 + allow-omit-latest: true + + # 未使用的变量 + unused: + # 检查字段 + check-exported-fields: true + + # 变量命名 + varnamelen: + # 最小变量名长度 + min-name-length: 2 + # 检查参数 + check-parameters: true + # 检查返回值 + check-return: true + # 检查接收器 + check-receiver: true + # 检查变量 + check-variable: true + # 忽略名称 + ignore-names: + - "ok" + - "err" + - "T" + - "i" + - "n" + - "v" + # 忽略类型 + ignore-type-assert-ok: true + ignore-map-index-ok: true + ignore-chan-recv-ok: true + ignore-decls: + - "T any" + - "w http.ResponseWriter" + - "r *http.Request" + +# 启用的 linter +linters: + enable: + # 错误检查 + - errcheck + - errorlint + - goerr113 + + # 代码复杂度 + - gocyclo + - gocognit + - funlen + + # 代码风格 + - gofmt + - goimports + - lll + - misspell + - whitespace + + # 导入检查 + - importas + - dupl + + # 静态检查 + - staticcheck + - unused + - typecheck + - ineffassign + - bodyclose + - contextcheck + - nilerr + + # 测试检查 + - tparallel + - testpackage + - thelper + + # 性能检查 + - prealloc + - unconvert + + # 安全检查 + - gosec + - noctx + - rowserrcheck + + # 代码质量 + - revive + - varnamelen + - exportloopref + - forcetypeassert + - govet + - paralleltest + - nlreturn + - wastedassign + - wrapcheck + +# 禁用的 linter +linters-disable: + - deadcode # 被 unused 替代 + - varcheck # 被 unused 替代 + - structcheck # 被 unused 替代 + - interfacer # 已弃用 + - maligned # 已弃用 + - scopelint # 已弃用 + +# 问题配置 +issues: + # 排除规则 + exclude-rules: + # 排除测试文件的某些规则 + - path: _test\.go + linters: + - funlen + - gocyclo + - dupl + - gochecknoglobals + - gochecknoinits + + # 排除生成的文件 + - path: \.gen\.go$ + linters: + - lll + - funlen + - gocyclo + + # 排除错误处理中的简单错误检查 + - path: .* + text: "Error return value of `.*` is not checked" + + # 排除特定的 golangci-lint 注释 + - path: .* + text: "// nolint:.*" + + # 排除 context.Context 的未使用检查 + - path: .* + text: "context.Context should be the first parameter of a function" + + # 排除某些性能优化建议 + - path: .* + text: "predeclared" + + # 排除某些重复代码检查 + - path: .* + linters: + - dupl + text: "is duplicate of" + + # 最大问题数 + max-issues-per-linter: 50 + + # 最大相同问题数 + max-same-issues: 3 + +# 严重性配置 +severity: + # 默认严重性 + default-severity: error + + # 规则严重性 + rules: + - linters: + - dupl + - gosec + severity: warning + + - linters: + - misspell + - whitespace + severity: info + +# 性能配置 +performance: + # 是否使用内存缓存 + use-memory-cache: true + + # 缓存超时时间 + cache-timeout: 5m \ No newline at end of file diff --git a/backend_v1/Dockerfile b/backend_v1/Dockerfile new file mode 100644 index 0000000..255ddd4 --- /dev/null +++ 
b/backend_v1/Dockerfile @@ -0,0 +1,82 @@ +# 多阶段构建 Dockerfile +# 阶段 1: 构建应用 +FROM golang:1.22-alpine AS builder + +# 安装构建依赖 +RUN apk add --no-cache git ca-certificates tzdata + +# 设置工作目录 +WORKDIR /app + +# 复制 go mod 文件 +COPY go.mod go.sum ./ + +# 设置 Go 代理 +ENV GOPROXY=https://goproxy.cn,direct +ENV CGO_ENABLED=0 +ENV GOOS=linux +ENV GOARCH=amd64 + +# 下载依赖 +RUN go mod download + +# 复制源代码 +COPY . . + +# 构建应用 +RUN go build -a -installsuffix cgo -ldflags="-w -s" -o main . + +# 阶段 2: 构建前端(如果有) +# 如果有前端构建,取消下面的注释 +# FROM node:18-alpine AS frontend-builder +# WORKDIR /app +# COPY frontend/package*.json ./ +# RUN npm ci --only=production +# COPY frontend/ . +# RUN npm run build + +# 阶段 3: 运行时镜像 +FROM alpine:3.20 AS runtime + +# 安装运行时依赖 +RUN apk add --no-cache ca-certificates tzdata curl + +# 设置时区 +RUN cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \ + echo "Asia/Shanghai" > /etc/timezone && \ + apk del tzdata + +# 创建非 root 用户 +RUN addgroup -g 1000 appgroup && \ + adduser -u 1000 -G appgroup -s /bin/sh -D appuser + +# 创建必要的目录 +RUN mkdir -p /app/config /app/logs /app/uploads && \ + chown -R appuser:appgroup /app + +# 设置工作目录 +WORKDIR /app + +# 从构建阶段复制应用 +COPY --from=builder /app/main . +COPY --chown=appuser:appgroup config.toml ./config/ + +# 如果有前端构建,取消下面的注释 +# COPY --from=frontend-builder /app/dist ./dist + +# 创建空目录供应用使用 +RUN mkdir -p /app/logs /app/uploads && \ + chown -R appuser:appgroup /app + +# 切换到非 root 用户 +USER appuser + +# 健康检查 +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8080/health || exit 1 + +# 暴露端口 +EXPOSE 8080 + +# 启动应用 +CMD ["./main", "serve"] diff --git a/backend_v1/Dockerfile.dev b/backend_v1/Dockerfile.dev new file mode 100644 index 0000000..4140057 --- /dev/null +++ b/backend_v1/Dockerfile.dev @@ -0,0 +1,32 @@ +# 开发环境 Dockerfile +FROM golang:1.22-alpine AS builder + +# 安装必要的工具 +RUN apk add --no-cache git ca-certificates tzdata + +# 设置工作目录 +WORKDIR /app + +# 复制 go mod 文件 +COPY go.mod go.sum ./ + +# 设置 Go 代理 +RUN go env -w GOPROXY=https://goproxy.cn,direct + +# 下载依赖 +RUN go mod download + +# 复制源代码 +COPY . . + +# 设置时区 +ENV TZ=Asia/Shanghai + +# 安装 air 用于热重载 +RUN go install github.com/air-verse/air@latest + +# 暴露端口 +EXPOSE 8080 9090 + +# 启动命令 +CMD ["air", "-c", ".air.toml"] \ No newline at end of file diff --git a/backend_v1/Makefile b/backend_v1/Makefile new file mode 100644 index 0000000..1adb571 --- /dev/null +++ b/backend_v1/Makefile @@ -0,0 +1,64 @@ +buildAt=`date +%Y/%m/%d-%H:%M:%S` +gitHash=`(git log -1 --pretty=format:%H 2>/dev/null || echo "no-commit")` +version=`(git describe --tags --exact-match HEAD 2>/dev/null || git rev-parse --abbrev-ref HEAD 2>/dev/null | grep -v HEAD 2>/dev/null || echo "dev")` +# 修改为项目特定的变量路径 +flags="-X 'quyun/v2/pkg/utils.Version=${version}' -X 'quyun/v2/pkg/utils.BuildAt=${buildAt}' -X 'quyun/v2/pkg/utils.GitHash=${gitHash}'" +release_flags="-w -s ${flags}" + +GOPATH:=$(shell go env GOPATH) + +.PHONY: tidy +tidy: + @go mod tidy + +.PHONY: release +release: + @CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags=${flags} -o bin/release/v2 . + @cp config.toml bin/release/ + +.PHONY: build +build: + @go build -ldflags=${flags} -o bin/v2 . + +.PHONY: run +run: build + @./bin/v2 + +.PHONY: test +test: + @go test -v ./tests/... 
-cover + +.PHONY: info +info: + @echo "Build Information:" + @echo "==================" + @echo "Build Time: $(buildAt)" + @echo "Git Hash: $(gitHash)" + @echo "Version: $(version)" + +.PHONY: lint +lint: + @golangci-lint run + +.PHONY: tools +tools: + go install github.com/air-verse/air@latest + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway@latest + go install github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2@latest + go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + go install github.com/bufbuild/buf/cmd/buf@latest + go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + go get -u go.ipao.vip/atom + go get -u google.golang.org/genproto + go get -u github.com/gofiber/fiber/v3 +.PHONY: init +init: tools + @atomctl swag init + @atomctl gen enum + @atomctl gen route + @atomctl gen service + @buf generate + @go mod tidy + @go get -u + @go mod tidy \ No newline at end of file diff --git a/backend_v1/README.md b/backend_v1/README.md new file mode 100644 index 0000000..7e1274b --- /dev/null +++ b/backend_v1/README.md @@ -0,0 +1,114 @@ +## 路由生成(gen route) + +通过在控制器方法上编写注释,解析器从 Go AST 中读取注释、方法签名与参数列表,自动生成路由注册与参数绑定代码。 + +- 核心标签:`@Router` 定义路径与方法;`@Bind` 定义方法参数的来源与键名。 +- 生成行为:输出 `router.(path, FuncN(...))` 或 `DataFuncN(...)` 包装调用,并自动汇聚所需 imports 与控制器注入字段。 + +### 快速开始 + +``` +atomctl gen route [path] +``` + +- 生成文件:当前包目录下 `routes.gen.go` +- 分组与排序:按控制器分组,导入、方法、路由项稳定排序,便于审阅 diff。 + +### 注释语法 + +- `@Router []` + - 示例:`@Router /users/:id [get]` + +- `@Bind [key()] [model(|[:])]` + - `paramName` 与方法参数名一致(大小写敏感) + - `position`:`path`、`query`、`body`、`header`、`cookie`、`local`、`file` + - 可选: + - `key()` 覆盖默认键名; + - `model()` 详见“模型绑定”。 + +### 参数绑定规则(按 position) + +- query:标量用 `QueryParam[T]("key")`,非标量用 `Query[T]("key")` +- path:标量用 `PathParam[T]("key")`,非标量用 `Path[T]("key")` + - 若使用 `model()`(仅在 path 有效),会按字段值查询并绑定为 `T`,详见下文 +- header:`Header[T]("key")` +- body:`Body[T]("key")` +- cookie:`string` 用 `CookieParam("key")`,其他用 `Cookie[T]("key")` +- file:`File[multipart.FileHeader]("key")` +- local:`Local[T]("key")` + +说明: + +- 标量类型集合:`string`、`int`、`int32`、`int64`、`float32`、`float64`、`bool` +- `key` 默认等于 `paramName`;设置 `key(...)` 后以其为准 +- `file` 使用固定类型 `multipart.FileHeader` + +### 类型与指针处理 + +- 支持 `T`、`*T`、`pkg.T`、`*pkg.T`;会正确收集选择子表达式对应 import +- 忽略结尾为 `Context` 或 `Ctx` 的参数(框架上下文) +- 指针处理:除 `local` 外会去掉前导 `*` 作为泛型实参;`local` 保留指针(便于写回) + +### 解析与匹配 + +- 先收集注释中的多条 `@Bind`,再按“方法参数列表顺序”匹配并输出绑定器,确保调用顺序与方法签名一致 +- 未在方法参数中的 `@Bind` 会被忽略;缺失 `@Router` 或方法无注释将跳过该方法 +- import 自动收集去重;控制器注入字段名为类型名的小驼峰形式,例如 `userController *UserController` + +### 返回值与包装函数 + +- 返回值个数 > 1:使用 `DataFuncN` +- 否则使用 `FuncN` +- `N` 为参与绑定的参数个数 + +### 模型绑定(path + model) + +当 `@Bind ... 
model(...)` 配合 `position=path` 使用时,将根据路径参数值查询模型并绑定为方法参数类型的实例(`T` 来自方法参数)。 + +- 语法: + - 仅字段:`model(id)`(推荐) + - 指定字段与类型:`model(id:int)`、`model(code:string)`(用于非字符串路径参数) + - 指定类型与字段:`model(pkg.Type:field)` 或 `model(pkg.Type)`(字段缺省为 `id`) +- 行为: + - 生成的绑定器会按给定字段构造查询条件并返回首条记录 + - 自动注入 import:`field "go.ipao.vip/gen/field"`,用于构造字段条件表达式 + +示例: + +```go +// @Router /users/:id [get] +// @Bind user path key(id) model(id) +func (uc *UserController) Show(ctx context.Context, user *models.User) (*UserDTO, error) +``` + +### 完整示例 + +注释与方法签名: + +```go +// @Router /users/:id [get] +// @Bind user path key(id) model(id) +// @Bind fields query +// @Bind token header key(Authorization) +// @Bind sess cookie key(session_id) +// @Bind cfg local +func (uc *UserController) GetUser(ctx context.Context, user *models.User, fields []string, token string, sess string, cfg *AppConfig) (*User, error) +``` + +生成的路由注册(示意): + +```go +router.Get("/users/:id", DataFunc4( + r.userController.GetUser, + PathModel[models.User]("id", "id"), + Query[[]string]("fields"), + Header[string]("Authorization"), + CookieParam("session_id"), +)) +``` + +### 错误与限制 + +- 无效的 `@Router` 语法会报错;无效的 `position` 会在解析阶段触发错误 +- `file` 仅支持单文件头;`model()` 仅在 `position=path` 时参与代码生成 +- 请确保路由段变量名与 `key(...)` 保持一致 diff --git a/backend_v1/app/commands/event/event.go b/backend_v1/app/commands/event/event.go new file mode 100644 index 0000000..4325d0e --- /dev/null +++ b/backend_v1/app/commands/event/event.go @@ -0,0 +1,58 @@ +package event + +import ( + "context" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "quyun/v2/app/commands" + "quyun/v2/app/events/subscribers" + "quyun/v2/providers/app" + "quyun/v2/providers/event" + "quyun/v2/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return commands.Default(container.Providers{ + postgres.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("event"), + atom.Short("start event processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + subscribers.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + PubSub *event.PubSub + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.PubSub.Serve(ctx) + }) +} diff --git a/backend_v1/app/commands/grpc/grpc.go b/backend_v1/app/commands/grpc/grpc.go new file mode 100644 index 0000000..43c45a9 --- /dev/null +++ b/backend_v1/app/commands/grpc/grpc.go @@ -0,0 +1,57 @@ +package grpc + +import ( + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "quyun/v2/app/commands" + "quyun/v2/app/grpc/users" + "quyun/v2/providers/app" + "quyun/v2/providers/grpc" + "quyun/v2/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return commands.Default(container.Providers{ + postgres.DefaultProvider(), + grpc.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("grpc"), + atom.Short("run grpc server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). 
+ With( + users.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Grpc *grpc.Grpc + Initials []contracts.Initial `group:"initials"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + return svc.Grpc.Serve() + }) +} diff --git a/backend_v1/app/commands/http/http.go b/backend_v1/app/commands/http/http.go new file mode 100644 index 0000000..a68a597 --- /dev/null +++ b/backend_v1/app/commands/http/http.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + + "quyun/v2/app/commands" + "quyun/v2/app/errorx" + "quyun/v2/app/jobs" + _ "quyun/v2/docs" + "quyun/v2/providers/app" + "quyun/v2/providers/http" + "quyun/v2/providers/http/swagger" + "quyun/v2/providers/job" + "quyun/v2/providers/jwt" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + + "github.com/gofiber/fiber/v3/middleware/favicon" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return commands.Default(container.Providers{ + http.DefaultProvider(), + jwt.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("serve"), + atom.Short("run http server"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + jobs.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Job *job.Job + Http *http.Service + Initials []contracts.Initial `group:"initials"` + Routes []contracts.HttpRoute `group:"routes"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.Mode == app.AppModeDevelopment { + log.SetLevel(log.DebugLevel) + + svc.Http.Engine.Get("/swagger/*", swagger.HandlerDefault) + } + svc.Http.Engine.Use(errorx.Middleware) + svc.Http.Engine.Use(favicon.New(favicon.Config{ + Data: []byte{}, + })) + + group := svc.Http.Engine.Group("") + for _, route := range svc.Routes { + route.Register(group) + } + + return svc.Http.Serve(ctx) + }) +} diff --git a/backend_v1/app/commands/migrate/20251219062731_river_queue.go b/backend_v1/app/commands/migrate/20251219062731_river_queue.go new file mode 100644 index 0000000..40e2255 --- /dev/null +++ b/backend_v1/app/commands/migrate/20251219062731_river_queue.go @@ -0,0 +1,35 @@ +package migrate + +import ( + "context" + "database/sql" + + "github.com/pkg/errors" + "github.com/pressly/goose/v3" + "github.com/riverqueue/river/riverdriver/riverdatabasesql" + "github.com/riverqueue/river/rivermigrate" +) + +func init() { + goose.AddMigrationNoTxContext(RiverQueueUp, RiverQueueDown) +} + +func RiverQueueUp(ctx context.Context, db *sql.DB) error { + migrator, err := rivermigrate.New(riverdatabasesql.New(db), nil) + if err != nil { + return errors.Wrap(err, "river migrate up failed") + } + + _, err = migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{TargetVersion: -1}) + return err +} + +func RiverQueueDown(ctx context.Context, db *sql.DB) error { + migrator, err := rivermigrate.New(riverdatabasesql.New(db), nil) + if err != nil { + return errors.Wrap(err, "river migrate down failed") + } + + _, err = migrator.Migrate(ctx, rivermigrate.DirectionDown, &rivermigrate.MigrateOpts{TargetVersion: -1}) + return err +} 
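The River bootstrap above also serves as the template for any future Go-based migration in this package: register the up/down pair in `init`, and goose derives the version from the numeric prefix of the source file name. A minimal sketch, assuming a hypothetical `20251220000000_seed_admin.go` and an illustrative `users` seed that is not part of this patch:

```go
package migrate

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

// Registered in init so `migrate up` discovers it automatically; goose reads
// the migration version from this source file's numeric prefix.
func init() {
	goose.AddMigrationNoTxContext(seedAdminUp, seedAdminDown)
}

func seedAdminUp(ctx context.Context, db *sql.DB) error {
	// NoTx migrations receive the raw *sql.DB; the table and column names
	// here are illustrative only.
	_, err := db.ExecContext(ctx, `INSERT INTO users (name) VALUES ('admin')`)
	return err
}

func seedAdminDown(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, `DELETE FROM users WHERE name = 'admin'`)
	return err
}
```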
diff --git a/backend_v1/app/commands/migrate/migrate.go b/backend_v1/app/commands/migrate/migrate.go new file mode 100644 index 0000000..6414181 --- /dev/null +++ b/backend_v1/app/commands/migrate/migrate.go @@ -0,0 +1,86 @@ +package migrate + +import ( + "context" + "database/sql" + + "quyun/v2/app/commands" + "quyun/v2/database" + + "github.com/pressly/goose/v3" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.uber.org/dig" + + "github.com/riverqueue/river/riverdriver/riverdatabasesql" + "github.com/riverqueue/river/rivermigrate" +) + +func defaultProviders() container.Providers { + return commands.Default(container.Providers{}...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("migrate"), + atom.Short("run migrations"), + atom.RunE(Serve), + atom.Providers(defaultProviders()), + atom.Example("migrate [up|up-by-one|up-to|create|down|down-to|fix|redo|reset|status|version]"), + ) +} + +type Service struct { + dig.In + + DB *sql.DB +} + +// migrate +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + if len(args) == 0 { + args = append(args, "up") + } + + if args[0] == "create" { + return nil + } + + action, args := args[0], args[1:] + log.Infof("migration action: %s args: %+v", action, args) + + goose.SetBaseFS(database.MigrationFS) + goose.SetTableName("migrations") + goose.AddNamedMigrationNoTxContext("20251219062732_river_job.go", RiverUp, RiverDown) + + return goose.RunContext(context.Background(), action, svc.DB, "migrations", args...) + }) +} + +func RiverUp(ctx context.Context, db *sql.DB) error { + migrator, err := rivermigrate.New(riverdatabasesql.New(db), nil) + if err != nil { + return err + } + + // Migrate up. An empty MigrateOpts will migrate all the way up, but + // best practice is to specify a specific target version. + _, err = migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{}) + return err +} + +func RiverDown(ctx context.Context, db *sql.DB) error { + migrator, err := rivermigrate.New(riverdatabasesql.New(db), nil) + if err != nil { + return err + } + + // TargetVersion -1 removes River's schema completely. 
+ _, err = migrator.Migrate(ctx, rivermigrate.DirectionDown, &rivermigrate.MigrateOpts{ + TargetVersion: -1, + }) + return err +} diff --git a/backend_v1/app/commands/queue/error.go b/backend_v1/app/commands/queue/error.go new file mode 100644 index 0000000..3300b00 --- /dev/null +++ b/backend_v1/app/commands/queue/error.go @@ -0,0 +1,24 @@ +package queue + +import ( + "context" + + "github.com/riverqueue/river" + "github.com/riverqueue/river/rivertype" + log "github.com/sirupsen/logrus" +) + +type CustomErrorHandler struct{} + +func (*CustomErrorHandler) HandleError(ctx context.Context, job *rivertype.JobRow, err error) *river.ErrorHandlerResult { + log.Infof("Job errored with: %s\n", err) + return nil +} + +func (*CustomErrorHandler) HandlePanic(ctx context.Context, job *rivertype.JobRow, panicVal any, trace string) *river.ErrorHandlerResult { + log.Infof("Job panicked with: %v\n", panicVal) + log.Infof("Stack trace: %s\n", trace) + return &river.ErrorHandlerResult{ + SetCancelled: true, + } +} diff --git a/backend_v1/app/commands/queue/river.go b/backend_v1/app/commands/queue/river.go new file mode 100644 index 0000000..2365127 --- /dev/null +++ b/backend_v1/app/commands/queue/river.go @@ -0,0 +1,67 @@ +package queue + +import ( + "context" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + + "quyun/v2/app/commands" + "quyun/v2/app/jobs" + "quyun/v2/providers/app" + "quyun/v2/providers/job" + "quyun/v2/providers/postgres" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "go.uber.org/dig" +) + +func defaultProviders() container.Providers { + return commands.Default(container.Providers{ + postgres.DefaultProvider(), + job.DefaultProvider(), + }...) +} + +func Command() atom.Option { + return atom.Command( + atom.Name("queue"), + atom.Short("start queue processor"), + atom.RunE(Serve), + atom.Providers( + defaultProviders(). + With( + jobs.Provide, + ), + ), + ) +} + +type Service struct { + dig.In + + App *app.Config + Job *job.Job + Initials []contracts.Initial `group:"initials"` + CronJobs []contracts.CronJob `group:"cron_jobs"` +} + +func Serve(cmd *cobra.Command, args []string) error { + return container.Container.Invoke(func(ctx context.Context, svc Service) error { + log.SetFormatter(&log.JSONFormatter{}) + + if svc.App.IsDevMode() { + log.SetLevel(log.DebugLevel) + } + + if err := svc.Job.Start(ctx); err != nil { + return err + } + defer svc.Job.Close() + + <-ctx.Done() + return nil + }) +} diff --git a/backend_v1/app/commands/service.go b/backend_v1/app/commands/service.go new file mode 100644 index 0000000..d18a25e --- /dev/null +++ b/backend_v1/app/commands/service.go @@ -0,0 +1,19 @@ +package commands + +import ( + "quyun/v2/database" + "quyun/v2/providers/app" + "quyun/v2/providers/event" + "quyun/v2/providers/postgres" + + "go.ipao.vip/atom/container" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{ + app.DefaultProvider(), + event.DefaultProvider(), + database.DefaultProvider(), + postgres.DefaultProvider(), + }, providers...) +} diff --git a/backend_v1/app/commands/testx/testing.go b/backend_v1/app/commands/testx/testing.go new file mode 100644 index 0000000..2db2d65 --- /dev/null +++ b/backend_v1/app/commands/testx/testing.go @@ -0,0 +1,30 @@ +package testx + +import ( + "os" + "testing" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + + "github.com/rogeecn/fabfile" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Default(providers ...container.ProviderContainer) container.Providers { + return append(container.Providers{}, providers...) +} + +func Serve(providers container.Providers, t *testing.T, invoke any) { + Convey("tests boot up", t, func() { + file := fabfile.MustFind("config.toml") + + localEnv := os.Getenv("ENV_LOCAL") + if localEnv != "" { + file = fabfile.MustFind("config." + localEnv + ".toml") + } + + So(atom.LoadProviders(file, providers), ShouldBeNil) + So(container.Container.Invoke(invoke), ShouldBeNil) + }) +} diff --git a/backend_v1/app/console/.gitkeep b/backend_v1/app/console/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend_v1/app/errorx/app_error.go b/backend_v1/app/errorx/app_error.go new file mode 100644 index 0000000..ed70ea7 --- /dev/null +++ b/backend_v1/app/errorx/app_error.go @@ -0,0 +1,65 @@ +package errorx + +import ( + "fmt" + "runtime" +) + +// AppError 应用错误结构 +type AppError struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + StatusCode int `json:"-"` + Data any `json:"data,omitempty"` + ID string `json:"id,omitempty"` + + // 调试信息 + originalErr error + file string + params []any + sql string +} + +// Error 实现 error 接口 +func (e *AppError) Error() string { + return fmt.Sprintf("[%d] %s", e.Code, e.Message) +} + +// Unwrap 允许通过 errors.Unwrap 遍历到原始错误 +func (e *AppError) Unwrap() error { return e.originalErr } + +// WithData 添加数据 +func (e *AppError) WithData(data any) *AppError { + e.Data = data + return e +} + +// WithMsg 设置消息 +func (e *AppError) WithMsg(msg string) *AppError { + e.Message = msg + return e +} + +// WithSQL 记录SQL信息 +func (e *AppError) WithSQL(sql string) *AppError { + e.sql = sql + return e +} + +// WithParams 记录参数信息,并自动获取调用位置 +func (e *AppError) WithParams(params ...any) *AppError { + e.params = params + if _, file, line, ok := runtime.Caller(1); ok { + e.file = fmt.Sprintf("%s:%d", file, line) + } + return e +} + +// NewError 创建应用错误 +func NewError(code ErrorCode, statusCode int, message string) *AppError { + return &AppError{ + Code: code, + Message: message, + StatusCode: statusCode, + } +} diff --git a/backend_v1/app/errorx/codes.go b/backend_v1/app/errorx/codes.go new file mode 100644 index 0000000..a319573 --- /dev/null +++ b/backend_v1/app/errorx/codes.go @@ -0,0 +1,90 @@ +package errorx + +// ErrorCode 错误码类型 +type ErrorCode int + +const ( + // 1000-1099: 数据相关错误 + CodeRecordNotFound ErrorCode = 1001 + CodeRecordDuplicated ErrorCode = 1002 + CodeDataCorrupted ErrorCode = 1003 + CodeDataTooLarge ErrorCode = 1004 + CodeDataValidationFail ErrorCode = 1005 + CodeConstraintViolated ErrorCode = 1006 + CodeDataExpired ErrorCode = 1007 + CodeDataLocked ErrorCode = 1008 + + // 1100-1199: 请求相关错误 + CodeBadRequest ErrorCode = 1101 + CodeMissingParameter ErrorCode = 1102 + CodeInvalidParameter ErrorCode = 1103 + CodeParameterTooLong ErrorCode = 1104 + CodeParameterTooShort ErrorCode = 1105 + CodeInvalidFormat ErrorCode = 1106 + CodeUnsupportedMethod ErrorCode = 1107 + CodeRequestTooLarge ErrorCode = 1108 + CodeInvalidJSON ErrorCode = 1109 + CodeInvalidXML ErrorCode = 1110 + + // 1200-1299: 认证授权错误 + CodeUnauthorized ErrorCode = 1201 + CodeForbidden ErrorCode = 1202 + CodeTokenExpired ErrorCode = 1203 + CodeTokenInvalid ErrorCode = 1204 + CodeTokenMissing ErrorCode = 1205 + CodePermissionDenied ErrorCode = 1206 + CodeAccountDisabled ErrorCode = 1207 + CodeAccountLocked ErrorCode = 1208 + CodeInvalidCredentials ErrorCode = 1209 + CodeSessionExpired ErrorCode = 1210 
+ + // 1300-1399: 业务逻辑错误 + CodeBusinessLogic ErrorCode = 1301 + CodeWorkflowError ErrorCode = 1302 + CodeStatusConflict ErrorCode = 1303 + CodeOperationFailed ErrorCode = 1304 + CodeResourceConflict ErrorCode = 1305 + CodePreconditionFailed ErrorCode = 1306 + CodeQuotaExceeded ErrorCode = 1307 + CodeResourceExhausted ErrorCode = 1308 + + // 1400-1499: 外部服务错误 + CodeExternalService ErrorCode = 1401 + CodeServiceUnavailable ErrorCode = 1402 + CodeServiceTimeout ErrorCode = 1403 + CodeThirdPartyError ErrorCode = 1404 + CodeNetworkError ErrorCode = 1405 + CodeDatabaseError ErrorCode = 1406 + CodeCacheError ErrorCode = 1407 + CodeMessageQueueError ErrorCode = 1408 + + // 1500-1599: 系统错误 + CodeInternalError ErrorCode = 1501 + CodeConfigurationError ErrorCode = 1502 + CodeFileSystemError ErrorCode = 1503 + CodeMemoryError ErrorCode = 1504 + CodeConcurrencyError ErrorCode = 1505 + CodeDeadlockError ErrorCode = 1506 + + // 1600-1699: 限流和频率控制 + CodeRateLimitExceeded ErrorCode = 1601 + CodeTooManyRequests ErrorCode = 1602 + CodeConcurrentLimit ErrorCode = 1603 + CodeAPIQuotaExceeded ErrorCode = 1604 + + // 1700-1799: 文件和上传错误 + CodeFileNotFound ErrorCode = 1701 + CodeFileTooBig ErrorCode = 1702 + CodeInvalidFileType ErrorCode = 1703 + CodeFileCorrupted ErrorCode = 1704 + CodeUploadFailed ErrorCode = 1705 + CodeDownloadFailed ErrorCode = 1706 + CodeFilePermission ErrorCode = 1707 + + // 1800-1899: 加密和安全错误 + CodeEncryptionError ErrorCode = 1801 + CodeDecryptionError ErrorCode = 1802 + CodeSignatureInvalid ErrorCode = 1803 + CodeCertificateInvalid ErrorCode = 1804 + CodeSecurityViolation ErrorCode = 1805 +) diff --git a/backend_v1/app/errorx/handler.go b/backend_v1/app/errorx/handler.go new file mode 100644 index 0000000..adbb2e6 --- /dev/null +++ b/backend_v1/app/errorx/handler.go @@ -0,0 +1,105 @@ +package errorx + +import ( + "errors" + "net/http" + + "github.com/gofiber/fiber/v3" + "gorm.io/gorm" +) + +// ErrorHandler 错误处理器 +type ErrorHandler struct{} + +// NewErrorHandler 创建错误处理器 +func NewErrorHandler() *ErrorHandler { + return &ErrorHandler{} +} + +// Handle 处理错误并返回统一格式 +func (h *ErrorHandler) Handle(err error) *AppError { + if appErr, ok := err.(*AppError); ok { + return appErr + } + + // 处理 Fiber 错误 + if fiberErr, ok := err.(*fiber.Error); ok { + return h.handleFiberError(fiberErr) + } + + // 处理 GORM 错误 + if appErr := h.handleGormError(err); appErr != nil { + return appErr + } + + // 默认内部错误 + return &AppError{ + Code: ErrInternalError.Code, + Message: err.Error(), + StatusCode: http.StatusInternalServerError, + originalErr: err, + } +} + +// handleFiberError 处理 Fiber 错误 +func (h *ErrorHandler) handleFiberError(fiberErr *fiber.Error) *AppError { + var appErr *AppError + + switch fiberErr.Code { + case http.StatusBadRequest: + appErr = ErrBadRequest + case http.StatusUnauthorized: + appErr = ErrUnauthorized + case http.StatusForbidden: + appErr = ErrForbidden + case http.StatusNotFound: + appErr = ErrRecordNotFound + case http.StatusMethodNotAllowed: + appErr = ErrUnsupportedMethod + case http.StatusRequestEntityTooLarge: + appErr = ErrRequestTooLarge + case http.StatusTooManyRequests: + appErr = ErrTooManyRequests + default: + appErr = ErrInternalError + } + + return &AppError{ + Code: appErr.Code, + Message: fiberErr.Message, + StatusCode: fiberErr.Code, + originalErr: fiberErr, + } +} + +// handleGormError 处理 GORM 错误 +func (h *ErrorHandler) handleGormError(err error) *AppError { + if errors.Is(err, gorm.ErrRecordNotFound) { + return &AppError{ + Code: ErrRecordNotFound.Code, + Message: 
ErrRecordNotFound.Message, + StatusCode: ErrRecordNotFound.StatusCode, + originalErr: err, + } + } + + if errors.Is(err, gorm.ErrDuplicatedKey) { + return &AppError{ + Code: ErrRecordDuplicated.Code, + Message: ErrRecordDuplicated.Message, + StatusCode: ErrRecordDuplicated.StatusCode, + originalErr: err, + } + } + + if errors.Is(err, gorm.ErrInvalidTransaction) { + return &AppError{ + Code: ErrConcurrencyError.Code, + Message: "事务无效", + StatusCode: ErrConcurrencyError.StatusCode, + originalErr: err, + } + } + + return nil +} diff --git a/backend_v1/app/errorx/middleware.go b/backend_v1/app/errorx/middleware.go new file mode 100644 index 0000000..9fd1beb --- /dev/null +++ b/backend_v1/app/errorx/middleware.go @@ -0,0 +1,38 @@ +package errorx + +import "github.com/gofiber/fiber/v3" + +// 全局实例 +var DefaultSender = NewResponseSender() + +// Middleware 错误处理中间件 +func Middleware(c fiber.Ctx) error { + err := c.Next() + if err != nil { + return DefaultSender.SendError(c, err) + } + return nil +} + +// 便捷函数 +func Wrap(err error) *AppError { + if err == nil { + return nil + } + + if appErr, ok := err.(*AppError); ok { + return &AppError{ + Code: appErr.Code, + Message: appErr.Message, + StatusCode: appErr.StatusCode, + Data: appErr.Data, + originalErr: appErr, + } + } + + return DefaultSender.handler.Handle(err) +} + +func SendError(ctx fiber.Ctx, err error) error { + return DefaultSender.SendError(ctx, err) +} diff --git a/backend_v1/app/errorx/predefined.go b/backend_v1/app/errorx/predefined.go new file mode 100644 index 0000000..0f7970e --- /dev/null +++ b/backend_v1/app/errorx/predefined.go @@ -0,0 +1,105 @@ +package errorx + +import "net/http" + +// 预定义错误 - 数据相关 +var ( + ErrRecordNotFound = NewError(CodeRecordNotFound, http.StatusNotFound, "记录不存在") + ErrRecordDuplicated = NewError(CodeRecordDuplicated, http.StatusConflict, "记录重复") + ErrDataCorrupted = NewError(CodeDataCorrupted, http.StatusBadRequest, "数据损坏") + ErrDataTooLarge = NewError(CodeDataTooLarge, http.StatusRequestEntityTooLarge, "数据过大") + ErrDataValidationFail = NewError(CodeDataValidationFail, http.StatusBadRequest, "数据验证失败") + ErrConstraintViolated = NewError(CodeConstraintViolated, http.StatusConflict, "约束违规") + ErrDataExpired = NewError(CodeDataExpired, http.StatusGone, "数据已过期") + ErrDataLocked = NewError(CodeDataLocked, http.StatusLocked, "数据已锁定") +) + +// 预定义错误 - 请求相关 +var ( + ErrBadRequest = NewError(CodeBadRequest, http.StatusBadRequest, "请求错误") + ErrMissingParameter = NewError(CodeMissingParameter, http.StatusBadRequest, "缺少必需参数") + ErrInvalidParameter = NewError(CodeInvalidParameter, http.StatusBadRequest, "参数无效") + ErrParameterTooLong = NewError(CodeParameterTooLong, http.StatusBadRequest, "参数过长") + ErrParameterTooShort = NewError(CodeParameterTooShort, http.StatusBadRequest, "参数过短") + ErrInvalidFormat = NewError(CodeInvalidFormat, http.StatusBadRequest, "格式无效") + ErrUnsupportedMethod = NewError(CodeUnsupportedMethod, http.StatusMethodNotAllowed, "不支持的请求方法") + ErrRequestTooLarge = NewError(CodeRequestTooLarge, http.StatusRequestEntityTooLarge, "请求体过大") + ErrInvalidJSON = NewError(CodeInvalidJSON, http.StatusBadRequest, "JSON格式错误") + ErrInvalidXML = NewError(CodeInvalidXML, http.StatusBadRequest, "XML格式错误") +) + +// 预定义错误 - 认证授权 +var ( + ErrUnauthorized = NewError(CodeUnauthorized, http.StatusUnauthorized, "未授权") + ErrForbidden = NewError(CodeForbidden, http.StatusForbidden, "禁止访问") + ErrTokenExpired = NewError(CodeTokenExpired, http.StatusUnauthorized, "Token已过期") + ErrTokenInvalid = NewError(CodeTokenInvalid, 
http.StatusUnauthorized, "Token无效") + ErrTokenMissing = NewError(CodeTokenMissing, http.StatusUnauthorized, "Token缺失") + ErrPermissionDenied = NewError(CodePermissionDenied, http.StatusForbidden, "权限不足") + ErrAccountDisabled = NewError(CodeAccountDisabled, http.StatusForbidden, "账户已禁用") + ErrAccountLocked = NewError(CodeAccountLocked, http.StatusLocked, "账户已锁定") + ErrInvalidCredentials = NewError(CodeInvalidCredentials, http.StatusUnauthorized, "凭据无效") + ErrSessionExpired = NewError(CodeSessionExpired, http.StatusUnauthorized, "会话已过期") +) + +// 预定义错误 - 业务逻辑 +var ( + ErrBusinessLogic = NewError(CodeBusinessLogic, http.StatusBadRequest, "业务逻辑错误") + ErrWorkflowError = NewError(CodeWorkflowError, http.StatusBadRequest, "工作流错误") + ErrStatusConflict = NewError(CodeStatusConflict, http.StatusConflict, "状态冲突") + ErrOperationFailed = NewError(CodeOperationFailed, http.StatusInternalServerError, "操作失败") + ErrResourceConflict = NewError(CodeResourceConflict, http.StatusConflict, "资源冲突") + ErrPreconditionFailed = NewError(CodePreconditionFailed, http.StatusPreconditionFailed, "前置条件失败") + ErrQuotaExceeded = NewError(CodeQuotaExceeded, http.StatusForbidden, "配额超限") + ErrResourceExhausted = NewError(CodeResourceExhausted, http.StatusTooManyRequests, "资源耗尽") +) + +// 预定义错误 - 外部服务 +var ( + ErrExternalService = NewError(CodeExternalService, http.StatusBadGateway, "外部服务错误") + ErrServiceUnavailable = NewError(CodeServiceUnavailable, http.StatusServiceUnavailable, "服务不可用") + ErrServiceTimeout = NewError(CodeServiceTimeout, http.StatusRequestTimeout, "服务超时") + ErrThirdPartyError = NewError(CodeThirdPartyError, http.StatusBadGateway, "第三方服务错误") + ErrNetworkError = NewError(CodeNetworkError, http.StatusBadGateway, "网络错误") + ErrDatabaseError = NewError(CodeDatabaseError, http.StatusInternalServerError, "数据库错误") + ErrCacheError = NewError(CodeCacheError, http.StatusInternalServerError, "缓存错误") + ErrMessageQueueError = NewError(CodeMessageQueueError, http.StatusInternalServerError, "消息队列错误") +) + +// 预定义错误 - 系统错误 +var ( + ErrInternalError = NewError(CodeInternalError, http.StatusInternalServerError, "内部错误") + ErrConfigurationError = NewError(CodeConfigurationError, http.StatusInternalServerError, "配置错误") + ErrFileSystemError = NewError(CodeFileSystemError, http.StatusInternalServerError, "文件系统错误") + ErrMemoryError = NewError(CodeMemoryError, http.StatusInternalServerError, "内存错误") + ErrConcurrencyError = NewError(CodeConcurrencyError, http.StatusInternalServerError, "并发错误") + ErrDeadlockError = NewError(CodeDeadlockError, http.StatusInternalServerError, "死锁错误") +) + +// 预定义错误 - 限流 +var ( + ErrRateLimitExceeded = NewError(CodeRateLimitExceeded, http.StatusTooManyRequests, "请求频率超限") + ErrTooManyRequests = NewError(CodeTooManyRequests, http.StatusTooManyRequests, "请求过多") + ErrConcurrentLimit = NewError(CodeConcurrentLimit, http.StatusTooManyRequests, "并发数超限") + ErrAPIQuotaExceeded = NewError(CodeAPIQuotaExceeded, http.StatusTooManyRequests, "API配额超限") +) + +// 预定义错误 - 文件处理 +var ( + ErrFileNotFound = NewError(CodeFileNotFound, http.StatusNotFound, "文件不存在") + ErrFileTooBig = NewError(CodeFileTooBig, http.StatusRequestEntityTooLarge, "文件过大") + ErrInvalidFileType = NewError(CodeInvalidFileType, http.StatusBadRequest, "文件类型无效") + ErrFileCorrupted = NewError(CodeFileCorrupted, http.StatusBadRequest, "文件损坏") + ErrUploadFailed = NewError(CodeUploadFailed, http.StatusInternalServerError, "上传失败") + ErrDownloadFailed = NewError(CodeDownloadFailed, http.StatusInternalServerError, "下载失败") + ErrFilePermission = 
NewError(CodeFilePermission, http.StatusForbidden, "文件权限不足") +) + +// 预定义错误 - 安全相关 +var ( + ErrEncryptionError = NewError(CodeEncryptionError, http.StatusInternalServerError, "加密错误") + ErrDecryptionError = NewError(CodeDecryptionError, http.StatusInternalServerError, "解密错误") + ErrSignatureInvalid = NewError(CodeSignatureInvalid, http.StatusUnauthorized, "签名无效") + ErrCertificateInvalid = NewError(CodeCertificateInvalid, http.StatusUnauthorized, "证书无效") + ErrSecurityViolation = NewError(CodeSecurityViolation, http.StatusForbidden, "安全违规") +) diff --git a/backend_v1/app/errorx/response.go b/backend_v1/app/errorx/response.go new file mode 100644 index 0000000..d31c60d --- /dev/null +++ b/backend_v1/app/errorx/response.go @@ -0,0 +1,127 @@ +package errorx + +import ( + "errors" + "fmt" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/binder" + "github.com/gofiber/utils/v2" + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + "gorm.io/gorm" +) + +// ResponseSender 响应发送器 +type ResponseSender struct { + handler *ErrorHandler +} + +// NewResponseSender 创建响应发送器 +func NewResponseSender() *ResponseSender { + return &ResponseSender{ + handler: NewErrorHandler(), + } +} + +// SendError 发送错误响应 +func (s *ResponseSender) SendError(ctx fiber.Ctx, err error) error { + appErr := s.handler.Handle(err) + + // 记录错误日志 + s.logError(appErr) + + // 根据 Content-Type 返回不同格式 + return s.sendResponse(ctx, appErr) +} + +// logError 记录错误日志 +func (s *ResponseSender) logError(appErr *AppError) { + // 确保每个错误实例都有唯一ID,便于日志关联 + if appErr.ID == "" { + appErr.ID = uuid.NewString() + } + + // 构造详细的错误级联链路(包含类型、状态、定位等) + chain := make([]map[string]any, 0, 4) + var e error = appErr + for e != nil { + entry := map[string]any{ + "type": fmt.Sprintf("%T", e), + "error": e.Error(), + } + switch v := e.(type) { + case *AppError: + entry["code"] = v.Code + entry["statusCode"] = v.StatusCode + if v.file != "" { + entry["file"] = v.file + } + if len(v.params) > 0 { + entry["params"] = v.params + } + if v.sql != "" { + entry["sql"] = v.sql + } + if v.ID != "" { + entry["id"] = v.ID + } + case *fiber.Error: + entry["statusCode"] = v.Code + entry["message"] = v.Message + } + + // GORM 常见错误归类标记 + if errors.Is(e, gorm.ErrRecordNotFound) { + entry["gorm"] = "record_not_found" + } else if errors.Is(e, gorm.ErrDuplicatedKey) { + entry["gorm"] = "duplicated_key" + } else if errors.Is(e, gorm.ErrInvalidTransaction) { + entry["gorm"] = "invalid_transaction" + } + + chain = append(chain, entry) + e = errors.Unwrap(e) + } + + root := chain[len(chain)-1]["error"] + + logEntry := log.WithFields(log.Fields{ + "id": appErr.ID, + "code": appErr.Code, + "statusCode": appErr.StatusCode, + "file": appErr.file, + "sql": appErr.sql, + "params": appErr.params, + "error_chain": chain, + "root_error": root, + }) + + if appErr.originalErr != nil { + logEntry = logEntry.WithError(appErr.originalErr) + } + + // 根据错误级别记录不同级别的日志 + if appErr.StatusCode >= 500 { + logEntry.Error("系统错误: ", appErr.Message) + } else if appErr.StatusCode >= 400 { + logEntry.Warn("客户端错误: ", appErr.Message) + } else { + logEntry.Info("应用错误: ", appErr.Message) + } +} + +// sendResponse 发送响应 +func (s *ResponseSender) sendResponse(ctx fiber.Ctx, appErr *AppError) error { + contentType := utils.ToLower(utils.UnsafeString(ctx.Request().Header.ContentType())) + contentType = binder.FilterFlags(utils.ParseVendorSpecificContentType(contentType)) + + switch contentType { + case fiber.MIMETextXML, fiber.MIMEApplicationXML: + return ctx.Status(appErr.StatusCode).XML(appErr) + case 
fiber.MIMETextHTML, fiber.MIMETextPlain: + return ctx.Status(appErr.StatusCode).SendString(appErr.Message) + default: + return ctx.Status(appErr.StatusCode).JSON(appErr) + } +} diff --git a/backend_v1/app/events/publishers/user_register.go b/backend_v1/app/events/publishers/user_register.go new file mode 100644 index 0000000..af1a671 --- /dev/null +++ b/backend_v1/app/events/publishers/user_register.go @@ -0,0 +1,26 @@ +package publishers + +import ( + "encoding/json" + + "quyun/v2/app/events" + "quyun/v2/providers/event" + + "go.ipao.vip/atom/contracts" +) + +var _ contracts.EventPublisher = (*UserRegister)(nil) + +type UserRegister struct { + event.DefaultChannel + + ID int64 `json:"id"` +} + +func (e *UserRegister) Marshal() ([]byte, error) { + return json.Marshal(e) +} + +func (e *UserRegister) Topic() string { + return events.TopicUserRegister +} diff --git a/backend_v1/app/events/subscribers/provider.gen.go b/backend_v1/app/events/subscribers/provider.gen.go new file mode 100755 index 0000000..e66ab95 --- /dev/null +++ b/backend_v1/app/events/subscribers/provider.gen.go @@ -0,0 +1,27 @@ +package subscribers + +import ( + "quyun/v2/providers/event" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __event *event.PubSub, + ) (contracts.Initial, error) { + obj := &UserRegister{} + if err := obj.Prepare(); err != nil { + return nil, err + } + __event.Handle("handler:UserRegister", obj) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/events/subscribers/user_register.go b/backend_v1/app/events/subscribers/user_register.go new file mode 100644 index 0000000..f529a46 --- /dev/null +++ b/backend_v1/app/events/subscribers/user_register.go @@ -0,0 +1,45 @@ +package subscribers + +import ( + "encoding/json" + + "quyun/v2/app/events" + "quyun/v2/app/events/publishers" + "quyun/v2/providers/event" + + "github.com/ThreeDotsLabs/watermill/message" + "github.com/sirupsen/logrus" + "go.ipao.vip/atom/contracts" +) + +var _ contracts.EventHandler = (*UserRegister)(nil) + +// @provider(event) +type UserRegister struct { + event.DefaultChannel + event.DefaultPublishTo + + log *logrus.Entry `inject:"false"` +} + +func (e *UserRegister) Prepare() error { + e.log = logrus.WithField("module", "events.subscribers.user_register") + return nil +} + +// Topic implements contracts.EventHandler. +func (e *UserRegister) Topic() string { + return events.TopicUserRegister +} + +// Handler implements contracts.EventHandler. 
+func (e *UserRegister) Handler(msg *message.Message) ([]*message.Message, error) { + var payload publishers.UserRegister + err := json.Unmarshal(msg.Payload, &payload) + if err != nil { + return nil, err + } + e.log.Infof("received event %s", msg.Payload) + + return nil, nil +} diff --git a/backend_v1/app/events/subscribers/utils.go b/backend_v1/app/events/subscribers/utils.go new file mode 100644 index 0000000..45419ef --- /dev/null +++ b/backend_v1/app/events/subscribers/utils.go @@ -0,0 +1,24 @@ +package subscribers + +import ( + "encoding/json" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill/message" +) + +func toMessage(event any) (*message.Message, error) { + b, err := json.Marshal(event) + if err != nil { + return nil, err + } + return message.NewMessage(watermill.NewUUID(), b), nil +} + +func toMessageList(event any) ([]*message.Message, error) { + m, err := toMessage(event) + if err != nil { + return nil, err + } + return []*message.Message{m}, nil +} diff --git a/backend_v1/app/events/topics.go b/backend_v1/app/events/topics.go new file mode 100644 index 0000000..e2b777a --- /dev/null +++ b/backend_v1/app/events/topics.go @@ -0,0 +1,6 @@ +package events + +const ( + TopicProcessed = "event:processed" + TopicUserRegister = "event:user_register" +) diff --git a/backend_v1/app/grpc/users/handler.go b/backend_v1/app/grpc/users/handler.go new file mode 100644 index 0000000..e8764a1 --- /dev/null +++ b/backend_v1/app/grpc/users/handler.go @@ -0,0 +1,26 @@ +package users + +import ( + "context" + + userv1 "quyun/v2/pkg/proto/user/v1" +) + +// @provider(grpc) userv1.RegisterUserServiceServer +type Users struct { + userv1.UnimplementedUserServiceServer +} + +func (u *Users) ListUsers(ctx context.Context, in *userv1.ListUsersRequest) (*userv1.ListUsersResponse, error) { + // userv1.UserServiceServer + return &userv1.ListUsersResponse{}, nil +} + +// GetUser implements userv1.UserServiceServer +func (u *Users) GetUser(ctx context.Context, in *userv1.GetUserRequest) (*userv1.GetUserResponse, error) { + return &userv1.GetUserResponse{ + User: &userv1.User{ + Id: in.Id, + }, + }, nil +} diff --git a/backend_v1/app/grpc/users/provider.gen.go b/backend_v1/app/grpc/users/provider.gen.go new file mode 100755 index 0000000..d708c0f --- /dev/null +++ b/backend_v1/app/grpc/users/provider.gen.go @@ -0,0 +1,25 @@ +package users + +import ( + userv1 "quyun/v2/pkg/proto/user/v1" + "quyun/v2/providers/grpc" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __grpc *grpc.Grpc, + ) (contracts.Initial, error) { + obj := &Users{} + userv1.RegisterUserServiceServer(__grpc.Server, obj) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/http/v1/demo.go b/backend_v1/app/http/v1/demo.go new file mode 100644 index 0000000..a176b60 --- /dev/null +++ b/backend_v1/app/http/v1/demo.go @@ -0,0 +1,76 @@ +package v1 + +import ( + "mime/multipart" + + "quyun/v2/app/errorx" + "quyun/v2/app/requests" + "quyun/v2/app/services" + "quyun/v2/providers/jwt" + + "github.com/gofiber/fiber/v3" +) + +// @provider +type demo struct{} + +type FooUploadReq struct { + Folder string `json:"folder" form:"folder"` // 上传到指定文件夹 +} + +type FooQuery struct { + Search string `query:"search"` // 搜索关键词 +} + +type FooHeader struct { + ContentType string `header:"Content-Type"` // 内容类型 +} +type 
Filter struct { + Name string `query:"name"` // 名称 + Age int `query:"age"` // 年龄 +} + +type ResponseItem struct{} + +// Foo +// +// @Summary Test +// @Description Test +// @Tags Test +// @Accept json +// @Produce json +// +// @Param id path int true "ID" +// @Param query query Filter true "Filter" +// @Param pager query requests.Pagination true "Pager" +// @Success 200 {object} requests.Pager{list=ResponseItem} "成功" +// +// @Router /v1/medias/:id [post] +// @Bind query query +// @Bind pager query +// @Bind header header +// @Bind id path +// @Bind req body +// @Bind file file +// @Bind claim local +func (d *demo) Foo( + ctx fiber.Ctx, + id int, + pager *requests.Pagination, + query *FooQuery, + header *FooHeader, + claim *jwt.Claims, + file *multipart.FileHeader, + req *FooUploadReq, +) error { + _, err := services.Test.Test(ctx) + if err != nil { + // 示例:在控制器层自定义错误消息/附加数据 + appErr := errorx.Wrap(err). + WithMsg("获取测试失败"). + WithData(fiber.Map{"route": "/v1/test"}). + WithParams("handler", "Test.Hello") + return appErr + } + return nil +} diff --git a/backend_v1/app/http/v1/provider.gen.go b/backend_v1/app/http/v1/provider.gen.go new file mode 100755 index 0000000..e4074ec --- /dev/null +++ b/backend_v1/app/http/v1/provider.gen.go @@ -0,0 +1,37 @@ +package v1 + +import ( + "quyun/v2/app/middlewares" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*demo, error) { + obj := &demo{} + + return obj, nil + }); err != nil { + return err + } + if err := container.Container.Provide(func( + demo *demo, + middlewares *middlewares.Middlewares, + ) (contracts.HttpRoute, error) { + obj := &Routes{ + demo: demo, + middlewares: middlewares, + } + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupRoutes); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/http/v1/routes.gen.go b/backend_v1/app/http/v1/routes.gen.go new file mode 100644 index 0000000..0b47171 --- /dev/null +++ b/backend_v1/app/http/v1/routes.gen.go @@ -0,0 +1,60 @@ +// Code generated by atomctl. DO NOT EDIT. + +// Package v1 provides HTTP route definitions and registration +// for the quyun/v2 application. +package v1 + +import ( + "mime/multipart" + "quyun/v2/app/middlewares" + "quyun/v2/app/requests" + "quyun/v2/providers/jwt" + + "github.com/gofiber/fiber/v3" + log "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + _ "go.ipao.vip/atom/contracts" + . "go.ipao.vip/atom/fen" +) + +// Routes implements the HttpRoute contract and provides route registration +// for all controllers in the v1 module. +// +// @provider contracts.HttpRoute atom.GroupRoutes +type Routes struct { + log *log.Entry `inject:"false"` + middlewares *middlewares.Middlewares + // Controller instances + demo *demo +} + +// Prepare initializes the routes provider with logging configuration. +func (r *Routes) Prepare() error { + r.log = log.WithField("module", "routes.v1") + r.log.Info("Initializing routes module") + return nil +} + +// Name returns the unique identifier for this routes provider. +func (r *Routes) Name() string { + return "v1" +} + +// Register registers all HTTP routes with the provided fiber router. +// Each route is registered with its corresponding controller action and parameter bindings. 
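+// Func7 (dot-imported from go.ipao.vip/atom/fen) wraps the seven bound
+// parameters of demo.Foo; each binder (PathParam, Query, Header, Local, File,
+// Body) supplies one argument, matching the @Bind annotations on the
+// controller. The annotated path "/v1/medias/:id" is sliced by len(r.Path())
+// so that it is registered relative to the "/v1" prefix returned by Path()
+// in routes.manual.go.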
+func (r *Routes) Register(router fiber.Router) { + // Register routes for controller: demo + r.log.Debugf("Registering route: Post /v1/medias/:id -> demo.Foo") + router.Post("/v1/medias/:id"[len(r.Path()):], Func7( + r.demo.Foo, + PathParam[int]("id"), + Query[requests.Pagination]("pager"), + Query[FooQuery]("query"), + Header[FooHeader]("header"), + Local[*jwt.Claims]("claim"), + File[multipart.FileHeader]("file"), + Body[FooUploadReq]("req"), + )) + + r.log.Info("Successfully registered all routes") +} diff --git a/backend_v1/app/http/v1/routes.manual.go b/backend_v1/app/http/v1/routes.manual.go new file mode 100644 index 0000000..4802231 --- /dev/null +++ b/backend_v1/app/http/v1/routes.manual.go @@ -0,0 +1,9 @@ +package v1 + +func (r *Routes) Path() string { + return "/v1" +} + +func (r *Routes) Middlewares() []any { + return []any{} +} diff --git a/backend_v1/app/jobs/demo_cron.go b/backend_v1/app/jobs/demo_cron.go new file mode 100644 index 0000000..88960d4 --- /dev/null +++ b/backend_v1/app/jobs/demo_cron.go @@ -0,0 +1,36 @@ +package jobs + +import ( + "time" + + . "github.com/riverqueue/river" + "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" +) + +var _ contracts.CronJob = (*DemoCronJob)(nil) + +// @provider(cronjob) +type DemoCronJob struct { + log *logrus.Entry `inject:"false"` +} + +// Prepare implements contracts.CronJob. +func (DemoCronJob) Prepare() error { + return nil +} + +// JobArgs implements contracts.CronJob. +func (DemoCronJob) Args() []contracts.CronJobArg { + return []contracts.CronJobArg{ + { + Arg: DemoJob{ + Strings: []string{"a", "b", "c", "d"}, + }, + + PeriodicInterval: PeriodicInterval(time.Second * 10), + RunOnStart: false, + }, + } +} diff --git a/backend_v1/app/jobs/demo_job.go b/backend_v1/app/jobs/demo_job.go new file mode 100644 index 0000000..e36dab8 --- /dev/null +++ b/backend_v1/app/jobs/demo_job.go @@ -0,0 +1,53 @@ +package jobs + +import ( + "context" + "sort" + "time" + + . "github.com/riverqueue/river" + log "github.com/sirupsen/logrus" + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" + _ "go.ipao.vip/atom/contracts" +) + +var _ contracts.JobArgs = DemoJob{} + +type DemoJob struct { + Strings []string `json:"strings"` +} + +func (s DemoJob) InsertOpts() InsertOpts { + return InsertOpts{ + Queue: QueueDefault, + Priority: PriorityDefault, + } +} + +func (DemoJob) Kind() string { return "demo_job" } +func (a DemoJob) UniqueID() string { return a.Kind() } + +var _ Worker[DemoJob] = (*DemoJobWorker)(nil) + +// @provider(job) +type DemoJobWorker struct { + WorkerDefaults[DemoJob] +} + +func (w *DemoJobWorker) NextRetry(job *Job[DemoJob]) time.Time { + return time.Now().Add(30 * time.Second) +} + +func (w *DemoJobWorker) Work(ctx context.Context, job *Job[DemoJob]) error { + logger := log.WithField("job", job.Args.Kind()) + + logger.Infof("[START] %s args: %v", job.Args.Kind(), job.Args.Strings) + defer logger.Infof("[END] %s", job.Args.Kind()) + + // modify below + sort.Strings(job.Args.Strings) + logger.Infof("[%s] Sorted strings: %v\n", time.Now().Format(time.TimeOnly), job.Args.Strings) + + return nil +} diff --git a/backend_v1/app/jobs/demo_job_test.go b/backend_v1/app/jobs/demo_job_test.go new file mode 100644 index 0000000..6f08455 --- /dev/null +++ b/backend_v1/app/jobs/demo_job_test.go @@ -0,0 +1,53 @@ +package jobs + +import ( + "context" + "testing" + + "quyun/v2/app/commands/testx" + "quyun/v2/app/services" + + . "github.com/riverqueue/river" + . 
"github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/suite" + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" + "go.uber.org/dig" +) + +type DemoJobSuiteInjectParams struct { + dig.In + + Initials []contracts.Initial `group:"initials"` // nolint:structcheck +} + +type DemoJobSuite struct { + suite.Suite + + DemoJobSuiteInjectParams +} + +func Test_DemoJob(t *testing.T) { + providers := testx.Default().With(Provide, services.Provide) + + testx.Serve(providers, t, func(p DemoJobSuiteInjectParams) { + suite.Run(t, &DemoJobSuite{DemoJobSuiteInjectParams: p}) + }) +} + +func (t *DemoJobSuite) Test_Work() { + Convey("test_work", t.T(), func() { + Convey("step 1", func() { + job := &Job[DemoJob]{ + Args: DemoJob{ + Strings: []string{"a", "b", "c"}, + }, + } + + worker := &DemoJobWorker{} + + err := worker.Work(context.Background(), job) + So(err, ShouldBeNil) + }) + }) +} diff --git a/backend_v1/app/jobs/provider.gen.go b/backend_v1/app/jobs/provider.gen.go new file mode 100755 index 0000000..95331e5 --- /dev/null +++ b/backend_v1/app/jobs/provider.gen.go @@ -0,0 +1,41 @@ +package jobs + +import ( + "quyun/v2/providers/job" + + "github.com/riverqueue/river" + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + __job *job.Job, + ) (contracts.Initial, error) { + obj := &DemoCronJob{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + container.Later(func() error { return __job.AddPeriodicJobs(obj) }) + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + if err := container.Container.Provide(func( + __job *job.Job, + ) (contracts.Initial, error) { + obj := &DemoJobWorker{} + if err := river.AddWorkerSafely(__job.Workers, obj); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/middlewares/mid_debug.go b/backend_v1/app/middlewares/mid_debug.go new file mode 100644 index 0000000..ecb33af --- /dev/null +++ b/backend_v1/app/middlewares/mid_debug.go @@ -0,0 +1,9 @@ +package middlewares + +import ( + "github.com/gofiber/fiber/v3" +) + +func (f *Middlewares) DebugMode(c fiber.Ctx) error { + return c.Next() +} diff --git a/backend_v1/app/middlewares/middlewares.go b/backend_v1/app/middlewares/middlewares.go new file mode 100644 index 0000000..69e0e4c --- /dev/null +++ b/backend_v1/app/middlewares/middlewares.go @@ -0,0 +1,15 @@ +package middlewares + +import ( + log "github.com/sirupsen/logrus" +) + +// @provider +type Middlewares struct { + log *log.Entry `inject:"false"` +} + +func (f *Middlewares) Prepare() error { + f.log = log.WithField("module", "middleware") + return nil +} diff --git a/backend_v1/app/middlewares/provider.gen.go b/backend_v1/app/middlewares/provider.gen.go new file mode 100755 index 0000000..f84d36c --- /dev/null +++ b/backend_v1/app/middlewares/provider.gen.go @@ -0,0 +1,20 @@ +package middlewares + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func() (*Middlewares, error) { + obj := &Middlewares{} + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/requests/pagination.go b/backend_v1/app/requests/pagination.go new file mode 100644 index 0000000..e98528d --- 
/dev/null +++ b/backend_v1/app/requests/pagination.go @@ -0,0 +1,30 @@ +package requests + +import "github.com/samber/lo" + +type Pager struct { + Pagination `json:",inline"` + Total int64 `json:"total"` + Items any `json:"items"` +} + +type Pagination struct { + Page int64 `json:"page" form:"page" query:"page"` + Limit int64 `json:"limit" form:"limit" query:"limit"` +} + +func (filter *Pagination) Offset() int64 { + return (filter.Page - 1) * filter.Limit +} + +func (filter *Pagination) Format() *Pagination { + if filter.Page <= 0 { + filter.Page = 1 + } + + if !lo.Contains([]int64{10, 20, 50, 100}, filter.Limit) { + filter.Limit = 10 + } + + return filter +} diff --git a/backend_v1/app/requests/sort.go b/backend_v1/app/requests/sort.go new file mode 100644 index 0000000..517b419 --- /dev/null +++ b/backend_v1/app/requests/sort.go @@ -0,0 +1,41 @@ +package requests + +import ( + "strings" + + "github.com/samber/lo" +) + +type SortQueryFilter struct { + Asc *string `json:"asc" form:"asc"` + Desc *string `json:"desc" form:"desc"` +} + +func (s *SortQueryFilter) AscFields() []string { + if s.Asc == nil { + return nil + } + return strings.Split(*s.Asc, ",") +} + +func (s *SortQueryFilter) DescFields() []string { + if s.Desc == nil { + return nil + } + return strings.Split(*s.Desc, ",") +} + +func (s *SortQueryFilter) DescID() *SortQueryFilter { + if s.Desc == nil { + s.Desc = lo.ToPtr("id") + } + + items := s.DescFields() + if lo.Contains(items, "id") { + return s + } + + items = append(items, "id") + s.Desc = lo.ToPtr(strings.Join(items, ",")) + return s +} diff --git a/backend_v1/app/services/provider.gen.go b/backend_v1/app/services/provider.gen.go new file mode 100755 index 0000000..b6c4169 --- /dev/null +++ b/backend_v1/app/services/provider.gen.go @@ -0,0 +1,36 @@ +package services + +import ( + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" + "gorm.io/gorm" +) + +func Provide(opts ...opt.Option) error { + if err := container.Container.Provide(func( + db *gorm.DB, + test *test, + ) (contracts.Initial, error) { + obj := &services{ + db: db, + test: test, + } + if err := obj.Prepare(); err != nil { + return nil, err + } + + return obj, nil + }, atom.GroupInitial); err != nil { + return err + } + if err := container.Container.Provide(func() (*test, error) { + obj := &test{} + + return obj, nil + }); err != nil { + return err + } + return nil +} diff --git a/backend_v1/app/services/services.gen.go b/backend_v1/app/services/services.gen.go new file mode 100644 index 0000000..e65f276 --- /dev/null +++ b/backend_v1/app/services/services.gen.go @@ -0,0 +1,28 @@ +package services + +import ( + "gorm.io/gorm" +) + +var _db *gorm.DB + +// exported CamelCase Services +var ( + Test *test +) + +// @provider(model) +type services struct { + db *gorm.DB + // define Services + test *test +} + +func (svc *services) Prepare() error { + _db = svc.db + + // set exported Services here + Test = svc.test + + return nil +} diff --git a/backend_v1/app/services/test.go b/backend_v1/app/services/test.go new file mode 100644 index 0000000..051d196 --- /dev/null +++ b/backend_v1/app/services/test.go @@ -0,0 +1,10 @@ +package services + +import "context" + +// @provider +type test struct{} + +func (t *test) Test(ctx context.Context) (string, error) { + return "Test", nil +} diff --git a/backend_v1/app/services/test_test.go b/backend_v1/app/services/test_test.go new file mode 100644 index 0000000..31a7e77 --- /dev/null +++ 
b/backend_v1/app/services/test_test.go @@ -0,0 +1,41 @@ +package services + +import ( + "testing" + "time" + + "quyun/v2/app/commands/testx" + + . "github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/suite" + + _ "go.ipao.vip/atom" + "go.ipao.vip/atom/contracts" + "go.uber.org/dig" +) + +type TestSuiteInjectParams struct { + dig.In + + Initials []contracts.Initial `group:"initials"` // nolint:structcheck +} + +type TestSuite struct { + suite.Suite + + TestSuiteInjectParams +} + +func Test_Test(t *testing.T) { + providers := testx.Default().With(Provide) + + testx.Serve(providers, t, func(p TestSuiteInjectParams) { + suite.Run(t, &TestSuite{TestSuiteInjectParams: p}) + }) +} + +func (t *TestSuite) Test_Test() { + Convey("test_work", t.T(), func() { + t.T().Log("start test at", time.Now()) + }) +} diff --git a/backend_v1/buf.gen.yaml b/backend_v1/buf.gen.yaml new file mode 100644 index 0000000..9cbda62 --- /dev/null +++ b/backend_v1/buf.gen.yaml @@ -0,0 +1,23 @@ +version: v2 +inputs: + - directory: proto +managed: + enabled: true + override: + - file_option: go_package_prefix + value: quyun/v2/pkg/proto + +plugins: + - local: protoc-gen-go + out: pkg/proto + opt: paths=source_relative + #- local: protoc-gen-grpc-gateway + # out: pkg/proto + # opt: + # - paths=source_relative + # - generate_unbound_methods=true + - local: protoc-gen-go-grpc + out: pkg/proto + opt: paths=source_relative + # - local: protoc-gen-openapiv2 + # out: docs/proto diff --git a/backend_v1/buf.yaml b/backend_v1/buf.yaml new file mode 100644 index 0000000..06039af --- /dev/null +++ b/backend_v1/buf.yaml @@ -0,0 +1,13 @@ +# For details on buf.yaml configuration, visit https://buf.build/docs/configuration/v2/buf-yaml +version: v2 +modules: + - path: proto +lint: + use: + - STANDARD +breaking: + use: + - FILE +deps: + - buf.build/googleapis/googleapis + - buf.build/grpc-ecosystem/grpc-gateway diff --git a/backend_v1/config.full.toml b/backend_v1/config.full.toml new file mode 100644 index 0000000..0ea2865 --- /dev/null +++ b/backend_v1/config.full.toml @@ -0,0 +1,239 @@ +# ========================= +# gRPC Server (providers/grpc) +# ========================= +[Grpc] +# 必填 +Port = 9090 # gRPC 监听端口 +# 可选 +# Host = "0.0.0.0" # 监听地址(默认 0.0.0.0) +EnableReflection = true # 开启服务反射(开发/调试友好) +EnableHealth = true # 注册 gRPC health 服务 +ShutdownTimeoutSeconds = 10 # 优雅关停超时,超时后强制 Stop + +# 说明: +# - 统一的拦截器、ServerOption 可通过 providers/grpc/options.go 的 +# UseUnaryInterceptors/UseStreamInterceptors/UseOptions 动态注入。 +# ========================= +# HTTP Server (providers/http) +# ========================= +[Http] +# 必填 +Port = 8080 # HTTP 监听端口 + +# 可选 +# BaseURI = "/api" # 全局前缀 +# StaticRoute = "/static" # 静态路由路径 +# StaticPath = "./public" # 静态文件目录 +[Http.Tls] + +# Cert = "server.crt" +# Key = "server.key" +[Http.Cors] + +# Mode = "enabled" # "enabled"|"disabled"(默认按 Whitelist 推断) +# 白名单项示例(按需追加多条) +# [[Http.Cors.Whitelist]] +# AllowOrigin = "https://example.com" +# AllowHeaders = "Authorization,Content-Type" +# AllowMethods = "GET,POST,PUT,DELETE" +# ExposeHeaders = "X-Request-Id" +# AllowCredentials = true +# ========================= +# Connection Multiplexer (providers/cmux) +# 用于同端口同时暴露 HTTP + gRPC:cmux -> 分发到 Http/Grpc +# ========================= +[Cmux] +# 必填 +Port = 8081 # cmux 监听端口 + +# 可选 +# Host = "0.0.0.0" +# ========================= +# Events / PubSub (providers/event) +# gochannel 为默认内存通道(始终启用) +# 如需 Kafka / Redis Stream / SQL,请按需开启对应小节 +# ========================= +[Events] + +# Kafka(可选) +[Events.Kafka] 
+# 必填(启用时) +Brokers = ["127.0.0.1:9092"] +# 可选 +ConsumerGroup = "my-group" + +# Redis Stream(可选) +[Events.Redis] +# 必填(启用时) +ConsumerGroup = "my-group" +# 可选 +Streams = ["mystream"] # 订阅的 streams;可在 Handler 侧具体指定 + +# SQL(可选,基于 PostgreSQL) +[Events.Sql] +# 必填(启用时) +ConsumerGroup = "my-group" + +# ========================= +# Job / Queue (providers/job) +# 基于 River(Postgres)队列 +# ========================= +[Job] + +# 可选:每队列并发数(默认 high/default/low 均 10) +#QueueWorkers = { high = 20, default = 10, low = 5 } +# 说明: +# - 需要启用 providers/postgres 以提供数据库连接 +# - 通过 Add/AddWithID 入队,AddPeriodicJob 注册定时任务 +# ========================= +# JWT (providers/jwt) +# ========================= +[JWT] +# 必填 +SigningKey = "your-signing-key" # 密钥 +ExpiresTime = "168h" # 过期时间,形如 "72h", "168h" +# 可选 +Issuer = "my-service" + +# ========================= +# HashIDs (providers/hashids) +# ========================= +[HashIDs] +# 必填 +Salt = "your-salt" + +# 可选 +# Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890" +# MinLength = 8 +# ========================= +# Redis (providers/redis) +# ========================= +[Redis] +# 必填(若不填 Host/Port,将默认 localhost:6379) +Host = "127.0.0.1" +Port = 6379 + +# 可选 +# Username = "" +# Password = "" +# DB = 0 +# ClientName = "my-service" +# 连接池/重试(可选) +# PoolSize = 50 +# MinIdleConns = 10 +# MaxRetries = 2 +# 超时(秒,可选) +# DialTimeoutSeconds = 5 +# ReadTimeoutSeconds = 3 +# WriteTimeoutSeconds = 3 +# 连接生命周期(秒,可选) +# ConnMaxIdleTimeSeconds = 300 +# ConnMaxLifetimeSeconds = 1800 +# 探活(秒,可选,默认 5) +# PingTimeoutSeconds = 5 +# ========================= +# PostgreSQL / GORM (providers/postgres) +# ========================= +[Database] +# 必填 +Host = "127.0.0.1" +Port = 5432 +Database = "app" + +# 可选(未填 Username 默认 postgres;其它有默认值见代码) +# Username = "postgres" +# Password = "" +# SslMode = "disable" # "disable"|"require"|... 
+# TimeZone = "Asia/Shanghai" +# Schema = "public" +# Prefix = "" # 表前缀 +# Singular = false # 表名是否使用单数 +# 连接池(可选) +# MaxIdleConns = 10 +# MaxOpenConns = 100 +# ConnMaxLifetimeSeconds = 1800 +# ConnMaxIdleTimeSeconds = 300 +# DSN 增强(可选) +# UseSearchPath = true +# ApplicationName = "my-service" +# ========================= +# HTTP Client (providers/req) +# ========================= +[HttpClient] + +# 可选 +# DevMode = true +# CookieJarFile = "./data/cookies.jar" +# RootCa = ["./ca.crt"] +# UserAgent = "my-service/1.0" +# InsecureSkipVerify = false +# BaseURL = "https://api.example.com" +# ContentType = "application/json" +# Timeout = 10 # 秒 +# CommonHeaders = { X-Request-From = "service" } +# CommonQuery = { locale = "zh-CN" } +[HttpClient.AuthBasic] + +# Username = "" +# Password = "" +# 其它认证 / 代理 / 跳转策略(可选) +# AuthBearerToken = "Bearer " # 或仅 ,内部会设置 Bearer +# ProxyURL = "http://127.0.0.1:7890" +# RedirectPolicy = ["Max:10","SameHost"] +# ========================= +# OpenTracing (Jaeger) (providers/tracing) +# ========================= +[Tracing] +# 必填 +Name = "my-service" +# 可选(Agent / Collector 至少配一个;未配时走默认本地端口) +Reporter_LocalAgentHostPort = "127.0.0.1:6831" +Reporter_CollectorEndpoint = "http://127.0.0.1:14268/api/traces" + +# 行为开关(可选) +# Disabled = false +# Gen128Bit = true +# ZipkinSharedRPCSpan = true +# RPCMetrics = false +# 采样器(可选) +# Sampler_Type = "const" # const|probabilistic|ratelimiting|remote +# Sampler_Param = 1.0 +# Sampler_SamplingServerURL = "" +# Sampler_MaxOperations = 0 +# Sampler_RefreshIntervalSec = 0 +# Reporter(可选) +# Reporter_LogSpans = true +# Reporter_BufferFlushMs = 100 +# Reporter_QueueSize = 1000 +# 进程 Tags(可选) +# [Tracing.Tags] +# version = "1.0.0" +# zone = "az1" +# ========================= +# OpenTelemetry (providers/otel) +# ========================= +[OTEL] +# 必填(建议设置) +ServiceName = "my-service" +# 可选(版本/环境) +Version = "1.0.0" +Env = "dev" +# 导出端点(二选一,若都填优先 HTTP) +# EndpointGRPC = "127.0.0.1:4317" +# EndpointHTTP = "127.0.0.1:4318" +# 认证(可选,支持仅填纯 token,将自动补齐 Bearer) +# Token = "Bearer " +# 安全(可选) +# InsecureGRPC = true +# InsecureHTTP = true +# 采样(可选) +# Sampler = "always" # "always"|"ratio" +# SamplerRatio = 0.1 # 0..1,仅当 Sampler="ratio" 生效 +# 批处理(毫秒,可选) +# BatchTimeoutMs = 5000 +# ExportTimeoutMs = 10000 +# MaxQueueSize = 2048 +# MaxExportBatchSize = 512 +# 指标(毫秒,可选) +# MetricReaderIntervalMs = 10000 # 指标导出周期 +# RuntimeReadMemStatsIntervalMs = 5000 # 运行时指标采集最小周期 diff --git a/backend_v1/config.prod.toml b/backend_v1/config.prod.toml new file mode 100644 index 0000000..6188954 --- /dev/null +++ b/backend_v1/config.prod.toml @@ -0,0 +1,87 @@ +[App] +Mode = "prod" +BaseURI = "https://mp.jdwan.com" +StoragePath = "/app/downloads" +DistAdmin = "/app/dist/admin" +DistWeChat = "/app/dist/wechat" +RechargeWechat = "13932043996" + +[Http] +Port = 9888 + +[Database] +Host = "host.local" +Database = "quyun" +Password = "xixi0202" + +[JWT] +ExpiresTime = "168h" +SigningKey = "xixi@0202" + +[HashIDs] +Salt = "Salt" + +[Redis] +Host = "" +Port = 6379 +Password = "hello" +DB = 0 + +[Ali] +AccessKeyId = "LTAI5t86SjiP9zRd3q2w7jQN" +AccessKeySecret = "hV7spvJuWh8w0EEIXj8NFi2uBlF4aS" +Bucket ="rogee-bj" +Host ="https://assets.jdwan.com" +Region ="cn-beijing" + +[WeChat] +AppID = "wx47649361b6eba174" +AppSecret = "e9cdf19b006cd294a9dae7ad8ae08b72" +Token = "W8Xhw5TivYBgY" +EncodingAesKey = "OlgPgMvsl92zy5oErtEzRcziRT2txoN3jgEHV6RQZMY" +DevMode = false + +[WeChat.Pay] +NotifyURL="https://mp.jdwan.com/v1/pay/callback/wechat" +MchID = "1702644947" +SerialNo = 
"4563EC584A35BC84FB27AA4100C934C9A91D59CA" +MechName = "佳芃(北京)企业管理咨询有限公司" +ApiV3Key="5UBDkxVDY44AKafkqN6YgYxgtkXP6Mw6" +PrivateKey="""-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+GjWdwRorgQXw +Z8ouZeW8UsUgjiSUPKLJHGOZQESa09xmzm+DVZWNCPEHJvz1bDmSNXVsQUWRE/+y +MwSfe+faq0d4BZiw1ueFbRyj4Vw/x7B2vY0i8yo0VyTnWdC6QZRc+V+dbuPJM1Ok ++Qf2yg+NZhz3XuTWvQOscAc9+f3aj8fhXJQlRmNby736w0iDu9EQFvPnkVs10/lW +RwICvzEbEMq7D2SSXS/lc/qS84eHYYCnH8b3NGNCt7ifSXPJesGT/+pwBKmPef6T +lK0pjgqqiKltP04nYeP7Ujz5r8v/CPebUG6Iuht3EjH84i4UCpcBdI/mD2rJInJ/ +jiiCjP7tAgMBAAECggEAVYVe94BGsKmTrWpT13m513X4/sNTi2iX5xehavExq+GB +trJKEnBvHgqWvBv7EsHESJVKJRBcJn8zucwf2UuZq5MATOtfnLahYzIJ/2PD52GD +bnepxb5VD0Tg8j9CmngkMYtyS1X2na48g+wQfCK8ymTUxSholH5l565iY6xSWn8r +SD/u/EBLv69i40uocG1hUUicrJZ1wc5T0ct3GpfiA1BfH462/dp6mROONdpwM8IT +ltRH4wjIc2nPgE7eNbXlHg+KkqyNNLA+BeN3yn001QwvP6Q0panuCTsVVlvEuGAY +RwXbu/0fHFbppIpgfr7AFGRWKTF66Peq3ozsG9jNgQKBgQDviSJxN2Mpdln4i5F3 +74s8FMtZ5bY63RHHcvJ5/D9G1iDNHFgLJsgdrbAhLqBbqg73EsIT8TsPlAqKPKS8 +EGKBg75MsMSYu7EmzIURV3Gy+Pou9jOkTUfQfblkiV+uJjWQPlBlfksL1bQnfSvZ +Pk1DCwGMb5DMDazAQLP9/wtLYQKBgQDLKz9YHF+wFsnfUjBQngDLCTkxrfxp8y84 +s/z5IRZIEdfxmnaEeWJXYa0oeQumNLSVHrryvHm3vkBgKexN49TWUGIM3q54gi/R +FPXXJKarDEI7C86Th3g+3FPEez5v+CEncmlB9X3kBT0ZFROWD3HHaz2DUKPVmJe1 +eUOtAN0LDQKBgCoulx8i5taFXgCz61EYoQdajhjtp/KjvZ7G8kZjEm2SBcK5DBQi +pzj6vjqJsHmT8AC4j+7dG055/oUresMXi5FNNvTgaC6RVvgDKifMo1wmFkCw4JU9 +erkPetdmja/oUKRvJM9Kt0KFRq1xkIg4PXjh9krZ1sDoY5STkF7ZTA7hAoGAQhPv +xzV7Pac7wwFVK3MoKOD4FBtVRBRO4G9RsKk9OPVsuWyWbWGZRXhEPCyaSFVOAk37 +WaVJJSSghWY9L9wQxh9gtHTcY99bs/HQP0fxWSJkjBW7+ymNR0ybhgTbeslF5zGD +4Gr6peW6SGUdeKnPRJ+xYvsgPgEiHmixRRxJyCUCgYEAoguVZdpDaRDZGGrTghwj +F4kMIyEczFeBZtK2JEGSLA6j8uj+oBZ26c6K4sh/Btc0l6IkiXijXbTaH87s52xZ +im8aIZZ9jDKUFxtjVUL0l9fjRsCLAvaBbWw3z4EdtOGuYlnhNCheeSd+/Lzqrb1q +pnTiwBHnQCMFFL/rNcz/Mmk= +-----END PRIVATE KEY-----""" +PublicKeyID="PUB_KEY_ID_0117026449472025041400331572000400" +PublicKey="""-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcBzCAfddF4y/e3aT92g +z/DWNNFFdoKUxxSAjBFdq+7cHGL/b6VmHLfbZqUF2JvlGYoVxE/vHWrrtDYzPctN ++IaGqwiPSAjvJTHTlxpxZkLz+9YGynrj9jbl12gY73mo/M1jJqmrERN6ZA5P8oNl +tjNmYNK/H5FLuZVVUilEiWn8XskxGEKiGh0KhMEl3YRPPzguADPck9Ip4tgn4UDt +fUs5UFrzH3A4cpuc1Je3wJ3vqztu3sr+G3LBSXCvYD7EkDhXMHCv01cJBxBN876T +442YAFX94VJ79/xwwmXOgCLz1QegDd6M+Um0l5BkQoOIqDlEkWsOvRo9iOsZ25H9 +kQIDAQAB +-----END PUBLIC KEY-----""" diff --git a/backend_v1/config.toml b/backend_v1/config.toml new file mode 100644 index 0000000..56e8c13 --- /dev/null +++ b/backend_v1/config.toml @@ -0,0 +1,89 @@ +[App] +Mode = "development" +# Mode = "prod" +BaseURI = "baseURI" +StoragePath = "/Users/rogee/Projects/self/quyun/fixtures" +DistAdmin = "frontend/admin/dist" +DistWeChat = "frontend/wechat/dist" +RechargeWechat = "13932043996" + +[Http] +Port = 8088 + +[Database] +Host = "10.1.1.2" +Port = 5433 +Database = "quyun" +Password = "xixi0202" + +[JWT] +ExpiresTime = "168h" +SigningKey = "xixi@0202" + +[HashIDs] +Salt = "Salt" + +[Redis] +Host = "" +Port = 6379 +Password = "hello" +DB = 0 + +[Ali] +AccessKeyId = "LTAI5t86SjiP9zRd3q2w7jQN" +AccessKeySecret = "hV7spvJuWh8w0EEIXj8NFi2uBlF4aS" +Bucket = "rogee-test" +Host = "https://assets-test.jdwan.com" +Region = "cn-beijing" + +[WeChat] +DevMode = true +AppID = "wx47649361b6eba174" +AppSecret = "e9cdf19b006cd294a9dae7ad8ae08b72" +Token = "W8Xhw5TivYBgY" +EncodingAesKey = "OlgPgMvsl92zy5oErtEzRcziRT2txoN3jgEHV6RQZMY" + +[WeChat.Pay] +NotifyURL = "https://mp.jdwan.com/v1/pay/callback/wechat" +MchID = "1702644947" +SerialNo = 
"4563EC584A35BC84FB27AA4100C934C9A91D59CA" +MechName = "佳芃(北京)企业管理咨询有限公司" +ApiV3Key = "5UBDkxVDY44AKafkqN6YgYxgtkXP6Mw6" +PrivateKey = """-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+GjWdwRorgQXw +Z8ouZeW8UsUgjiSUPKLJHGOZQESa09xmzm+DVZWNCPEHJvz1bDmSNXVsQUWRE/+y +MwSfe+faq0d4BZiw1ueFbRyj4Vw/x7B2vY0i8yo0VyTnWdC6QZRc+V+dbuPJM1Ok ++Qf2yg+NZhz3XuTWvQOscAc9+f3aj8fhXJQlRmNby736w0iDu9EQFvPnkVs10/lW +RwICvzEbEMq7D2SSXS/lc/qS84eHYYCnH8b3NGNCt7ifSXPJesGT/+pwBKmPef6T +lK0pjgqqiKltP04nYeP7Ujz5r8v/CPebUG6Iuht3EjH84i4UCpcBdI/mD2rJInJ/ +jiiCjP7tAgMBAAECggEAVYVe94BGsKmTrWpT13m513X4/sNTi2iX5xehavExq+GB +trJKEnBvHgqWvBv7EsHESJVKJRBcJn8zucwf2UuZq5MATOtfnLahYzIJ/2PD52GD +bnepxb5VD0Tg8j9CmngkMYtyS1X2na48g+wQfCK8ymTUxSholH5l565iY6xSWn8r +SD/u/EBLv69i40uocG1hUUicrJZ1wc5T0ct3GpfiA1BfH462/dp6mROONdpwM8IT +ltRH4wjIc2nPgE7eNbXlHg+KkqyNNLA+BeN3yn001QwvP6Q0panuCTsVVlvEuGAY +RwXbu/0fHFbppIpgfr7AFGRWKTF66Peq3ozsG9jNgQKBgQDviSJxN2Mpdln4i5F3 +74s8FMtZ5bY63RHHcvJ5/D9G1iDNHFgLJsgdrbAhLqBbqg73EsIT8TsPlAqKPKS8 +EGKBg75MsMSYu7EmzIURV3Gy+Pou9jOkTUfQfblkiV+uJjWQPlBlfksL1bQnfSvZ +Pk1DCwGMb5DMDazAQLP9/wtLYQKBgQDLKz9YHF+wFsnfUjBQngDLCTkxrfxp8y84 +s/z5IRZIEdfxmnaEeWJXYa0oeQumNLSVHrryvHm3vkBgKexN49TWUGIM3q54gi/R +FPXXJKarDEI7C86Th3g+3FPEez5v+CEncmlB9X3kBT0ZFROWD3HHaz2DUKPVmJe1 +eUOtAN0LDQKBgCoulx8i5taFXgCz61EYoQdajhjtp/KjvZ7G8kZjEm2SBcK5DBQi +pzj6vjqJsHmT8AC4j+7dG055/oUresMXi5FNNvTgaC6RVvgDKifMo1wmFkCw4JU9 +erkPetdmja/oUKRvJM9Kt0KFRq1xkIg4PXjh9krZ1sDoY5STkF7ZTA7hAoGAQhPv +xzV7Pac7wwFVK3MoKOD4FBtVRBRO4G9RsKk9OPVsuWyWbWGZRXhEPCyaSFVOAk37 +WaVJJSSghWY9L9wQxh9gtHTcY99bs/HQP0fxWSJkjBW7+ymNR0ybhgTbeslF5zGD +4Gr6peW6SGUdeKnPRJ+xYvsgPgEiHmixRRxJyCUCgYEAoguVZdpDaRDZGGrTghwj +F4kMIyEczFeBZtK2JEGSLA6j8uj+oBZ26c6K4sh/Btc0l6IkiXijXbTaH87s52xZ +im8aIZZ9jDKUFxtjVUL0l9fjRsCLAvaBbWw3z4EdtOGuYlnhNCheeSd+/Lzqrb1q +pnTiwBHnQCMFFL/rNcz/Mmk= +-----END PRIVATE KEY-----""" +PublicKeyID="PUB_KEY_ID_0117026449472025041400331572000400" +PublicKey="""-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxcBzCAfddF4y/e3aT92g +z/DWNNFFdoKUxxSAjBFdq+7cHGL/b6VmHLfbZqUF2JvlGYoVxE/vHWrrtDYzPctN ++IaGqwiPSAjvJTHTlxpxZkLz+9YGynrj9jbl12gY73mo/M1jJqmrERN6ZA5P8oNl +tjNmYNK/H5FLuZVVUilEiWn8XskxGEKiGh0KhMEl3YRPPzguADPck9Ip4tgn4UDt +fUs5UFrzH3A4cpuc1Je3wJ3vqztu3sr+G3LBSXCvYD7EkDhXMHCv01cJBxBN876T +442YAFX94VJ79/xwwmXOgCLz1QegDd6M+Um0l5BkQoOIqDlEkWsOvRo9iOsZ25H9 +kQIDAQAB +-----END PUBLIC KEY-----""" diff --git a/backend_v1/database/.transform.yaml b/backend_v1/database/.transform.yaml new file mode 100644 index 0000000..76fc5cc --- /dev/null +++ b/backend_v1/database/.transform.yaml @@ -0,0 +1,12 @@ +ignores: +- migrations +- river_client +- river_client_queue +- river_job +- river_leader +- river_migration +- river_queue +imports: +- go.ipao.vip/gen +field_type: +field_relate: diff --git a/backend_v1/database/database.go b/backend_v1/database/database.go new file mode 100644 index 0000000..fe6c999 --- /dev/null +++ b/backend_v1/database/database.go @@ -0,0 +1,55 @@ +package database + +import ( + "context" + "database/sql" + "embed" + "fmt" + + "quyun/v2/database/models" + + "go.ipao.vip/atom" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" + "gorm.io/gorm" +) + +//go:embed migrations/* +var MigrationFS embed.FS + +func Truncate(ctx context.Context, db *sql.DB, tableName ...string) error { + for _, name := range tableName { + sql := fmt.Sprintf("TRUNCATE TABLE %s RESTART IDENTITY", name) + if _, err := db.ExecContext(ctx, sql); err != nil { + return err + } + } + return nil +} + 
+func WrapLike(v string) string { + return "%" + v + "%" +} + +func WrapLikeLeft(v string) string { + return "%" + v +} + +func WrapLikeRight(v string) string { + return "%" + v +} + +func Provide(...opt.Option) error { + return container.Container.Provide(func(db *gorm.DB) contracts.Initial { + models.SetDefault(db) + return models.Q + }, atom.GroupInitial) +} + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{}, + } +} diff --git a/backend_v1/database/migrations/20250321112535_create_medias.sql b/backend_v1/database/migrations/20250321112535_create_medias.sql new file mode 100644 index 0000000..63bc702 --- /dev/null +++ b/backend_v1/database/migrations/20250321112535_create_medias.sql @@ -0,0 +1,19 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE medias( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + name varchar(255) NOT NULL DEFAULT '', + mime_type varchar(128) NOT NULL DEFAULT '', + size int8 NOT NULL DEFAULT 0, + path varchar(255) NOT NULL DEFAULT '', + metas jsonb NOT NULL DEFAULT '{}' ::jsonb, + hash varchar(64) NOT NULL DEFAULT '' +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE medias; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250322100215_create_posts.sql b/backend_v1/database/migrations/20250322100215_create_posts.sql new file mode 100644 index 0000000..bce67df --- /dev/null +++ b/backend_v1/database/migrations/20250322100215_create_posts.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE posts( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + updated_at timestamp NOT NULL DEFAULT now(), + deleted_at timestamp, + status int2 NOT NULL DEFAULT 0, + title varchar(128) NOT NULL, + head_images jsonb DEFAULT '[]' ::jsonb NOT NULL, + description varchar(256) NOT NULL, + content text NOT NULL, + price int8 NOT NULL DEFAULT 0, + discount int2 NOT NULL DEFAULT 100, + views int8 NOT NULL DEFAULT 0, + likes int8 NOT NULL DEFAULT 0, + tags jsonb DEFAULT '{}' ::jsonb, + assets jsonb DEFAULT '{}' ::jsonb +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE posts; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250322103119_create_users.sql b/backend_v1/database/migrations/20250322103119_create_users.sql new file mode 100644 index 0000000..480aec4 --- /dev/null +++ b/backend_v1/database/migrations/20250322103119_create_users.sql @@ -0,0 +1,22 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE users( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + updated_at timestamp NOT NULL DEFAULT now(), + deleted_at timestamp, + status int2 NOT NULL DEFAULT 0, + open_id varchar(128) NOT NULL UNIQUE, + username varchar(128) NOT NULL, + avatar text +); + +SELECT + setval('users_id_seq', 1000); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE users; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250322103243_create_user_posts.sql b/backend_v1/database/migrations/20250322103243_create_user_posts.sql new file mode 100644 index 0000000..550df00 --- /dev/null +++ b/backend_v1/database/migrations/20250322103243_create_user_posts.sql @@ -0,0 +1,18 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE user_posts( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + updated_at timestamp NOT NULL DEFAULT now(), + -- + 
user_id int8 NOT NULL, + post_id int8 NOT NULL, + price int8 NOT NULL +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE user_posts; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250410130530_create_orders.sql b/backend_v1/database/migrations/20250410130530_create_orders.sql new file mode 100644 index 0000000..c3a5a20 --- /dev/null +++ b/backend_v1/database/migrations/20250410130530_create_orders.sql @@ -0,0 +1,26 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE orders( + id SERIAL8 PRIMARY KEY, + created_at timestamp NOT NULL DEFAULT now(), + updated_at timestamp NOT NULL DEFAULT now(), + order_no varchar(64) NOT NULL, + sub_order_no varchar(64) NOT NULL DEFAULT '', + transaction_id varchar(64) NOT NULL DEFAULT '', + refund_transaction_id varchar(64) NOT NULL DEFAULT '', + price int8 NOT NULL DEFAULT 0, + discount int2 NOT NULL DEFAULT 100, + currency varchar(10) NOT NULL DEFAULT 'CNY', + payment_method varchar(50) NOT NULL DEFAULT 'wechatpay', + post_id int8 NOT NULL, + user_id int8 NOT NULL, + status int2 NOT NULL, + meta jsonb NOT NULL DEFAULT '{}' ::jsonb +); + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +DROP TABLE orders; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250430014015_alter_user.sql b/backend_v1/database/migrations/20250430014015_alter_user.sql new file mode 100644 index 0000000..c7dbbee --- /dev/null +++ b/backend_v1/database/migrations/20250430014015_alter_user.sql @@ -0,0 +1,18 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE public.users + ADD metas jsonb DEFAULT '{}'::jsonb NOT NULL; + +ALTER TABLE public.users + ADD auth_token jsonb DEFAULT '{}'::jsonb NOT NULL; + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +ALTER TABLE public.users + DROP COLUMN metas; + +ALTER TABLE public.users + DROP COLUMN auth_token; + +-- +goose StatementEnd diff --git a/backend_v1/database/migrations/20250512113213_alter_user.sql b/backend_v1/database/migrations/20250512113213_alter_user.sql new file mode 100644 index 0000000..b635d31 --- /dev/null +++ b/backend_v1/database/migrations/20250512113213_alter_user.sql @@ -0,0 +1,11 @@ +-- +goose Up +-- +goose StatementBegin +ALTER TABLE public.users + ADD balance int8 DEFAULT 0 NOT NULL; +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +ALTER TABLE public.users + DROP COLUMN balance; +-- +goose StatementEnd diff --git a/backend_v1/database/models/medias.gen.go b/backend_v1/database/models/medias.gen.go new file mode 100644 index 0000000..a82f3d8 --- /dev/null +++ b/backend_v1/database/models/medias.gen.go @@ -0,0 +1,59 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. 
+ +package models + +import ( + "context" + "time" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/types" +) + +const TableNameMedia = "medias" + +// Media mapped from table +type Media struct { + ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true" json:"id"` + CreatedAt time.Time `gorm:"column:created_at;type:timestamp without time zone;not null;default:now()" json:"created_at"` + Name string `gorm:"column:name;type:character varying(255);not null" json:"name"` + MimeType string `gorm:"column:mime_type;type:character varying(128);not null" json:"mime_type"` + Size int64 `gorm:"column:size;type:bigint;not null" json:"size"` + Path string `gorm:"column:path;type:character varying(255);not null" json:"path"` + Metas types.JSON `gorm:"column:metas;type:jsonb;not null;default:{}" json:"metas"` + Hash string `gorm:"column:hash;type:character varying(64);not null" json:"hash"` +} + +// Quick operations without importing query package +// Update applies changed fields to the database using the default DB. +func (m *Media) Update(ctx context.Context) (gen.ResultInfo, error) { + return Q.Media.WithContext(ctx).Updates(m) +} + +// Save upserts the model using the default DB. +func (m *Media) Save(ctx context.Context) error { return Q.Media.WithContext(ctx).Save(m) } + +// Create inserts the model using the default DB. +func (m *Media) Create(ctx context.Context) error { return Q.Media.WithContext(ctx).Create(m) } + +// Delete removes the row represented by the model using the default DB. +func (m *Media) Delete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Media.WithContext(ctx).Delete(m) +} + +// ForceDelete permanently deletes the row (ignores soft delete) using the default DB. +func (m *Media) ForceDelete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Media.WithContext(ctx).Unscoped().Delete(m) +} + +// Reload reloads the model from database by its primary key and overwrites current fields. +func (m *Media) Reload(ctx context.Context) error { + fresh, err := Q.Media.WithContext(ctx).GetByID(m.ID) + if err != nil { + return err + } + *m = *fresh + return nil +} diff --git a/backend_v1/database/models/medias.query.gen.go b/backend_v1/database/models/medias.query.gen.go new file mode 100644 index 0000000..572324b --- /dev/null +++ b/backend_v1/database/models/medias.query.gen.go @@ -0,0 +1,487 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/field" + + "gorm.io/plugin/dbresolver" +) + +func newMedia(db *gorm.DB, opts ...gen.DOOption) mediaQuery { + _mediaQuery := mediaQuery{} + + _mediaQuery.mediaQueryDo.UseDB(db, opts...) 
+ _mediaQuery.mediaQueryDo.UseModel(&Media{}) + + tableName := _mediaQuery.mediaQueryDo.TableName() + _mediaQuery.ALL = field.NewAsterisk(tableName) + _mediaQuery.ID = field.NewInt64(tableName, "id") + _mediaQuery.CreatedAt = field.NewTime(tableName, "created_at") + _mediaQuery.Name = field.NewString(tableName, "name") + _mediaQuery.MimeType = field.NewString(tableName, "mime_type") + _mediaQuery.Size = field.NewInt64(tableName, "size") + _mediaQuery.Path = field.NewString(tableName, "path") + _mediaQuery.Metas = field.NewJSONB(tableName, "metas") + _mediaQuery.Hash = field.NewString(tableName, "hash") + + _mediaQuery.fillFieldMap() + + return _mediaQuery +} + +type mediaQuery struct { + mediaQueryDo mediaQueryDo + + ALL field.Asterisk + ID field.Int64 + CreatedAt field.Time + Name field.String + MimeType field.String + Size field.Int64 + Path field.String + Metas field.JSONB + Hash field.String + + fieldMap map[string]field.Expr +} + +func (m mediaQuery) Table(newTableName string) *mediaQuery { + m.mediaQueryDo.UseTable(newTableName) + return m.updateTableName(newTableName) +} + +func (m mediaQuery) As(alias string) *mediaQuery { + m.mediaQueryDo.DO = *(m.mediaQueryDo.As(alias).(*gen.DO)) + return m.updateTableName(alias) +} + +func (m *mediaQuery) updateTableName(table string) *mediaQuery { + m.ALL = field.NewAsterisk(table) + m.ID = field.NewInt64(table, "id") + m.CreatedAt = field.NewTime(table, "created_at") + m.Name = field.NewString(table, "name") + m.MimeType = field.NewString(table, "mime_type") + m.Size = field.NewInt64(table, "size") + m.Path = field.NewString(table, "path") + m.Metas = field.NewJSONB(table, "metas") + m.Hash = field.NewString(table, "hash") + + m.fillFieldMap() + + return m +} + +func (m *mediaQuery) QueryContext(ctx context.Context) (*mediaQuery, *mediaQueryDo) { + return m, m.mediaQueryDo.WithContext(ctx) +} + +func (m *mediaQuery) WithContext(ctx context.Context) *mediaQueryDo { + return m.mediaQueryDo.WithContext(ctx) +} + +func (m mediaQuery) TableName() string { return m.mediaQueryDo.TableName() } + +func (m mediaQuery) Alias() string { return m.mediaQueryDo.Alias() } + +func (m mediaQuery) Columns(cols ...field.Expr) gen.Columns { return m.mediaQueryDo.Columns(cols...) 
} + +func (m *mediaQuery) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := m.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (m *mediaQuery) fillFieldMap() { + m.fieldMap = make(map[string]field.Expr, 8) + m.fieldMap["id"] = m.ID + m.fieldMap["created_at"] = m.CreatedAt + m.fieldMap["name"] = m.Name + m.fieldMap["mime_type"] = m.MimeType + m.fieldMap["size"] = m.Size + m.fieldMap["path"] = m.Path + m.fieldMap["metas"] = m.Metas + m.fieldMap["hash"] = m.Hash +} + +func (m mediaQuery) clone(db *gorm.DB) mediaQuery { + m.mediaQueryDo.ReplaceConnPool(db.Statement.ConnPool) + return m +} + +func (m mediaQuery) replaceDB(db *gorm.DB) mediaQuery { + m.mediaQueryDo.ReplaceDB(db) + return m +} + +type mediaQueryDo struct{ gen.DO } + +func (m mediaQueryDo) Debug() *mediaQueryDo { + return m.withDO(m.DO.Debug()) +} + +func (m mediaQueryDo) WithContext(ctx context.Context) *mediaQueryDo { + return m.withDO(m.DO.WithContext(ctx)) +} + +func (m mediaQueryDo) ReadDB() *mediaQueryDo { + return m.Clauses(dbresolver.Read) +} + +func (m mediaQueryDo) WriteDB() *mediaQueryDo { + return m.Clauses(dbresolver.Write) +} + +func (m mediaQueryDo) Session(config *gorm.Session) *mediaQueryDo { + return m.withDO(m.DO.Session(config)) +} + +func (m mediaQueryDo) Clauses(conds ...clause.Expression) *mediaQueryDo { + return m.withDO(m.DO.Clauses(conds...)) +} + +func (m mediaQueryDo) Returning(value interface{}, columns ...string) *mediaQueryDo { + return m.withDO(m.DO.Returning(value, columns...)) +} + +func (m mediaQueryDo) Not(conds ...gen.Condition) *mediaQueryDo { + return m.withDO(m.DO.Not(conds...)) +} + +func (m mediaQueryDo) Or(conds ...gen.Condition) *mediaQueryDo { + return m.withDO(m.DO.Or(conds...)) +} + +func (m mediaQueryDo) Select(conds ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Select(conds...)) +} + +func (m mediaQueryDo) Where(conds ...gen.Condition) *mediaQueryDo { + return m.withDO(m.DO.Where(conds...)) +} + +func (m mediaQueryDo) Order(conds ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Order(conds...)) +} + +func (m mediaQueryDo) Distinct(cols ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Distinct(cols...)) +} + +func (m mediaQueryDo) Omit(cols ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Omit(cols...)) +} + +func (m mediaQueryDo) Join(table schema.Tabler, on ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Join(table, on...)) +} + +func (m mediaQueryDo) LeftJoin(table schema.Tabler, on ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.LeftJoin(table, on...)) +} + +func (m mediaQueryDo) RightJoin(table schema.Tabler, on ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.RightJoin(table, on...)) +} + +func (m mediaQueryDo) Group(cols ...field.Expr) *mediaQueryDo { + return m.withDO(m.DO.Group(cols...)) +} + +func (m mediaQueryDo) Having(conds ...gen.Condition) *mediaQueryDo { + return m.withDO(m.DO.Having(conds...)) +} + +func (m mediaQueryDo) Limit(limit int) *mediaQueryDo { + return m.withDO(m.DO.Limit(limit)) +} + +func (m mediaQueryDo) Offset(offset int) *mediaQueryDo { + return m.withDO(m.DO.Offset(offset)) +} + +func (m mediaQueryDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *mediaQueryDo { + return m.withDO(m.DO.Scopes(funcs...)) +} + +func (m mediaQueryDo) Unscoped() *mediaQueryDo { + return m.withDO(m.DO.Unscoped()) +} + +func (m mediaQueryDo) Create(values ...*Media) error { + if len(values) == 0 { + return nil + } + return m.DO.Create(values) 
+} + +func (m mediaQueryDo) CreateInBatches(values []*Media, batchSize int) error { + return m.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (m mediaQueryDo) Save(values ...*Media) error { + if len(values) == 0 { + return nil + } + return m.DO.Save(values) +} + +func (m mediaQueryDo) First() (*Media, error) { + if result, err := m.DO.First(); err != nil { + return nil, err + } else { + return result.(*Media), nil + } +} + +func (m mediaQueryDo) Take() (*Media, error) { + if result, err := m.DO.Take(); err != nil { + return nil, err + } else { + return result.(*Media), nil + } +} + +func (m mediaQueryDo) Last() (*Media, error) { + if result, err := m.DO.Last(); err != nil { + return nil, err + } else { + return result.(*Media), nil + } +} + +func (m mediaQueryDo) Find() ([]*Media, error) { + result, err := m.DO.Find() + return result.([]*Media), err +} + +func (m mediaQueryDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*Media, err error) { + buf := make([]*Media, 0, batchSize) + err = m.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (m mediaQueryDo) FindInBatches(result *[]*Media, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return m.DO.FindInBatches(result, batchSize, fc) +} + +func (m mediaQueryDo) Attrs(attrs ...field.AssignExpr) *mediaQueryDo { + return m.withDO(m.DO.Attrs(attrs...)) +} + +func (m mediaQueryDo) Assign(attrs ...field.AssignExpr) *mediaQueryDo { + return m.withDO(m.DO.Assign(attrs...)) +} + +func (m mediaQueryDo) Joins(fields ...field.RelationField) *mediaQueryDo { + for _, _f := range fields { + m = *m.withDO(m.DO.Joins(_f)) + } + return &m +} + +func (m mediaQueryDo) Preload(fields ...field.RelationField) *mediaQueryDo { + for _, _f := range fields { + m = *m.withDO(m.DO.Preload(_f)) + } + return &m +} + +func (m mediaQueryDo) FirstOrInit() (*Media, error) { + if result, err := m.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*Media), nil + } +} + +func (m mediaQueryDo) FirstOrCreate() (*Media, error) { + if result, err := m.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*Media), nil + } +} + +func (m mediaQueryDo) FindByPage(offset int, limit int) (result []*Media, count int64, err error) { + result, err = m.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = m.Offset(-1).Limit(-1).Count() + return +} + +func (m mediaQueryDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = m.Count() + if err != nil { + return + } + + err = m.Offset(offset).Limit(limit).Scan(result) + return +} + +func (m mediaQueryDo) Scan(result interface{}) (err error) { + return m.DO.Scan(result) +} + +func (m mediaQueryDo) Delete(models ...*Media) (result gen.ResultInfo, err error) { + return m.DO.Delete(models) +} + +// ForceDelete performs a permanent delete (ignores soft-delete) for current scope. +func (m mediaQueryDo) ForceDelete() (gen.ResultInfo, error) { + return m.Unscoped().Delete() +} + +// Inc increases the given column by step for current scope. 
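+// Usage sketch against the generated query object (mediaID is a placeholder):
+//
+//	// size = size + 1 for one media row
+//	_, err := Q.Media.WithContext(ctx).
+//		Where(Q.Media.ID.Eq(mediaID)).
+//		Inc(Q.Media.Size, 1)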
+func (m mediaQueryDo) Inc(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column + step + e := field.NewUnsafeFieldRaw("?+?", column.RawExpr(), step) + return m.DO.UpdateColumn(column, e) +} + +// Dec decreases the given column by step for current scope. +func (m mediaQueryDo) Dec(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column - step + e := field.NewUnsafeFieldRaw("?-?", column.RawExpr(), step) + return m.DO.UpdateColumn(column, e) +} + +// Sum returns SUM(column) for current scope. +func (m mediaQueryDo) Sum(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("SUM(?)", column.RawExpr()) + if err := m.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Avg returns AVG(column) for current scope. +func (m mediaQueryDo) Avg(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("AVG(?)", column.RawExpr()) + if err := m.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Min returns MIN(column) for current scope. +func (m mediaQueryDo) Min(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MIN(?)", column.RawExpr()) + if err := m.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Max returns MAX(column) for current scope. +func (m mediaQueryDo) Max(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MAX(?)", column.RawExpr()) + if err := m.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// PluckMap returns a map[key]value for selected key/value expressions within current scope. +func (m mediaQueryDo) PluckMap(key, val field.Expr) (map[interface{}]interface{}, error) { + do := m.Select(key, val) + rows, err := do.DO.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + mm := make(map[interface{}]interface{}) + for rows.Next() { + var k interface{} + var v interface{} + if err := rows.Scan(&k, &v); err != nil { + return nil, err + } + mm[k] = v + } + return mm, rows.Err() +} + +// Exists returns true if any record matches the given conditions. +func (m mediaQueryDo) Exists(conds ...gen.Condition) (bool, error) { + cnt, err := m.Where(conds...).Count() + if err != nil { + return false, err + } + return cnt > 0, nil +} + +// PluckIDs returns all primary key values under current scope. +func (m mediaQueryDo) PluckIDs() ([]int64, error) { + ids := make([]int64, 0, 16) + pk := field.NewInt64(m.TableName(), "id") + if err := m.DO.Pluck(pk, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// GetByID finds a single record by primary key. +func (m mediaQueryDo) GetByID(id int64) (*Media, error) { + pk := field.NewInt64(m.TableName(), "id") + return m.Where(pk.Eq(id)).First() +} + +// GetByIDs finds records by primary key list. +func (m mediaQueryDo) GetByIDs(ids ...int64) ([]*Media, error) { + if len(ids) == 0 { + return []*Media{}, nil + } + pk := field.NewInt64(m.TableName(), "id") + return m.Where(pk.In(ids...)).Find() +} + +// DeleteByID deletes records by primary key. +func (m mediaQueryDo) DeleteByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(m.TableName(), "id") + return m.Where(pk.Eq(id)).Delete() +} + +// DeleteByIDs deletes records by a list of primary keys. 
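+// Passing an empty id list is a no-op: it returns a zero gen.ResultInfo and
+// never touches the database.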
+func (m mediaQueryDo) DeleteByIDs(ids ...int64) (gen.ResultInfo, error) { + if len(ids) == 0 { + return gen.ResultInfo{RowsAffected: 0, Error: nil}, nil + } + pk := field.NewInt64(m.TableName(), "id") + return m.Where(pk.In(ids...)).Delete() +} + +func (m *mediaQueryDo) withDO(do gen.Dao) *mediaQueryDo { + m.DO = *do.(*gen.DO) + return m +} diff --git a/backend_v1/database/models/orders.gen.go b/backend_v1/database/models/orders.gen.go new file mode 100644 index 0000000..86cee2d --- /dev/null +++ b/backend_v1/database/models/orders.gen.go @@ -0,0 +1,66 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + "time" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/types" +) + +const TableNameOrder = "orders" + +// Order mapped from table +type Order struct { + ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true" json:"id"` + CreatedAt time.Time `gorm:"column:created_at;type:timestamp without time zone;not null;default:now()" json:"created_at"` + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp without time zone;not null;default:now()" json:"updated_at"` + OrderNo string `gorm:"column:order_no;type:character varying(64);not null" json:"order_no"` + SubOrderNo string `gorm:"column:sub_order_no;type:character varying(64);not null" json:"sub_order_no"` + TransactionID string `gorm:"column:transaction_id;type:character varying(64);not null" json:"transaction_id"` + RefundTransactionID string `gorm:"column:refund_transaction_id;type:character varying(64);not null" json:"refund_transaction_id"` + Price int64 `gorm:"column:price;type:bigint;not null" json:"price"` + Discount int16 `gorm:"column:discount;type:smallint;not null;default:100" json:"discount"` + Currency string `gorm:"column:currency;type:character varying(10);not null;default:CNY" json:"currency"` + PaymentMethod string `gorm:"column:payment_method;type:character varying(50);not null;default:wechatpay" json:"payment_method"` + PostID int64 `gorm:"column:post_id;type:bigint;not null" json:"post_id"` + UserID int64 `gorm:"column:user_id;type:bigint;not null" json:"user_id"` + Status int16 `gorm:"column:status;type:smallint;not null" json:"status"` + Meta types.JSON `gorm:"column:meta;type:jsonb;not null;default:{}" json:"meta"` +} + +// Quick operations without importing query package +// Update applies changed fields to the database using the default DB. +func (m *Order) Update(ctx context.Context) (gen.ResultInfo, error) { + return Q.Order.WithContext(ctx).Updates(m) +} + +// Save upserts the model using the default DB. +func (m *Order) Save(ctx context.Context) error { return Q.Order.WithContext(ctx).Save(m) } + +// Create inserts the model using the default DB. +func (m *Order) Create(ctx context.Context) error { return Q.Order.WithContext(ctx).Create(m) } + +// Delete removes the row represented by the model using the default DB. +func (m *Order) Delete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Order.WithContext(ctx).Delete(m) +} + +// ForceDelete permanently deletes the row (ignores soft delete) using the default DB. +func (m *Order) ForceDelete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Order.WithContext(ctx).Unscoped().Delete(m) +} + +// Reload reloads the model from database by its primary key and overwrites current fields. 
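+// For example, after an out-of-band update (such as a payment callback
+// changing status), m.Reload(ctx) re-reads the row by ID and overwrites every
+// field on the in-memory struct with the stored values.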
+func (m *Order) Reload(ctx context.Context) error { + fresh, err := Q.Order.WithContext(ctx).GetByID(m.ID) + if err != nil { + return err + } + *m = *fresh + return nil +} diff --git a/backend_v1/database/models/orders.query.gen.go b/backend_v1/database/models/orders.query.gen.go new file mode 100644 index 0000000..6a6a9bf --- /dev/null +++ b/backend_v1/database/models/orders.query.gen.go @@ -0,0 +1,515 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/field" + + "gorm.io/plugin/dbresolver" +) + +func newOrder(db *gorm.DB, opts ...gen.DOOption) orderQuery { + _orderQuery := orderQuery{} + + _orderQuery.orderQueryDo.UseDB(db, opts...) + _orderQuery.orderQueryDo.UseModel(&Order{}) + + tableName := _orderQuery.orderQueryDo.TableName() + _orderQuery.ALL = field.NewAsterisk(tableName) + _orderQuery.ID = field.NewInt64(tableName, "id") + _orderQuery.CreatedAt = field.NewTime(tableName, "created_at") + _orderQuery.UpdatedAt = field.NewTime(tableName, "updated_at") + _orderQuery.OrderNo = field.NewString(tableName, "order_no") + _orderQuery.SubOrderNo = field.NewString(tableName, "sub_order_no") + _orderQuery.TransactionID = field.NewString(tableName, "transaction_id") + _orderQuery.RefundTransactionID = field.NewString(tableName, "refund_transaction_id") + _orderQuery.Price = field.NewInt64(tableName, "price") + _orderQuery.Discount = field.NewInt16(tableName, "discount") + _orderQuery.Currency = field.NewString(tableName, "currency") + _orderQuery.PaymentMethod = field.NewString(tableName, "payment_method") + _orderQuery.PostID = field.NewInt64(tableName, "post_id") + _orderQuery.UserID = field.NewInt64(tableName, "user_id") + _orderQuery.Status = field.NewInt16(tableName, "status") + _orderQuery.Meta = field.NewJSONB(tableName, "meta") + + _orderQuery.fillFieldMap() + + return _orderQuery +} + +type orderQuery struct { + orderQueryDo orderQueryDo + + ALL field.Asterisk + ID field.Int64 + CreatedAt field.Time + UpdatedAt field.Time + OrderNo field.String + SubOrderNo field.String + TransactionID field.String + RefundTransactionID field.String + Price field.Int64 + Discount field.Int16 + Currency field.String + PaymentMethod field.String + PostID field.Int64 + UserID field.Int64 + Status field.Int16 + Meta field.JSONB + + fieldMap map[string]field.Expr +} + +func (o orderQuery) Table(newTableName string) *orderQuery { + o.orderQueryDo.UseTable(newTableName) + return o.updateTableName(newTableName) +} + +func (o orderQuery) As(alias string) *orderQuery { + o.orderQueryDo.DO = *(o.orderQueryDo.As(alias).(*gen.DO)) + return o.updateTableName(alias) +} + +func (o *orderQuery) updateTableName(table string) *orderQuery { + o.ALL = field.NewAsterisk(table) + o.ID = field.NewInt64(table, "id") + o.CreatedAt = field.NewTime(table, "created_at") + o.UpdatedAt = field.NewTime(table, "updated_at") + o.OrderNo = field.NewString(table, "order_no") + o.SubOrderNo = field.NewString(table, "sub_order_no") + o.TransactionID = field.NewString(table, "transaction_id") + o.RefundTransactionID = field.NewString(table, "refund_transaction_id") + o.Price = field.NewInt64(table, "price") + o.Discount = field.NewInt16(table, "discount") + o.Currency = field.NewString(table, "currency") + o.PaymentMethod = field.NewString(table, "payment_method") + o.PostID = 
field.NewInt64(table, "post_id") + o.UserID = field.NewInt64(table, "user_id") + o.Status = field.NewInt16(table, "status") + o.Meta = field.NewJSONB(table, "meta") + + o.fillFieldMap() + + return o +} + +func (o *orderQuery) QueryContext(ctx context.Context) (*orderQuery, *orderQueryDo) { + return o, o.orderQueryDo.WithContext(ctx) +} + +func (o *orderQuery) WithContext(ctx context.Context) *orderQueryDo { + return o.orderQueryDo.WithContext(ctx) +} + +func (o orderQuery) TableName() string { return o.orderQueryDo.TableName() } + +func (o orderQuery) Alias() string { return o.orderQueryDo.Alias() } + +func (o orderQuery) Columns(cols ...field.Expr) gen.Columns { return o.orderQueryDo.Columns(cols...) } + +func (o *orderQuery) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := o.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (o *orderQuery) fillFieldMap() { + o.fieldMap = make(map[string]field.Expr, 15) + o.fieldMap["id"] = o.ID + o.fieldMap["created_at"] = o.CreatedAt + o.fieldMap["updated_at"] = o.UpdatedAt + o.fieldMap["order_no"] = o.OrderNo + o.fieldMap["sub_order_no"] = o.SubOrderNo + o.fieldMap["transaction_id"] = o.TransactionID + o.fieldMap["refund_transaction_id"] = o.RefundTransactionID + o.fieldMap["price"] = o.Price + o.fieldMap["discount"] = o.Discount + o.fieldMap["currency"] = o.Currency + o.fieldMap["payment_method"] = o.PaymentMethod + o.fieldMap["post_id"] = o.PostID + o.fieldMap["user_id"] = o.UserID + o.fieldMap["status"] = o.Status + o.fieldMap["meta"] = o.Meta +} + +func (o orderQuery) clone(db *gorm.DB) orderQuery { + o.orderQueryDo.ReplaceConnPool(db.Statement.ConnPool) + return o +} + +func (o orderQuery) replaceDB(db *gorm.DB) orderQuery { + o.orderQueryDo.ReplaceDB(db) + return o +} + +type orderQueryDo struct{ gen.DO } + +func (o orderQueryDo) Debug() *orderQueryDo { + return o.withDO(o.DO.Debug()) +} + +func (o orderQueryDo) WithContext(ctx context.Context) *orderQueryDo { + return o.withDO(o.DO.WithContext(ctx)) +} + +func (o orderQueryDo) ReadDB() *orderQueryDo { + return o.Clauses(dbresolver.Read) +} + +func (o orderQueryDo) WriteDB() *orderQueryDo { + return o.Clauses(dbresolver.Write) +} + +func (o orderQueryDo) Session(config *gorm.Session) *orderQueryDo { + return o.withDO(o.DO.Session(config)) +} + +func (o orderQueryDo) Clauses(conds ...clause.Expression) *orderQueryDo { + return o.withDO(o.DO.Clauses(conds...)) +} + +func (o orderQueryDo) Returning(value interface{}, columns ...string) *orderQueryDo { + return o.withDO(o.DO.Returning(value, columns...)) +} + +func (o orderQueryDo) Not(conds ...gen.Condition) *orderQueryDo { + return o.withDO(o.DO.Not(conds...)) +} + +func (o orderQueryDo) Or(conds ...gen.Condition) *orderQueryDo { + return o.withDO(o.DO.Or(conds...)) +} + +func (o orderQueryDo) Select(conds ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Select(conds...)) +} + +func (o orderQueryDo) Where(conds ...gen.Condition) *orderQueryDo { + return o.withDO(o.DO.Where(conds...)) +} + +func (o orderQueryDo) Order(conds ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Order(conds...)) +} + +func (o orderQueryDo) Distinct(cols ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Distinct(cols...)) +} + +func (o orderQueryDo) Omit(cols ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Omit(cols...)) +} + +func (o orderQueryDo) Join(table schema.Tabler, on ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Join(table, on...)) +} + 
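// ---------------------------------------------------------------------------
// Editorial note, not part of the generated patch: a minimal, hypothetical
// sketch of how the fluent DAO generated in this file is typically consumed
// from application code. It assumes models.SetDefault(db) has already been
// called (see query.gen.go later in this patch), that the models import path
// is project-specific, and that the field helpers of go.ipao.vip/gen mirror
// gorm.io/gen (Eq, Desc, ...). The status value and limit are illustrative.
//
//	func recentOrders(ctx context.Context, userID int64) ([]*models.Order, error) {
//		o := models.Q.Order
//		return o.WithContext(ctx).
//			Where(o.UserID.Eq(userID), o.Status.Eq(1)). // status 1 ("paid") is an assumption
//			Order(o.CreatedAt.Desc()).
//			Limit(20).
//			Find()
//	}
//
//	// Offset/limit pagination with a total count, via the generated FindByPage:
//	//	rows, total, err := models.Q.Order.WithContext(ctx).
//	//		Where(models.Q.Order.UserID.Eq(userID)).
//	//		FindByPage(0, 10)
// ---------------------------------------------------------------------------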
+func (o orderQueryDo) LeftJoin(table schema.Tabler, on ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.LeftJoin(table, on...)) +} + +func (o orderQueryDo) RightJoin(table schema.Tabler, on ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.RightJoin(table, on...)) +} + +func (o orderQueryDo) Group(cols ...field.Expr) *orderQueryDo { + return o.withDO(o.DO.Group(cols...)) +} + +func (o orderQueryDo) Having(conds ...gen.Condition) *orderQueryDo { + return o.withDO(o.DO.Having(conds...)) +} + +func (o orderQueryDo) Limit(limit int) *orderQueryDo { + return o.withDO(o.DO.Limit(limit)) +} + +func (o orderQueryDo) Offset(offset int) *orderQueryDo { + return o.withDO(o.DO.Offset(offset)) +} + +func (o orderQueryDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *orderQueryDo { + return o.withDO(o.DO.Scopes(funcs...)) +} + +func (o orderQueryDo) Unscoped() *orderQueryDo { + return o.withDO(o.DO.Unscoped()) +} + +func (o orderQueryDo) Create(values ...*Order) error { + if len(values) == 0 { + return nil + } + return o.DO.Create(values) +} + +func (o orderQueryDo) CreateInBatches(values []*Order, batchSize int) error { + return o.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (o orderQueryDo) Save(values ...*Order) error { + if len(values) == 0 { + return nil + } + return o.DO.Save(values) +} + +func (o orderQueryDo) First() (*Order, error) { + if result, err := o.DO.First(); err != nil { + return nil, err + } else { + return result.(*Order), nil + } +} + +func (o orderQueryDo) Take() (*Order, error) { + if result, err := o.DO.Take(); err != nil { + return nil, err + } else { + return result.(*Order), nil + } +} + +func (o orderQueryDo) Last() (*Order, error) { + if result, err := o.DO.Last(); err != nil { + return nil, err + } else { + return result.(*Order), nil + } +} + +func (o orderQueryDo) Find() ([]*Order, error) { + result, err := o.DO.Find() + return result.([]*Order), err +} + +func (o orderQueryDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*Order, err error) { + buf := make([]*Order, 0, batchSize) + err = o.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) 
}() + return fc(tx, batch) + }) + return results, err +} + +func (o orderQueryDo) FindInBatches(result *[]*Order, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return o.DO.FindInBatches(result, batchSize, fc) +} + +func (o orderQueryDo) Attrs(attrs ...field.AssignExpr) *orderQueryDo { + return o.withDO(o.DO.Attrs(attrs...)) +} + +func (o orderQueryDo) Assign(attrs ...field.AssignExpr) *orderQueryDo { + return o.withDO(o.DO.Assign(attrs...)) +} + +func (o orderQueryDo) Joins(fields ...field.RelationField) *orderQueryDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Joins(_f)) + } + return &o +} + +func (o orderQueryDo) Preload(fields ...field.RelationField) *orderQueryDo { + for _, _f := range fields { + o = *o.withDO(o.DO.Preload(_f)) + } + return &o +} + +func (o orderQueryDo) FirstOrInit() (*Order, error) { + if result, err := o.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*Order), nil + } +} + +func (o orderQueryDo) FirstOrCreate() (*Order, error) { + if result, err := o.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*Order), nil + } +} + +func (o orderQueryDo) FindByPage(offset int, limit int) (result []*Order, count int64, err error) { + result, err = o.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = o.Offset(-1).Limit(-1).Count() + return +} + +func (o orderQueryDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = o.Count() + if err != nil { + return + } + + err = o.Offset(offset).Limit(limit).Scan(result) + return +} + +func (o orderQueryDo) Scan(result interface{}) (err error) { + return o.DO.Scan(result) +} + +func (o orderQueryDo) Delete(models ...*Order) (result gen.ResultInfo, err error) { + return o.DO.Delete(models) +} + +// ForceDelete performs a permanent delete (ignores soft-delete) for current scope. +func (o orderQueryDo) ForceDelete() (gen.ResultInfo, error) { + return o.Unscoped().Delete() +} + +// Inc increases the given column by step for current scope. +func (o orderQueryDo) Inc(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column + step + e := field.NewUnsafeFieldRaw("?+?", column.RawExpr(), step) + return o.DO.UpdateColumn(column, e) +} + +// Dec decreases the given column by step for current scope. +func (o orderQueryDo) Dec(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column - step + e := field.NewUnsafeFieldRaw("?-?", column.RawExpr(), step) + return o.DO.UpdateColumn(column, e) +} + +// Sum returns SUM(column) for current scope. +func (o orderQueryDo) Sum(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("SUM(?)", column.RawExpr()) + if err := o.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Avg returns AVG(column) for current scope. +func (o orderQueryDo) Avg(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("AVG(?)", column.RawExpr()) + if err := o.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Min returns MIN(column) for current scope. 
+func (o orderQueryDo) Min(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MIN(?)", column.RawExpr()) + if err := o.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Max returns MAX(column) for current scope. +func (o orderQueryDo) Max(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MAX(?)", column.RawExpr()) + if err := o.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// PluckMap returns a map[key]value for selected key/value expressions within current scope. +func (o orderQueryDo) PluckMap(key, val field.Expr) (map[interface{}]interface{}, error) { + do := o.Select(key, val) + rows, err := do.DO.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + mm := make(map[interface{}]interface{}) + for rows.Next() { + var k interface{} + var v interface{} + if err := rows.Scan(&k, &v); err != nil { + return nil, err + } + mm[k] = v + } + return mm, rows.Err() +} + +// Exists returns true if any record matches the given conditions. +func (o orderQueryDo) Exists(conds ...gen.Condition) (bool, error) { + cnt, err := o.Where(conds...).Count() + if err != nil { + return false, err + } + return cnt > 0, nil +} + +// PluckIDs returns all primary key values under current scope. +func (o orderQueryDo) PluckIDs() ([]int64, error) { + ids := make([]int64, 0, 16) + pk := field.NewInt64(o.TableName(), "id") + if err := o.DO.Pluck(pk, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// GetByID finds a single record by primary key. +func (o orderQueryDo) GetByID(id int64) (*Order, error) { + pk := field.NewInt64(o.TableName(), "id") + return o.Where(pk.Eq(id)).First() +} + +// GetByIDs finds records by primary key list. +func (o orderQueryDo) GetByIDs(ids ...int64) ([]*Order, error) { + if len(ids) == 0 { + return []*Order{}, nil + } + pk := field.NewInt64(o.TableName(), "id") + return o.Where(pk.In(ids...)).Find() +} + +// DeleteByID deletes records by primary key. +func (o orderQueryDo) DeleteByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(o.TableName(), "id") + return o.Where(pk.Eq(id)).Delete() +} + +// DeleteByIDs deletes records by a list of primary keys. +func (o orderQueryDo) DeleteByIDs(ids ...int64) (gen.ResultInfo, error) { + if len(ids) == 0 { + return gen.ResultInfo{RowsAffected: 0, Error: nil}, nil + } + pk := field.NewInt64(o.TableName(), "id") + return o.Where(pk.In(ids...)).Delete() +} + +func (o *orderQueryDo) withDO(do gen.Dao) *orderQueryDo { + o.DO = *do.(*gen.DO) + return o +} diff --git a/backend_v1/database/models/posts.gen.go b/backend_v1/database/models/posts.gen.go new file mode 100644 index 0000000..1925e05 --- /dev/null +++ b/backend_v1/database/models/posts.gen.go @@ -0,0 +1,72 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. 
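// ---------------------------------------------------------------------------
// Editorial note, not part of the generated patch: each *.query.gen.go file in
// this patch (the orders one ends just above) adds a set of convenience
// helpers on top of the stock gen DAO: Inc/Dec, SUM/AVG/MIN/MAX aggregates,
// PluckMap, Exists, PluckIDs, GetByID(s) and DeleteByID(s). A hypothetical
// sketch using the Order variants; IDs, step sizes, and the user filter are
// illustrative only, and the models import path is project-specific.
//
//	func adjustOrder(ctx context.Context, orderID int64) error {
//		o := models.Q.Order
//
//		// Atomic counter update: UPDATE orders SET price = price + 100 WHERE id = $orderID
//		if _, err := o.WithContext(ctx).Where(o.ID.Eq(orderID)).Inc(o.Price, 100); err != nil {
//			return err
//		}
//
//		// Aggregate over the current scope.
//		total, err := o.WithContext(ctx).Where(o.UserID.Eq(42)).Sum(o.Price) // user 42 is illustrative
//		if err != nil {
//			return err
//		}
//		_ = total
//
//		// Existence check and primary-key helpers.
//		if ok, err := o.WithContext(ctx).Exists(o.ID.Eq(orderID)); err != nil || !ok {
//			return err
//		}
//		if _, err := o.WithContext(ctx).GetByID(orderID); err != nil {
//			return err
//		}
//		_, err = o.WithContext(ctx).DeleteByIDs(orderID) // no-op when the ID list is empty
//		return err
//	}
// ---------------------------------------------------------------------------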
+ +package models + +import ( + "context" + "time" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/types" + "gorm.io/gorm" +) + +const TableNamePost = "posts" + +// Post mapped from table +type Post struct { + ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true" json:"id"` + CreatedAt time.Time `gorm:"column:created_at;type:timestamp without time zone;not null;default:now()" json:"created_at"` + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp without time zone;not null;default:now()" json:"updated_at"` + DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp without time zone" json:"deleted_at"` + Status int16 `gorm:"column:status;type:smallint;not null" json:"status"` + Title string `gorm:"column:title;type:character varying(128);not null" json:"title"` + HeadImages types.JSON `gorm:"column:head_images;type:jsonb;not null;default:[]" json:"head_images"` + Description string `gorm:"column:description;type:character varying(256);not null" json:"description"` + Content string `gorm:"column:content;type:text;not null" json:"content"` + Price int64 `gorm:"column:price;type:bigint;not null" json:"price"` + Discount int16 `gorm:"column:discount;type:smallint;not null;default:100" json:"discount"` + Views int64 `gorm:"column:views;type:bigint;not null" json:"views"` + Likes int64 `gorm:"column:likes;type:bigint;not null" json:"likes"` + Tags types.JSON `gorm:"column:tags;type:jsonb;default:{}" json:"tags"` + Assets types.JSON `gorm:"column:assets;type:jsonb;default:{}" json:"assets"` +} + +// Quick operations without importing query package +// Update applies changed fields to the database using the default DB. +func (m *Post) Update(ctx context.Context) (gen.ResultInfo, error) { + return Q.Post.WithContext(ctx).Updates(m) +} + +// Save upserts the model using the default DB. +func (m *Post) Save(ctx context.Context) error { return Q.Post.WithContext(ctx).Save(m) } + +// Create inserts the model using the default DB. +func (m *Post) Create(ctx context.Context) error { return Q.Post.WithContext(ctx).Create(m) } + +// Delete removes the row represented by the model using the default DB. +func (m *Post) Delete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Post.WithContext(ctx).Delete(m) +} + +// ForceDelete permanently deletes the row (ignores soft delete) using the default DB. +func (m *Post) ForceDelete(ctx context.Context) (gen.ResultInfo, error) { + return Q.Post.WithContext(ctx).Unscoped().Delete(m) +} + +// Restore sets deleted_at to NULL for this model's primary key using the default DB. +func (m *Post) Restore(ctx context.Context) (gen.ResultInfo, error) { + return Q.Post.WithContext(ctx).RestoreByID(m.ID) +} + +// Reload reloads the model from database by its primary key and overwrites current fields. +func (m *Post) Reload(ctx context.Context) error { + fresh, err := Q.Post.WithContext(ctx).GetByID(m.ID) + if err != nil { + return err + } + *m = *fresh + return nil +} diff --git a/backend_v1/database/models/posts.query.gen.go b/backend_v1/database/models/posts.query.gen.go new file mode 100644 index 0000000..efdbb00 --- /dev/null +++ b/backend_v1/database/models/posts.query.gen.go @@ -0,0 +1,528 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. 
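// ---------------------------------------------------------------------------
// Editorial note, not part of the generated patch: posts.gen.go above attaches
// "quick operations" directly to the model (Create, Update, Save, Delete,
// ForceDelete, Restore, Reload), so callers do not need the DAO for simple
// row-level work. A hypothetical sketch, assuming models.SetDefault(db) has
// been called; the post ID comes from elsewhere and error handling is terse.
//
//	func touchPost(ctx context.Context, postID int64) error {
//		post, err := models.Q.Post.WithContext(ctx).GetByID(postID)
//		if err != nil {
//			return err
//		}
//
//		post.Views++
//		if _, err := post.Update(ctx); err != nil { // persist changed fields via the default Q
//			return err
//		}
//
//		if _, err := post.Delete(ctx); err != nil { // soft delete: sets deleted_at
//			return err
//		}
//		if _, err := post.Restore(ctx); err != nil { // clears deleted_at for this primary key
//			return err
//		}
//		return post.Reload(ctx) // re-read the row and overwrite the in-memory fields
//	}
// ---------------------------------------------------------------------------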
+ +package models + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/field" + + "gorm.io/plugin/dbresolver" +) + +func newPost(db *gorm.DB, opts ...gen.DOOption) postQuery { + _postQuery := postQuery{} + + _postQuery.postQueryDo.UseDB(db, opts...) + _postQuery.postQueryDo.UseModel(&Post{}) + + tableName := _postQuery.postQueryDo.TableName() + _postQuery.ALL = field.NewAsterisk(tableName) + _postQuery.ID = field.NewInt64(tableName, "id") + _postQuery.CreatedAt = field.NewTime(tableName, "created_at") + _postQuery.UpdatedAt = field.NewTime(tableName, "updated_at") + _postQuery.DeletedAt = field.NewField(tableName, "deleted_at") + _postQuery.Status = field.NewInt16(tableName, "status") + _postQuery.Title = field.NewString(tableName, "title") + _postQuery.HeadImages = field.NewJSONB(tableName, "head_images") + _postQuery.Description = field.NewString(tableName, "description") + _postQuery.Content = field.NewString(tableName, "content") + _postQuery.Price = field.NewInt64(tableName, "price") + _postQuery.Discount = field.NewInt16(tableName, "discount") + _postQuery.Views = field.NewInt64(tableName, "views") + _postQuery.Likes = field.NewInt64(tableName, "likes") + _postQuery.Tags = field.NewJSONB(tableName, "tags") + _postQuery.Assets = field.NewJSONB(tableName, "assets") + + _postQuery.fillFieldMap() + + return _postQuery +} + +type postQuery struct { + postQueryDo postQueryDo + + ALL field.Asterisk + ID field.Int64 + CreatedAt field.Time + UpdatedAt field.Time + DeletedAt field.Field + Status field.Int16 + Title field.String + HeadImages field.JSONB + Description field.String + Content field.String + Price field.Int64 + Discount field.Int16 + Views field.Int64 + Likes field.Int64 + Tags field.JSONB + Assets field.JSONB + + fieldMap map[string]field.Expr +} + +func (p postQuery) Table(newTableName string) *postQuery { + p.postQueryDo.UseTable(newTableName) + return p.updateTableName(newTableName) +} + +func (p postQuery) As(alias string) *postQuery { + p.postQueryDo.DO = *(p.postQueryDo.As(alias).(*gen.DO)) + return p.updateTableName(alias) +} + +func (p *postQuery) updateTableName(table string) *postQuery { + p.ALL = field.NewAsterisk(table) + p.ID = field.NewInt64(table, "id") + p.CreatedAt = field.NewTime(table, "created_at") + p.UpdatedAt = field.NewTime(table, "updated_at") + p.DeletedAt = field.NewField(table, "deleted_at") + p.Status = field.NewInt16(table, "status") + p.Title = field.NewString(table, "title") + p.HeadImages = field.NewJSONB(table, "head_images") + p.Description = field.NewString(table, "description") + p.Content = field.NewString(table, "content") + p.Price = field.NewInt64(table, "price") + p.Discount = field.NewInt16(table, "discount") + p.Views = field.NewInt64(table, "views") + p.Likes = field.NewInt64(table, "likes") + p.Tags = field.NewJSONB(table, "tags") + p.Assets = field.NewJSONB(table, "assets") + + p.fillFieldMap() + + return p +} + +func (p *postQuery) QueryContext(ctx context.Context) (*postQuery, *postQueryDo) { + return p, p.postQueryDo.WithContext(ctx) +} + +func (p *postQuery) WithContext(ctx context.Context) *postQueryDo { + return p.postQueryDo.WithContext(ctx) +} + +func (p postQuery) TableName() string { return p.postQueryDo.TableName() } + +func (p postQuery) Alias() string { return p.postQueryDo.Alias() } + +func (p postQuery) Columns(cols ...field.Expr) gen.Columns { return p.postQueryDo.Columns(cols...) 
} + +func (p *postQuery) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := p.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (p *postQuery) fillFieldMap() { + p.fieldMap = make(map[string]field.Expr, 15) + p.fieldMap["id"] = p.ID + p.fieldMap["created_at"] = p.CreatedAt + p.fieldMap["updated_at"] = p.UpdatedAt + p.fieldMap["deleted_at"] = p.DeletedAt + p.fieldMap["status"] = p.Status + p.fieldMap["title"] = p.Title + p.fieldMap["head_images"] = p.HeadImages + p.fieldMap["description"] = p.Description + p.fieldMap["content"] = p.Content + p.fieldMap["price"] = p.Price + p.fieldMap["discount"] = p.Discount + p.fieldMap["views"] = p.Views + p.fieldMap["likes"] = p.Likes + p.fieldMap["tags"] = p.Tags + p.fieldMap["assets"] = p.Assets +} + +func (p postQuery) clone(db *gorm.DB) postQuery { + p.postQueryDo.ReplaceConnPool(db.Statement.ConnPool) + return p +} + +func (p postQuery) replaceDB(db *gorm.DB) postQuery { + p.postQueryDo.ReplaceDB(db) + return p +} + +type postQueryDo struct{ gen.DO } + +func (p postQueryDo) Debug() *postQueryDo { + return p.withDO(p.DO.Debug()) +} + +func (p postQueryDo) WithContext(ctx context.Context) *postQueryDo { + return p.withDO(p.DO.WithContext(ctx)) +} + +func (p postQueryDo) ReadDB() *postQueryDo { + return p.Clauses(dbresolver.Read) +} + +func (p postQueryDo) WriteDB() *postQueryDo { + return p.Clauses(dbresolver.Write) +} + +func (p postQueryDo) Session(config *gorm.Session) *postQueryDo { + return p.withDO(p.DO.Session(config)) +} + +func (p postQueryDo) Clauses(conds ...clause.Expression) *postQueryDo { + return p.withDO(p.DO.Clauses(conds...)) +} + +func (p postQueryDo) Returning(value interface{}, columns ...string) *postQueryDo { + return p.withDO(p.DO.Returning(value, columns...)) +} + +func (p postQueryDo) Not(conds ...gen.Condition) *postQueryDo { + return p.withDO(p.DO.Not(conds...)) +} + +func (p postQueryDo) Or(conds ...gen.Condition) *postQueryDo { + return p.withDO(p.DO.Or(conds...)) +} + +func (p postQueryDo) Select(conds ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Select(conds...)) +} + +func (p postQueryDo) Where(conds ...gen.Condition) *postQueryDo { + return p.withDO(p.DO.Where(conds...)) +} + +func (p postQueryDo) Order(conds ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Order(conds...)) +} + +func (p postQueryDo) Distinct(cols ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Distinct(cols...)) +} + +func (p postQueryDo) Omit(cols ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Omit(cols...)) +} + +func (p postQueryDo) Join(table schema.Tabler, on ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Join(table, on...)) +} + +func (p postQueryDo) LeftJoin(table schema.Tabler, on ...field.Expr) *postQueryDo { + return p.withDO(p.DO.LeftJoin(table, on...)) +} + +func (p postQueryDo) RightJoin(table schema.Tabler, on ...field.Expr) *postQueryDo { + return p.withDO(p.DO.RightJoin(table, on...)) +} + +func (p postQueryDo) Group(cols ...field.Expr) *postQueryDo { + return p.withDO(p.DO.Group(cols...)) +} + +func (p postQueryDo) Having(conds ...gen.Condition) *postQueryDo { + return p.withDO(p.DO.Having(conds...)) +} + +func (p postQueryDo) Limit(limit int) *postQueryDo { + return p.withDO(p.DO.Limit(limit)) +} + +func (p postQueryDo) Offset(offset int) *postQueryDo { + return p.withDO(p.DO.Offset(offset)) +} + +func (p postQueryDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *postQueryDo { + return 
p.withDO(p.DO.Scopes(funcs...)) +} + +func (p postQueryDo) Unscoped() *postQueryDo { + return p.withDO(p.DO.Unscoped()) +} + +func (p postQueryDo) Create(values ...*Post) error { + if len(values) == 0 { + return nil + } + return p.DO.Create(values) +} + +func (p postQueryDo) CreateInBatches(values []*Post, batchSize int) error { + return p.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (p postQueryDo) Save(values ...*Post) error { + if len(values) == 0 { + return nil + } + return p.DO.Save(values) +} + +func (p postQueryDo) First() (*Post, error) { + if result, err := p.DO.First(); err != nil { + return nil, err + } else { + return result.(*Post), nil + } +} + +func (p postQueryDo) Take() (*Post, error) { + if result, err := p.DO.Take(); err != nil { + return nil, err + } else { + return result.(*Post), nil + } +} + +func (p postQueryDo) Last() (*Post, error) { + if result, err := p.DO.Last(); err != nil { + return nil, err + } else { + return result.(*Post), nil + } +} + +func (p postQueryDo) Find() ([]*Post, error) { + result, err := p.DO.Find() + return result.([]*Post), err +} + +func (p postQueryDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*Post, err error) { + buf := make([]*Post, 0, batchSize) + err = p.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (p postQueryDo) FindInBatches(result *[]*Post, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return p.DO.FindInBatches(result, batchSize, fc) +} + +func (p postQueryDo) Attrs(attrs ...field.AssignExpr) *postQueryDo { + return p.withDO(p.DO.Attrs(attrs...)) +} + +func (p postQueryDo) Assign(attrs ...field.AssignExpr) *postQueryDo { + return p.withDO(p.DO.Assign(attrs...)) +} + +func (p postQueryDo) Joins(fields ...field.RelationField) *postQueryDo { + for _, _f := range fields { + p = *p.withDO(p.DO.Joins(_f)) + } + return &p +} + +func (p postQueryDo) Preload(fields ...field.RelationField) *postQueryDo { + for _, _f := range fields { + p = *p.withDO(p.DO.Preload(_f)) + } + return &p +} + +func (p postQueryDo) FirstOrInit() (*Post, error) { + if result, err := p.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*Post), nil + } +} + +func (p postQueryDo) FirstOrCreate() (*Post, error) { + if result, err := p.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*Post), nil + } +} + +func (p postQueryDo) FindByPage(offset int, limit int) (result []*Post, count int64, err error) { + result, err = p.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = p.Offset(-1).Limit(-1).Count() + return +} + +func (p postQueryDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = p.Count() + if err != nil { + return + } + + err = p.Offset(offset).Limit(limit).Scan(result) + return +} + +func (p postQueryDo) Scan(result interface{}) (err error) { + return p.DO.Scan(result) +} + +func (p postQueryDo) Delete(models ...*Post) (result gen.ResultInfo, err error) { + return p.DO.Delete(models) +} + +// ForceDelete performs a permanent delete (ignores 
soft-delete) for current scope. +func (p postQueryDo) ForceDelete() (gen.ResultInfo, error) { + return p.Unscoped().Delete() +} + +// Inc increases the given column by step for current scope. +func (p postQueryDo) Inc(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column + step + e := field.NewUnsafeFieldRaw("?+?", column.RawExpr(), step) + return p.DO.UpdateColumn(column, e) +} + +// Dec decreases the given column by step for current scope. +func (p postQueryDo) Dec(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column - step + e := field.NewUnsafeFieldRaw("?-?", column.RawExpr(), step) + return p.DO.UpdateColumn(column, e) +} + +// Sum returns SUM(column) for current scope. +func (p postQueryDo) Sum(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("SUM(?)", column.RawExpr()) + if err := p.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Avg returns AVG(column) for current scope. +func (p postQueryDo) Avg(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("AVG(?)", column.RawExpr()) + if err := p.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Min returns MIN(column) for current scope. +func (p postQueryDo) Min(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MIN(?)", column.RawExpr()) + if err := p.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Max returns MAX(column) for current scope. +func (p postQueryDo) Max(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MAX(?)", column.RawExpr()) + if err := p.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// PluckMap returns a map[key]value for selected key/value expressions within current scope. +func (p postQueryDo) PluckMap(key, val field.Expr) (map[interface{}]interface{}, error) { + do := p.Select(key, val) + rows, err := do.DO.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + mm := make(map[interface{}]interface{}) + for rows.Next() { + var k interface{} + var v interface{} + if err := rows.Scan(&k, &v); err != nil { + return nil, err + } + mm[k] = v + } + return mm, rows.Err() +} + +// Exists returns true if any record matches the given conditions. +func (p postQueryDo) Exists(conds ...gen.Condition) (bool, error) { + cnt, err := p.Where(conds...).Count() + if err != nil { + return false, err + } + return cnt > 0, nil +} + +// PluckIDs returns all primary key values under current scope. +func (p postQueryDo) PluckIDs() ([]int64, error) { + ids := make([]int64, 0, 16) + pk := field.NewInt64(p.TableName(), "id") + if err := p.DO.Pluck(pk, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// GetByID finds a single record by primary key. +func (p postQueryDo) GetByID(id int64) (*Post, error) { + pk := field.NewInt64(p.TableName(), "id") + return p.Where(pk.Eq(id)).First() +} + +// GetByIDs finds records by primary key list. +func (p postQueryDo) GetByIDs(ids ...int64) ([]*Post, error) { + if len(ids) == 0 { + return []*Post{}, nil + } + pk := field.NewInt64(p.TableName(), "id") + return p.Where(pk.In(ids...)).Find() +} + +// DeleteByID deletes records by primary key. +func (p postQueryDo) DeleteByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(p.TableName(), "id") + return p.Where(pk.Eq(id)).Delete() +} + +// DeleteByIDs deletes records by a list of primary keys. 
+func (p postQueryDo) DeleteByIDs(ids ...int64) (gen.ResultInfo, error) { + if len(ids) == 0 { + return gen.ResultInfo{RowsAffected: 0, Error: nil}, nil + } + pk := field.NewInt64(p.TableName(), "id") + return p.Where(pk.In(ids...)).Delete() +} + +// RestoreWhere sets deleted_at to NULL for rows matching current scope + conds. +func (p postQueryDo) RestoreWhere(conds ...gen.Condition) (gen.ResultInfo, error) { + col := field.NewField(p.TableName(), "deleted_at") + return p.Unscoped().Where(conds...).UpdateColumn(col, nil) +} + +// RestoreByID sets deleted_at to NULL for the given primary key. +func (p postQueryDo) RestoreByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(p.TableName(), "id") + col := field.NewField(p.TableName(), "deleted_at") + return p.Unscoped().Where(pk.Eq(id)).UpdateColumn(col, nil) +} + +func (p *postQueryDo) withDO(do gen.Dao) *postQueryDo { + p.DO = *do.(*gen.DO) + return p +} diff --git a/backend_v1/database/models/query.gen.go b/backend_v1/database/models/query.gen.go new file mode 100644 index 0000000..5e2852a --- /dev/null +++ b/backend_v1/database/models/query.gen.go @@ -0,0 +1,135 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + "database/sql" + + "gorm.io/gorm" + + "go.ipao.vip/gen" + + "gorm.io/plugin/dbresolver" +) + +var ( + Q = new(Query) + MediaQuery *mediaQuery + OrderQuery *orderQuery + PostQuery *postQuery + UserQuery *userQuery + UserPostQuery *userPostQuery +) + +func SetDefault(db *gorm.DB, opts ...gen.DOOption) { + *Q = *Use(db, opts...) + MediaQuery = &Q.Media + OrderQuery = &Q.Order + PostQuery = &Q.Post + UserQuery = &Q.User + UserPostQuery = &Q.UserPost +} + +func Use(db *gorm.DB, opts ...gen.DOOption) *Query { + return &Query{ + db: db, + Media: newMedia(db, opts...), + Order: newOrder(db, opts...), + Post: newPost(db, opts...), + User: newUser(db, opts...), + UserPost: newUserPost(db, opts...), + } +} + +type Query struct { + db *gorm.DB + + Media mediaQuery + Order orderQuery + Post postQuery + User userQuery + UserPost userPostQuery +} + +func (q *Query) Available() bool { return q.db != nil } + +func (q *Query) clone(db *gorm.DB) *Query { + return &Query{ + db: db, + Media: q.Media.clone(db), + Order: q.Order.clone(db), + Post: q.Post.clone(db), + User: q.User.clone(db), + UserPost: q.UserPost.clone(db), + } +} + +func (q *Query) ReadDB() *Query { + return q.ReplaceDB(q.db.Clauses(dbresolver.Read)) +} + +func (q *Query) WriteDB() *Query { + return q.ReplaceDB(q.db.Clauses(dbresolver.Write)) +} + +func (q *Query) ReplaceDB(db *gorm.DB) *Query { + return &Query{ + db: db, + Media: q.Media.replaceDB(db), + Order: q.Order.replaceDB(db), + Post: q.Post.replaceDB(db), + User: q.User.replaceDB(db), + UserPost: q.UserPost.replaceDB(db), + } +} + +type queryCtx struct { + Media *mediaQueryDo + Order *orderQueryDo + Post *postQueryDo + User *userQueryDo + UserPost *userPostQueryDo +} + +func (q *Query) WithContext(ctx context.Context) *queryCtx { + return &queryCtx{ + Media: q.Media.WithContext(ctx), + Order: q.Order.WithContext(ctx), + Post: q.Post.WithContext(ctx), + User: q.User.WithContext(ctx), + UserPost: q.UserPost.WithContext(ctx), + } +} + +func (q *Query) Transaction(fc func(tx *Query) error, opts ...*sql.TxOptions) error { + return q.db.Transaction(func(tx *gorm.DB) error { return fc(q.clone(tx)) }, opts...) 
+} + +func (q *Query) Begin(opts ...*sql.TxOptions) *QueryTx { + tx := q.db.Begin(opts...) + return &QueryTx{Query: q.clone(tx), Error: tx.Error} +} + +type QueryTx struct { + *Query + Error error +} + +func (q *QueryTx) Commit() error { + return q.db.Commit().Error +} + +func (q *QueryTx) Rollback() error { + return q.db.Rollback().Error +} + +func (q *QueryTx) SavePoint(name string) error { + return q.db.SavePoint(name).Error +} + +func (q *QueryTx) RollbackTo(name string) error { + return q.db.RollbackTo(name).Error +} diff --git a/backend_v1/database/models/user_posts.gen.go b/backend_v1/database/models/user_posts.gen.go new file mode 100644 index 0000000..aa08135 --- /dev/null +++ b/backend_v1/database/models/user_posts.gen.go @@ -0,0 +1,56 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + "time" + + "go.ipao.vip/gen" +) + +const TableNameUserPost = "user_posts" + +// UserPost mapped from table +type UserPost struct { + ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true" json:"id"` + CreatedAt time.Time `gorm:"column:created_at;type:timestamp without time zone;not null;default:now()" json:"created_at"` + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp without time zone;not null;default:now()" json:"updated_at"` + UserID int64 `gorm:"column:user_id;type:bigint;not null" json:"user_id"` + PostID int64 `gorm:"column:post_id;type:bigint;not null" json:"post_id"` + Price int64 `gorm:"column:price;type:bigint;not null" json:"price"` +} + +// Quick operations without importing query package +// Update applies changed fields to the database using the default DB. +func (m *UserPost) Update(ctx context.Context) (gen.ResultInfo, error) { + return Q.UserPost.WithContext(ctx).Updates(m) +} + +// Save upserts the model using the default DB. +func (m *UserPost) Save(ctx context.Context) error { return Q.UserPost.WithContext(ctx).Save(m) } + +// Create inserts the model using the default DB. +func (m *UserPost) Create(ctx context.Context) error { return Q.UserPost.WithContext(ctx).Create(m) } + +// Delete removes the row represented by the model using the default DB. +func (m *UserPost) Delete(ctx context.Context) (gen.ResultInfo, error) { + return Q.UserPost.WithContext(ctx).Delete(m) +} + +// ForceDelete permanently deletes the row (ignores soft delete) using the default DB. +func (m *UserPost) ForceDelete(ctx context.Context) (gen.ResultInfo, error) { + return Q.UserPost.WithContext(ctx).Unscoped().Delete(m) +} + +// Reload reloads the model from database by its primary key and overwrites current fields. +func (m *UserPost) Reload(ctx context.Context) error { + fresh, err := Q.UserPost.WithContext(ctx).GetByID(m.ID) + if err != nil { + return err + } + *m = *fresh + return nil +} diff --git a/backend_v1/database/models/user_posts.query.gen.go b/backend_v1/database/models/user_posts.query.gen.go new file mode 100644 index 0000000..517b796 --- /dev/null +++ b/backend_v1/database/models/user_posts.query.gen.go @@ -0,0 +1,481 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. 
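// ---------------------------------------------------------------------------
// Editorial note, not part of the generated patch: query.gen.go above wires
// the five DAOs together behind a single Query value and the package-level Q.
// A hypothetical initialization and transaction sketch; it assumes an open
// *gorm.DB (gorm.io/gorm), a project-specific models import path, and that
// User gets the same Inc/Dec helpers as the other models (its query file
// appears later in this patch). The transfer amount is illustrative.
//
//	func setupAndTransfer(ctx context.Context, db *gorm.DB, fromID, toID int64) error {
//		models.SetDefault(db) // populates the package-level Q used by the model quick ops
//
//		// Per-request handle: context applied to every DAO at once.
//		if _, err := models.Q.WithContext(ctx).User.GetByID(fromID); err != nil {
//			return err
//		}
//
//		// Closure-style transaction: Q is cloned onto the tx connection.
//		return models.Q.Transaction(func(tx *models.Query) error {
//			u := tx.User
//			if _, err := tx.User.WithContext(ctx).Where(u.ID.Eq(fromID)).Dec(u.Balance, 100); err != nil {
//				return err
//			}
//			_, err := tx.User.WithContext(ctx).Where(u.ID.Eq(toID)).Inc(u.Balance, 100)
//			return err
//		})
//	}
//
//	// Alternatively, manual control: tx := models.Q.Begin(); ... ; tx.Commit() or tx.Rollback()
// ---------------------------------------------------------------------------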
+ +package models + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/field" + + "gorm.io/plugin/dbresolver" +) + +func newUserPost(db *gorm.DB, opts ...gen.DOOption) userPostQuery { + _userPostQuery := userPostQuery{} + + _userPostQuery.userPostQueryDo.UseDB(db, opts...) + _userPostQuery.userPostQueryDo.UseModel(&UserPost{}) + + tableName := _userPostQuery.userPostQueryDo.TableName() + _userPostQuery.ALL = field.NewAsterisk(tableName) + _userPostQuery.ID = field.NewInt64(tableName, "id") + _userPostQuery.CreatedAt = field.NewTime(tableName, "created_at") + _userPostQuery.UpdatedAt = field.NewTime(tableName, "updated_at") + _userPostQuery.UserID = field.NewInt64(tableName, "user_id") + _userPostQuery.PostID = field.NewInt64(tableName, "post_id") + _userPostQuery.Price = field.NewInt64(tableName, "price") + + _userPostQuery.fillFieldMap() + + return _userPostQuery +} + +type userPostQuery struct { + userPostQueryDo userPostQueryDo + + ALL field.Asterisk + ID field.Int64 + CreatedAt field.Time + UpdatedAt field.Time + UserID field.Int64 + PostID field.Int64 + Price field.Int64 + + fieldMap map[string]field.Expr +} + +func (u userPostQuery) Table(newTableName string) *userPostQuery { + u.userPostQueryDo.UseTable(newTableName) + return u.updateTableName(newTableName) +} + +func (u userPostQuery) As(alias string) *userPostQuery { + u.userPostQueryDo.DO = *(u.userPostQueryDo.As(alias).(*gen.DO)) + return u.updateTableName(alias) +} + +func (u *userPostQuery) updateTableName(table string) *userPostQuery { + u.ALL = field.NewAsterisk(table) + u.ID = field.NewInt64(table, "id") + u.CreatedAt = field.NewTime(table, "created_at") + u.UpdatedAt = field.NewTime(table, "updated_at") + u.UserID = field.NewInt64(table, "user_id") + u.PostID = field.NewInt64(table, "post_id") + u.Price = field.NewInt64(table, "price") + + u.fillFieldMap() + + return u +} + +func (u *userPostQuery) QueryContext(ctx context.Context) (*userPostQuery, *userPostQueryDo) { + return u, u.userPostQueryDo.WithContext(ctx) +} + +func (u *userPostQuery) WithContext(ctx context.Context) *userPostQueryDo { + return u.userPostQueryDo.WithContext(ctx) +} + +func (u userPostQuery) TableName() string { return u.userPostQueryDo.TableName() } + +func (u userPostQuery) Alias() string { return u.userPostQueryDo.Alias() } + +func (u userPostQuery) Columns(cols ...field.Expr) gen.Columns { + return u.userPostQueryDo.Columns(cols...) 
+} + +func (u *userPostQuery) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := u.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (u *userPostQuery) fillFieldMap() { + u.fieldMap = make(map[string]field.Expr, 6) + u.fieldMap["id"] = u.ID + u.fieldMap["created_at"] = u.CreatedAt + u.fieldMap["updated_at"] = u.UpdatedAt + u.fieldMap["user_id"] = u.UserID + u.fieldMap["post_id"] = u.PostID + u.fieldMap["price"] = u.Price +} + +func (u userPostQuery) clone(db *gorm.DB) userPostQuery { + u.userPostQueryDo.ReplaceConnPool(db.Statement.ConnPool) + return u +} + +func (u userPostQuery) replaceDB(db *gorm.DB) userPostQuery { + u.userPostQueryDo.ReplaceDB(db) + return u +} + +type userPostQueryDo struct{ gen.DO } + +func (u userPostQueryDo) Debug() *userPostQueryDo { + return u.withDO(u.DO.Debug()) +} + +func (u userPostQueryDo) WithContext(ctx context.Context) *userPostQueryDo { + return u.withDO(u.DO.WithContext(ctx)) +} + +func (u userPostQueryDo) ReadDB() *userPostQueryDo { + return u.Clauses(dbresolver.Read) +} + +func (u userPostQueryDo) WriteDB() *userPostQueryDo { + return u.Clauses(dbresolver.Write) +} + +func (u userPostQueryDo) Session(config *gorm.Session) *userPostQueryDo { + return u.withDO(u.DO.Session(config)) +} + +func (u userPostQueryDo) Clauses(conds ...clause.Expression) *userPostQueryDo { + return u.withDO(u.DO.Clauses(conds...)) +} + +func (u userPostQueryDo) Returning(value interface{}, columns ...string) *userPostQueryDo { + return u.withDO(u.DO.Returning(value, columns...)) +} + +func (u userPostQueryDo) Not(conds ...gen.Condition) *userPostQueryDo { + return u.withDO(u.DO.Not(conds...)) +} + +func (u userPostQueryDo) Or(conds ...gen.Condition) *userPostQueryDo { + return u.withDO(u.DO.Or(conds...)) +} + +func (u userPostQueryDo) Select(conds ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Select(conds...)) +} + +func (u userPostQueryDo) Where(conds ...gen.Condition) *userPostQueryDo { + return u.withDO(u.DO.Where(conds...)) +} + +func (u userPostQueryDo) Order(conds ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Order(conds...)) +} + +func (u userPostQueryDo) Distinct(cols ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Distinct(cols...)) +} + +func (u userPostQueryDo) Omit(cols ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Omit(cols...)) +} + +func (u userPostQueryDo) Join(table schema.Tabler, on ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Join(table, on...)) +} + +func (u userPostQueryDo) LeftJoin(table schema.Tabler, on ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.LeftJoin(table, on...)) +} + +func (u userPostQueryDo) RightJoin(table schema.Tabler, on ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.RightJoin(table, on...)) +} + +func (u userPostQueryDo) Group(cols ...field.Expr) *userPostQueryDo { + return u.withDO(u.DO.Group(cols...)) +} + +func (u userPostQueryDo) Having(conds ...gen.Condition) *userPostQueryDo { + return u.withDO(u.DO.Having(conds...)) +} + +func (u userPostQueryDo) Limit(limit int) *userPostQueryDo { + return u.withDO(u.DO.Limit(limit)) +} + +func (u userPostQueryDo) Offset(offset int) *userPostQueryDo { + return u.withDO(u.DO.Offset(offset)) +} + +func (u userPostQueryDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *userPostQueryDo { + return u.withDO(u.DO.Scopes(funcs...)) +} + +func (u userPostQueryDo) Unscoped() *userPostQueryDo { + return u.withDO(u.DO.Unscoped()) +} + +func 
(u userPostQueryDo) Create(values ...*UserPost) error { + if len(values) == 0 { + return nil + } + return u.DO.Create(values) +} + +func (u userPostQueryDo) CreateInBatches(values []*UserPost, batchSize int) error { + return u.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (u userPostQueryDo) Save(values ...*UserPost) error { + if len(values) == 0 { + return nil + } + return u.DO.Save(values) +} + +func (u userPostQueryDo) First() (*UserPost, error) { + if result, err := u.DO.First(); err != nil { + return nil, err + } else { + return result.(*UserPost), nil + } +} + +func (u userPostQueryDo) Take() (*UserPost, error) { + if result, err := u.DO.Take(); err != nil { + return nil, err + } else { + return result.(*UserPost), nil + } +} + +func (u userPostQueryDo) Last() (*UserPost, error) { + if result, err := u.DO.Last(); err != nil { + return nil, err + } else { + return result.(*UserPost), nil + } +} + +func (u userPostQueryDo) Find() ([]*UserPost, error) { + result, err := u.DO.Find() + return result.([]*UserPost), err +} + +func (u userPostQueryDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*UserPost, err error) { + buf := make([]*UserPost, 0, batchSize) + err = u.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (u userPostQueryDo) FindInBatches(result *[]*UserPost, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return u.DO.FindInBatches(result, batchSize, fc) +} + +func (u userPostQueryDo) Attrs(attrs ...field.AssignExpr) *userPostQueryDo { + return u.withDO(u.DO.Attrs(attrs...)) +} + +func (u userPostQueryDo) Assign(attrs ...field.AssignExpr) *userPostQueryDo { + return u.withDO(u.DO.Assign(attrs...)) +} + +func (u userPostQueryDo) Joins(fields ...field.RelationField) *userPostQueryDo { + for _, _f := range fields { + u = *u.withDO(u.DO.Joins(_f)) + } + return &u +} + +func (u userPostQueryDo) Preload(fields ...field.RelationField) *userPostQueryDo { + for _, _f := range fields { + u = *u.withDO(u.DO.Preload(_f)) + } + return &u +} + +func (u userPostQueryDo) FirstOrInit() (*UserPost, error) { + if result, err := u.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*UserPost), nil + } +} + +func (u userPostQueryDo) FirstOrCreate() (*UserPost, error) { + if result, err := u.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*UserPost), nil + } +} + +func (u userPostQueryDo) FindByPage(offset int, limit int) (result []*UserPost, count int64, err error) { + result, err = u.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = u.Offset(-1).Limit(-1).Count() + return +} + +func (u userPostQueryDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = u.Count() + if err != nil { + return + } + + err = u.Offset(offset).Limit(limit).Scan(result) + return +} + +func (u userPostQueryDo) Scan(result interface{}) (err error) { + return u.DO.Scan(result) +} + +func (u userPostQueryDo) Delete(models ...*UserPost) (result gen.ResultInfo, err error) { + return u.DO.Delete(models) +} + +// ForceDelete 
performs a permanent delete (ignores soft-delete) for current scope. +func (u userPostQueryDo) ForceDelete() (gen.ResultInfo, error) { + return u.Unscoped().Delete() +} + +// Inc increases the given column by step for current scope. +func (u userPostQueryDo) Inc(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column + step + e := field.NewUnsafeFieldRaw("?+?", column.RawExpr(), step) + return u.DO.UpdateColumn(column, e) +} + +// Dec decreases the given column by step for current scope. +func (u userPostQueryDo) Dec(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column - step + e := field.NewUnsafeFieldRaw("?-?", column.RawExpr(), step) + return u.DO.UpdateColumn(column, e) +} + +// Sum returns SUM(column) for current scope. +func (u userPostQueryDo) Sum(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("SUM(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Avg returns AVG(column) for current scope. +func (u userPostQueryDo) Avg(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("AVG(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Min returns MIN(column) for current scope. +func (u userPostQueryDo) Min(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MIN(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Max returns MAX(column) for current scope. +func (u userPostQueryDo) Max(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MAX(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// PluckMap returns a map[key]value for selected key/value expressions within current scope. +func (u userPostQueryDo) PluckMap(key, val field.Expr) (map[interface{}]interface{}, error) { + do := u.Select(key, val) + rows, err := do.DO.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + mm := make(map[interface{}]interface{}) + for rows.Next() { + var k interface{} + var v interface{} + if err := rows.Scan(&k, &v); err != nil { + return nil, err + } + mm[k] = v + } + return mm, rows.Err() +} + +// Exists returns true if any record matches the given conditions. +func (u userPostQueryDo) Exists(conds ...gen.Condition) (bool, error) { + cnt, err := u.Where(conds...).Count() + if err != nil { + return false, err + } + return cnt > 0, nil +} + +// PluckIDs returns all primary key values under current scope. +func (u userPostQueryDo) PluckIDs() ([]int64, error) { + ids := make([]int64, 0, 16) + pk := field.NewInt64(u.TableName(), "id") + if err := u.DO.Pluck(pk, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// GetByID finds a single record by primary key. +func (u userPostQueryDo) GetByID(id int64) (*UserPost, error) { + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.Eq(id)).First() +} + +// GetByIDs finds records by primary key list. +func (u userPostQueryDo) GetByIDs(ids ...int64) ([]*UserPost, error) { + if len(ids) == 0 { + return []*UserPost{}, nil + } + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.In(ids...)).Find() +} + +// DeleteByID deletes records by primary key. 
+func (u userPostQueryDo) DeleteByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.Eq(id)).Delete() +} + +// DeleteByIDs deletes records by a list of primary keys. +func (u userPostQueryDo) DeleteByIDs(ids ...int64) (gen.ResultInfo, error) { + if len(ids) == 0 { + return gen.ResultInfo{RowsAffected: 0, Error: nil}, nil + } + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.In(ids...)).Delete() +} + +func (u *userPostQueryDo) withDO(do gen.Dao) *userPostQueryDo { + u.DO = *do.(*gen.DO) + return u +} diff --git a/backend_v1/database/models/users.gen.go b/backend_v1/database/models/users.gen.go new file mode 100644 index 0000000..9784f47 --- /dev/null +++ b/backend_v1/database/models/users.gen.go @@ -0,0 +1,68 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + "time" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/types" + "gorm.io/gorm" +) + +const TableNameUser = "users" + +// User mapped from table +type User struct { + ID int64 `gorm:"column:id;type:bigint;primaryKey;autoIncrement:true" json:"id"` + CreatedAt time.Time `gorm:"column:created_at;type:timestamp without time zone;not null;default:now()" json:"created_at"` + UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp without time zone;not null;default:now()" json:"updated_at"` + DeletedAt gorm.DeletedAt `gorm:"column:deleted_at;type:timestamp without time zone" json:"deleted_at"` + Status int16 `gorm:"column:status;type:smallint;not null" json:"status"` + OpenID string `gorm:"column:open_id;type:character varying(128);not null" json:"open_id"` + Username string `gorm:"column:username;type:character varying(128);not null" json:"username"` + Avatar string `gorm:"column:avatar;type:text" json:"avatar"` + Metas types.JSON `gorm:"column:metas;type:jsonb;not null;default:{}" json:"metas"` + AuthToken types.JSON `gorm:"column:auth_token;type:jsonb;not null;default:{}" json:"auth_token"` + Balance int64 `gorm:"column:balance;type:bigint;not null" json:"balance"` +} + +// Quick operations without importing query package +// Update applies changed fields to the database using the default DB. +func (m *User) Update(ctx context.Context) (gen.ResultInfo, error) { + return Q.User.WithContext(ctx).Updates(m) +} + +// Save upserts the model using the default DB. +func (m *User) Save(ctx context.Context) error { return Q.User.WithContext(ctx).Save(m) } + +// Create inserts the model using the default DB. +func (m *User) Create(ctx context.Context) error { return Q.User.WithContext(ctx).Create(m) } + +// Delete removes the row represented by the model using the default DB. +func (m *User) Delete(ctx context.Context) (gen.ResultInfo, error) { + return Q.User.WithContext(ctx).Delete(m) +} + +// ForceDelete permanently deletes the row (ignores soft delete) using the default DB. +func (m *User) ForceDelete(ctx context.Context) (gen.ResultInfo, error) { + return Q.User.WithContext(ctx).Unscoped().Delete(m) +} + +// Restore sets deleted_at to NULL for this model's primary key using the default DB. +func (m *User) Restore(ctx context.Context) (gen.ResultInfo, error) { + return Q.User.WithContext(ctx).RestoreByID(m.ID) +} + +// Reload reloads the model from database by its primary key and overwrites current fields. 
+func (m *User) Reload(ctx context.Context) error { + fresh, err := Q.User.WithContext(ctx).GetByID(m.ID) + if err != nil { + return err + } + *m = *fresh + return nil +} diff --git a/backend_v1/database/models/users.query.gen.go b/backend_v1/database/models/users.query.gen.go new file mode 100644 index 0000000..0e3ec67 --- /dev/null +++ b/backend_v1/database/models/users.query.gen.go @@ -0,0 +1,512 @@ +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. +// Code generated by go.ipao.vip/gen. DO NOT EDIT. + +package models + +import ( + "context" + + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/schema" + + "go.ipao.vip/gen" + "go.ipao.vip/gen/field" + + "gorm.io/plugin/dbresolver" +) + +func newUser(db *gorm.DB, opts ...gen.DOOption) userQuery { + _userQuery := userQuery{} + + _userQuery.userQueryDo.UseDB(db, opts...) + _userQuery.userQueryDo.UseModel(&User{}) + + tableName := _userQuery.userQueryDo.TableName() + _userQuery.ALL = field.NewAsterisk(tableName) + _userQuery.ID = field.NewInt64(tableName, "id") + _userQuery.CreatedAt = field.NewTime(tableName, "created_at") + _userQuery.UpdatedAt = field.NewTime(tableName, "updated_at") + _userQuery.DeletedAt = field.NewField(tableName, "deleted_at") + _userQuery.Status = field.NewInt16(tableName, "status") + _userQuery.OpenID = field.NewString(tableName, "open_id") + _userQuery.Username = field.NewString(tableName, "username") + _userQuery.Avatar = field.NewString(tableName, "avatar") + _userQuery.Metas = field.NewJSONB(tableName, "metas") + _userQuery.AuthToken = field.NewJSONB(tableName, "auth_token") + _userQuery.Balance = field.NewInt64(tableName, "balance") + + _userQuery.fillFieldMap() + + return _userQuery +} + +type userQuery struct { + userQueryDo userQueryDo + + ALL field.Asterisk + ID field.Int64 + CreatedAt field.Time + UpdatedAt field.Time + DeletedAt field.Field + Status field.Int16 + OpenID field.String + Username field.String + Avatar field.String + Metas field.JSONB + AuthToken field.JSONB + Balance field.Int64 + + fieldMap map[string]field.Expr +} + +func (u userQuery) Table(newTableName string) *userQuery { + u.userQueryDo.UseTable(newTableName) + return u.updateTableName(newTableName) +} + +func (u userQuery) As(alias string) *userQuery { + u.userQueryDo.DO = *(u.userQueryDo.As(alias).(*gen.DO)) + return u.updateTableName(alias) +} + +func (u *userQuery) updateTableName(table string) *userQuery { + u.ALL = field.NewAsterisk(table) + u.ID = field.NewInt64(table, "id") + u.CreatedAt = field.NewTime(table, "created_at") + u.UpdatedAt = field.NewTime(table, "updated_at") + u.DeletedAt = field.NewField(table, "deleted_at") + u.Status = field.NewInt16(table, "status") + u.OpenID = field.NewString(table, "open_id") + u.Username = field.NewString(table, "username") + u.Avatar = field.NewString(table, "avatar") + u.Metas = field.NewJSONB(table, "metas") + u.AuthToken = field.NewJSONB(table, "auth_token") + u.Balance = field.NewInt64(table, "balance") + + u.fillFieldMap() + + return u +} + +func (u *userQuery) QueryContext(ctx context.Context) (*userQuery, *userQueryDo) { + return u, u.userQueryDo.WithContext(ctx) +} + +func (u *userQuery) WithContext(ctx context.Context) *userQueryDo { + return u.userQueryDo.WithContext(ctx) +} + +func (u userQuery) TableName() string { return u.userQueryDo.TableName() } + +func (u userQuery) Alias() string { return u.userQueryDo.Alias() } + +func (u userQuery) Columns(cols ...field.Expr) gen.Columns { return 
u.userQueryDo.Columns(cols...) } + +func (u *userQuery) GetFieldByName(fieldName string) (field.OrderExpr, bool) { + _f, ok := u.fieldMap[fieldName] + if !ok || _f == nil { + return nil, false + } + _oe, ok := _f.(field.OrderExpr) + return _oe, ok +} + +func (u *userQuery) fillFieldMap() { + u.fieldMap = make(map[string]field.Expr, 11) + u.fieldMap["id"] = u.ID + u.fieldMap["created_at"] = u.CreatedAt + u.fieldMap["updated_at"] = u.UpdatedAt + u.fieldMap["deleted_at"] = u.DeletedAt + u.fieldMap["status"] = u.Status + u.fieldMap["open_id"] = u.OpenID + u.fieldMap["username"] = u.Username + u.fieldMap["avatar"] = u.Avatar + u.fieldMap["metas"] = u.Metas + u.fieldMap["auth_token"] = u.AuthToken + u.fieldMap["balance"] = u.Balance +} + +func (u userQuery) clone(db *gorm.DB) userQuery { + u.userQueryDo.ReplaceConnPool(db.Statement.ConnPool) + return u +} + +func (u userQuery) replaceDB(db *gorm.DB) userQuery { + u.userQueryDo.ReplaceDB(db) + return u +} + +type userQueryDo struct{ gen.DO } + +func (u userQueryDo) Debug() *userQueryDo { + return u.withDO(u.DO.Debug()) +} + +func (u userQueryDo) WithContext(ctx context.Context) *userQueryDo { + return u.withDO(u.DO.WithContext(ctx)) +} + +func (u userQueryDo) ReadDB() *userQueryDo { + return u.Clauses(dbresolver.Read) +} + +func (u userQueryDo) WriteDB() *userQueryDo { + return u.Clauses(dbresolver.Write) +} + +func (u userQueryDo) Session(config *gorm.Session) *userQueryDo { + return u.withDO(u.DO.Session(config)) +} + +func (u userQueryDo) Clauses(conds ...clause.Expression) *userQueryDo { + return u.withDO(u.DO.Clauses(conds...)) +} + +func (u userQueryDo) Returning(value interface{}, columns ...string) *userQueryDo { + return u.withDO(u.DO.Returning(value, columns...)) +} + +func (u userQueryDo) Not(conds ...gen.Condition) *userQueryDo { + return u.withDO(u.DO.Not(conds...)) +} + +func (u userQueryDo) Or(conds ...gen.Condition) *userQueryDo { + return u.withDO(u.DO.Or(conds...)) +} + +func (u userQueryDo) Select(conds ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Select(conds...)) +} + +func (u userQueryDo) Where(conds ...gen.Condition) *userQueryDo { + return u.withDO(u.DO.Where(conds...)) +} + +func (u userQueryDo) Order(conds ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Order(conds...)) +} + +func (u userQueryDo) Distinct(cols ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Distinct(cols...)) +} + +func (u userQueryDo) Omit(cols ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Omit(cols...)) +} + +func (u userQueryDo) Join(table schema.Tabler, on ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Join(table, on...)) +} + +func (u userQueryDo) LeftJoin(table schema.Tabler, on ...field.Expr) *userQueryDo { + return u.withDO(u.DO.LeftJoin(table, on...)) +} + +func (u userQueryDo) RightJoin(table schema.Tabler, on ...field.Expr) *userQueryDo { + return u.withDO(u.DO.RightJoin(table, on...)) +} + +func (u userQueryDo) Group(cols ...field.Expr) *userQueryDo { + return u.withDO(u.DO.Group(cols...)) +} + +func (u userQueryDo) Having(conds ...gen.Condition) *userQueryDo { + return u.withDO(u.DO.Having(conds...)) +} + +func (u userQueryDo) Limit(limit int) *userQueryDo { + return u.withDO(u.DO.Limit(limit)) +} + +func (u userQueryDo) Offset(offset int) *userQueryDo { + return u.withDO(u.DO.Offset(offset)) +} + +func (u userQueryDo) Scopes(funcs ...func(gen.Dao) gen.Dao) *userQueryDo { + return u.withDO(u.DO.Scopes(funcs...)) +} + +func (u userQueryDo) Unscoped() *userQueryDo { + return u.withDO(u.DO.Unscoped()) +} + +func 
(u userQueryDo) Create(values ...*User) error { + if len(values) == 0 { + return nil + } + return u.DO.Create(values) +} + +func (u userQueryDo) CreateInBatches(values []*User, batchSize int) error { + return u.DO.CreateInBatches(values, batchSize) +} + +// Save : !!! underlying implementation is different with GORM +// The method is equivalent to executing the statement: db.Clauses(clause.OnConflict{UpdateAll: true}).Create(values) +func (u userQueryDo) Save(values ...*User) error { + if len(values) == 0 { + return nil + } + return u.DO.Save(values) +} + +func (u userQueryDo) First() (*User, error) { + if result, err := u.DO.First(); err != nil { + return nil, err + } else { + return result.(*User), nil + } +} + +func (u userQueryDo) Take() (*User, error) { + if result, err := u.DO.Take(); err != nil { + return nil, err + } else { + return result.(*User), nil + } +} + +func (u userQueryDo) Last() (*User, error) { + if result, err := u.DO.Last(); err != nil { + return nil, err + } else { + return result.(*User), nil + } +} + +func (u userQueryDo) Find() ([]*User, error) { + result, err := u.DO.Find() + return result.([]*User), err +} + +func (u userQueryDo) FindInBatch(batchSize int, fc func(tx gen.Dao, batch int) error) (results []*User, err error) { + buf := make([]*User, 0, batchSize) + err = u.DO.FindInBatches(&buf, batchSize, func(tx gen.Dao, batch int) error { + defer func() { results = append(results, buf...) }() + return fc(tx, batch) + }) + return results, err +} + +func (u userQueryDo) FindInBatches(result *[]*User, batchSize int, fc func(tx gen.Dao, batch int) error) error { + return u.DO.FindInBatches(result, batchSize, fc) +} + +func (u userQueryDo) Attrs(attrs ...field.AssignExpr) *userQueryDo { + return u.withDO(u.DO.Attrs(attrs...)) +} + +func (u userQueryDo) Assign(attrs ...field.AssignExpr) *userQueryDo { + return u.withDO(u.DO.Assign(attrs...)) +} + +func (u userQueryDo) Joins(fields ...field.RelationField) *userQueryDo { + for _, _f := range fields { + u = *u.withDO(u.DO.Joins(_f)) + } + return &u +} + +func (u userQueryDo) Preload(fields ...field.RelationField) *userQueryDo { + for _, _f := range fields { + u = *u.withDO(u.DO.Preload(_f)) + } + return &u +} + +func (u userQueryDo) FirstOrInit() (*User, error) { + if result, err := u.DO.FirstOrInit(); err != nil { + return nil, err + } else { + return result.(*User), nil + } +} + +func (u userQueryDo) FirstOrCreate() (*User, error) { + if result, err := u.DO.FirstOrCreate(); err != nil { + return nil, err + } else { + return result.(*User), nil + } +} + +func (u userQueryDo) FindByPage(offset int, limit int) (result []*User, count int64, err error) { + result, err = u.Offset(offset).Limit(limit).Find() + if err != nil { + return + } + + if size := len(result); 0 < limit && 0 < size && size < limit { + count = int64(size + offset) + return + } + + count, err = u.Offset(-1).Limit(-1).Count() + return +} + +func (u userQueryDo) ScanByPage(result interface{}, offset int, limit int) (count int64, err error) { + count, err = u.Count() + if err != nil { + return + } + + err = u.Offset(offset).Limit(limit).Scan(result) + return +} + +func (u userQueryDo) Scan(result interface{}) (err error) { + return u.DO.Scan(result) +} + +func (u userQueryDo) Delete(models ...*User) (result gen.ResultInfo, err error) { + return u.DO.Delete(models) +} + +// ForceDelete performs a permanent delete (ignores soft-delete) for current scope. 
+func (u userQueryDo) ForceDelete() (gen.ResultInfo, error) { + return u.Unscoped().Delete() +} + +// Inc increases the given column by step for current scope. +func (u userQueryDo) Inc(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column + step + e := field.NewUnsafeFieldRaw("?+?", column.RawExpr(), step) + return u.DO.UpdateColumn(column, e) +} + +// Dec decreases the given column by step for current scope. +func (u userQueryDo) Dec(column field.Expr, step int64) (gen.ResultInfo, error) { + // column = column - step + e := field.NewUnsafeFieldRaw("?-?", column.RawExpr(), step) + return u.DO.UpdateColumn(column, e) +} + +// Sum returns SUM(column) for current scope. +func (u userQueryDo) Sum(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("SUM(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Avg returns AVG(column) for current scope. +func (u userQueryDo) Avg(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("AVG(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Min returns MIN(column) for current scope. +func (u userQueryDo) Min(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MIN(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// Max returns MAX(column) for current scope. +func (u userQueryDo) Max(column field.Expr) (float64, error) { + var _v float64 + agg := field.NewUnsafeFieldRaw("MAX(?)", column.RawExpr()) + if err := u.Select(agg).Scan(&_v); err != nil { + return 0, err + } + return _v, nil +} + +// PluckMap returns a map[key]value for selected key/value expressions within current scope. +func (u userQueryDo) PluckMap(key, val field.Expr) (map[interface{}]interface{}, error) { + do := u.Select(key, val) + rows, err := do.DO.Rows() + if err != nil { + return nil, err + } + defer rows.Close() + mm := make(map[interface{}]interface{}) + for rows.Next() { + var k interface{} + var v interface{} + if err := rows.Scan(&k, &v); err != nil { + return nil, err + } + mm[k] = v + } + return mm, rows.Err() +} + +// Exists returns true if any record matches the given conditions. +func (u userQueryDo) Exists(conds ...gen.Condition) (bool, error) { + cnt, err := u.Where(conds...).Count() + if err != nil { + return false, err + } + return cnt > 0, nil +} + +// PluckIDs returns all primary key values under current scope. +func (u userQueryDo) PluckIDs() ([]int64, error) { + ids := make([]int64, 0, 16) + pk := field.NewInt64(u.TableName(), "id") + if err := u.DO.Pluck(pk, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// GetByID finds a single record by primary key. +func (u userQueryDo) GetByID(id int64) (*User, error) { + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.Eq(id)).First() +} + +// GetByIDs finds records by primary key list. +func (u userQueryDo) GetByIDs(ids ...int64) ([]*User, error) { + if len(ids) == 0 { + return []*User{}, nil + } + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.In(ids...)).Find() +} + +// DeleteByID deletes records by primary key. +func (u userQueryDo) DeleteByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.Eq(id)).Delete() +} + +// DeleteByIDs deletes records by a list of primary keys. 
+func (u userQueryDo) DeleteByIDs(ids ...int64) (gen.ResultInfo, error) { + if len(ids) == 0 { + return gen.ResultInfo{RowsAffected: 0, Error: nil}, nil + } + pk := field.NewInt64(u.TableName(), "id") + return u.Where(pk.In(ids...)).Delete() +} + +// RestoreWhere sets deleted_at to NULL for rows matching current scope + conds. +func (u userQueryDo) RestoreWhere(conds ...gen.Condition) (gen.ResultInfo, error) { + col := field.NewField(u.TableName(), "deleted_at") + return u.Unscoped().Where(conds...).UpdateColumn(col, nil) +} + +// RestoreByID sets deleted_at to NULL for the given primary key. +func (u userQueryDo) RestoreByID(id int64) (gen.ResultInfo, error) { + pk := field.NewInt64(u.TableName(), "id") + col := field.NewField(u.TableName(), "deleted_at") + return u.Unscoped().Where(pk.Eq(id)).UpdateColumn(col, nil) +} + +func (u *userQueryDo) withDO(do gen.Dao) *userQueryDo { + u.DO = *do.(*gen.DO) + return u +} diff --git a/backend_v1/docs/docs.go b/backend_v1/docs/docs.go new file mode 100644 index 0000000..8c35c88 --- /dev/null +++ b/backend_v1/docs/docs.go @@ -0,0 +1,141 @@ +// Package docs Code generated by swaggo/swag. DO NOT EDIT +package docs + +import "github.com/rogeecn/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/v1/medias/{id}": { + "post": { + "description": "Test", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Test" + ], + "summary": "Test", + "parameters": [ + { + "type": "integer", + "description": "ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "年龄", + "name": "age", + "in": "query" + }, + { + "type": "string", + "description": "名称", + "name": "name", + "in": "query" + }, + { + "type": "integer", + "name": "limit", + "in": "query" + }, + { + "type": "integer", + "name": "page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "成功", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/requests.Pager" + }, + { + "type": "object", + "properties": { + "list": { + "$ref": "#/definitions/v1.ResponseItem" + } + } + } + ] + } + } + } + } + } + }, + "definitions": { + "requests.Pager": { + "type": "object", + "properties": { + "items": {}, + "limit": { + "type": "integer" + }, + "page": { + "type": "integer" + }, + "total": { + "type": "integer" + } + } + }, + "v1.ResponseItem": { + "type": "object" + } + }, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:8080", + BasePath: "/api/v1", + Schemes: []string{}, + Title: "ApiDoc", + Description: "This is a sample server celler server.", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/backend_v1/docs/ember.go 
b/backend_v1/docs/ember.go new file mode 100644 index 0000000..ae898ec --- /dev/null +++ b/backend_v1/docs/ember.go @@ -0,0 +1,10 @@ +package docs + +import ( + _ "embed" + + _ "github.com/rogeecn/swag" +) + +//go:embed swagger.json +var SwaggerSpec string diff --git a/backend_v1/docs/swagger.json b/backend_v1/docs/swagger.json new file mode 100644 index 0000000..c1e57c5 --- /dev/null +++ b/backend_v1/docs/swagger.json @@ -0,0 +1,117 @@ +{ + "swagger": "2.0", + "info": { + "description": "This is a sample server celler server.", + "title": "ApiDoc", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "UserName", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "1.0" + }, + "host": "localhost:8080", + "basePath": "/api/v1", + "paths": { + "/v1/medias/{id}": { + "post": { + "description": "Test", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Test" + ], + "summary": "Test", + "parameters": [ + { + "type": "integer", + "description": "ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "integer", + "description": "年龄", + "name": "age", + "in": "query" + }, + { + "type": "string", + "description": "名称", + "name": "name", + "in": "query" + }, + { + "type": "integer", + "name": "limit", + "in": "query" + }, + { + "type": "integer", + "name": "page", + "in": "query" + } + ], + "responses": { + "200": { + "description": "成功", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/requests.Pager" + }, + { + "type": "object", + "properties": { + "list": { + "$ref": "#/definitions/v1.ResponseItem" + } + } + } + ] + } + } + } + } + } + }, + "definitions": { + "requests.Pager": { + "type": "object", + "properties": { + "items": {}, + "limit": { + "type": "integer" + }, + "page": { + "type": "integer" + }, + "total": { + "type": "integer" + } + } + }, + "v1.ResponseItem": { + "type": "object" + } + }, + "securityDefinitions": { + "BasicAuth": { + "type": "basic" + } + }, + "externalDocs": { + "description": "OpenAPI", + "url": "https://swagger.io/resources/open-api/" + } +} \ No newline at end of file diff --git a/backend_v1/docs/swagger.yaml b/backend_v1/docs/swagger.yaml new file mode 100644 index 0000000..f9f7516 --- /dev/null +++ b/backend_v1/docs/swagger.yaml @@ -0,0 +1,75 @@ +basePath: /api/v1 +definitions: + requests.Pager: + properties: + items: {} + limit: + type: integer + page: + type: integer + total: + type: integer + type: object + v1.ResponseItem: + type: object +externalDocs: + description: OpenAPI + url: https://swagger.io/resources/open-api/ +host: localhost:8080 +info: + contact: + email: support@swagger.io + name: UserName + url: http://www.swagger.io/support + description: This is a sample server celler server. 
+ license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ + title: ApiDoc + version: "1.0" +paths: + /v1/medias/{id}: + post: + consumes: + - application/json + description: Test + parameters: + - description: ID + in: path + name: id + required: true + type: integer + - description: 年龄 + in: query + name: age + type: integer + - description: 名称 + in: query + name: name + type: string + - in: query + name: limit + type: integer + - in: query + name: page + type: integer + produces: + - application/json + responses: + "200": + description: 成功 + schema: + allOf: + - $ref: '#/definitions/requests.Pager' + - properties: + list: + $ref: '#/definitions/v1.ResponseItem' + type: object + summary: Test + tags: + - Test +securityDefinitions: + BasicAuth: + type: basic +swagger: "2.0" diff --git a/backend_v1/fixtures/.gitkeep b/backend_v1/fixtures/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/backend_v1/go.mod b/backend_v1/go.mod new file mode 100644 index 0000000..95cd9df --- /dev/null +++ b/backend_v1/go.mod @@ -0,0 +1,131 @@ +module quyun/v2 + +go 1.25.3 + +require ( + github.com/ThreeDotsLabs/watermill v1.5.1 + github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2 + github.com/ThreeDotsLabs/watermill-redisstream v1.4.5 + github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 + github.com/gofiber/fiber/v3 v3.0.0-rc.3 + github.com/gofiber/utils/v2 v2.0.0-rc.4 + github.com/golang-jwt/jwt/v4 v4.5.2 + github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.7.6 + github.com/pkg/errors v0.9.1 + github.com/pressly/goose/v3 v3.26.0 + github.com/redis/go-redis/v9 v9.17.2 + github.com/riverqueue/river v0.28.0 + github.com/riverqueue/river/riverdriver/riverdatabasesql v0.28.0 + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.28.0 + github.com/riverqueue/river/rivertype v0.28.0 + github.com/rogeecn/fabfile v1.7.0 + github.com/rogeecn/swag v1.0.1 + github.com/samber/lo v1.52.0 + github.com/sirupsen/logrus v1.9.3 + github.com/smartystreets/goconvey v1.8.1 + github.com/soheilhy/cmux v0.1.5 + github.com/spf13/cobra v1.10.1 + github.com/stretchr/testify v1.11.1 + github.com/swaggo/files/v2 v2.0.2 + go.ipao.vip/atom v1.2.1 + go.uber.org/dig v1.19.0 + golang.org/x/sync v0.19.0 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.11 + gorm.io/driver/postgres v1.6.0 + gorm.io/gorm v1.31.1 +) + +require ( + github.com/IBM/sarama v1.46.3 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/Rican7/retry v0.3.1 // indirect + github.com/andybalholm/brotli v1.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/spec v0.22.2 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect 
+ github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect + github.com/gofiber/schema v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/gopherjs/gopherjs v1.17.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/klauspost/compress v1.18.2 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/lithammer/shortuuid/v3 v3.0.7 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/philhofer/fwd v1.2.0 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect + github.com/riverqueue/river/riverdriver v0.28.0 // indirect + github.com/riverqueue/river/rivershared v0.28.0 // indirect + github.com/sagikazarmark/locafero v0.12.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/smarty/assertions v1.15.0 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spf13/viper v1.21.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect + github.com/tinylib/msgp v1.6.1 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.68.0 // indirect + github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/tools v0.40.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect + 
gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/backend_v1/go.sum b/backend_v1/go.sum new file mode 100644 index 0000000..d354a7b --- /dev/null +++ b/backend_v1/go.sum @@ -0,0 +1,392 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/IBM/sarama v1.46.3 h1:njRsX6jNlnR+ClJ8XmkO+CM4unbrNr/2vB5KK6UA+IE= +github.com/IBM/sarama v1.46.3/go.mod h1:GTUYiF9DMOZVe3FwyGT+dtSPceGFIgA+sPc5u6CBwko= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/Rican7/retry v0.3.1 h1:scY4IbO8swckzoA/11HgBwaZRJEyY9vaNJshcdhp1Mc= +github.com/Rican7/retry v0.3.1/go.mod h1:CxSDrhAyXmTMeEuRAnArMu1FHu48vtfjLREWqVl7Vw0= +github.com/ThreeDotsLabs/watermill v1.5.1 h1:t5xMivyf9tpmU3iozPqyrCZXHvoV1XQDfihas4sV0fY= +github.com/ThreeDotsLabs/watermill v1.5.1/go.mod h1:Uop10dA3VeJWsSvis9qO3vbVY892LARrKAdki6WtXS4= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2 h1:lLmrzZnl8o8U5uLVhMLSFHGSuWLcsqhW1MOtltx2CbQ= +github.com/ThreeDotsLabs/watermill-kafka/v3 v3.1.2/go.mod h1:o1GcoF/1CSJ9JSmQzUkULvpZeO635pZe+WWrYNFlJNk= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.5 h1:SCETqsAYo/CRBb7H3+zWCcSqhMpDrQA4I6dCqC7UPR4= +github.com/ThreeDotsLabs/watermill-redisstream v1.4.5/go.mod h1:Da3wqG1OcvHPODjuJcxSCY1O7D4loIZQpVbZ5u94xRo= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0 h1:g4uE5Nm3Z6LVB3m+uMgHlN4ne4bDpwf3RJmXYRgMv94= +github.com/ThreeDotsLabs/watermill-sql/v3 v3.1.0/go.mod h1:G8/otZYWLTCeYL2Ww3ujQ7gQ/3+jw5Bj0UtyKn7bBjA= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0 h1:R2zQhFwSCyyd7L43igYjDrH0wkC/i+QBPELuY0HOu84= +github.com/dnwe/otelsarama v0.0.0-20240308230250-9388d9d40bc0/go.mod h1:2MqLKYJfjs3UriXXF9Fd0Qmh/lhxi/6tHXkqtXxyIHc= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 
+github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/spec v0.22.2 h1:KEU4Fb+Lp1qg0V4MxrSCPv403ZjBl8Lx1a83gIPU8Qc= +github.com/go-openapi/spec v0.22.2/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod 
h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/gofiber/fiber/v3 v3.0.0-rc.3 h1:h0KXuRHbivSslIpoHD1R/XjUsjcGwt+2vK0avFiYonA= +github.com/gofiber/fiber/v3 v3.0.0-rc.3/go.mod h1:LNBPuS/rGoUFlOyy03fXsWAeWfdGoT1QytwjRVNSVWo= +github.com/gofiber/schema v1.6.0 h1:rAgVDFwhndtC+hgV7Vu5ItQCn7eC2mBA4Eu1/ZTiEYY= +github.com/gofiber/schema v1.6.0/go.mod h1:WNZWpQx8LlPSK7ZaX0OqOh+nQo/eW2OevsXs1VZfs/s= +github.com/gofiber/utils/v2 v2.0.0-rc.4 h1:CDjwPwtwwj1OTIf6v3iRk+D2wcdjUzwk91Ghu2TMNbE= +github.com/gofiber/utils/v2 v2.0.0-rc.4/go.mod h1:gXins5o7up+BQFiubmO8aUJc/+Mhd7EKXIiAK5GBomI= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 h1:Dj0L5fhJ9F82ZJyVOmBx6msDp/kfd1t9GRfny/mfJA0= +github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= +github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= 
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= +github.com/lithammer/shortuuid/v3 v3.0.7/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= +github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/pressly/goose/v3 v3.26.0 h1:KJakav68jdH0WDvoAcj8+n61WqOIaPGgH0bJWS6jpmM= +github.com/pressly/goose/v3 v3.26.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI= +github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/riverqueue/river v0.28.0 h1:j+1vqwRkFzI0kWTbU0p5mH+hX5x8ZJiyVH4p6T1OqLU= +github.com/riverqueue/river v0.28.0/go.mod h1:3oPHvH8cRjpBj391ULViBW+p6gBFRbWCO9RjJfDkb4M= +github.com/riverqueue/river/riverdriver v0.28.0 h1:FvzYl0JjpsxSyMtMRRENneggVdDDm8g69yyFCfDjkt8= +github.com/riverqueue/river/riverdriver v0.28.0/go.mod h1:mprPQKIzMlyrek0+w25K0hvHZilvWBdDRxLvUg6aZcs= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.28.0 h1:d1xLt7fDlCYkobLX9r7Jo86XM53iVlqlAbwXbQMKvKc= +github.com/riverqueue/river/riverdriver/riverdatabasesql v0.28.0/go.mod h1:P4sEKDITAxWCJt4NfXgYV+BvYnhccaYGJ0fViEHKdHk= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.28.0 h1:5OTfF344bIVKcpULFJNIqGqFQdqB63u8DOycyzAprww= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.28.0/go.mod h1:ZBXSTqJ8FinI4nf9Bz6KMv561YDWvcqKGc6gWghJcV4= +github.com/riverqueue/river/rivershared v0.28.0 h1:8bJ0SxX95dyjm/H3xYtOXprZgiJHF423msyfWHUNhUg= +github.com/riverqueue/river/rivershared v0.28.0/go.mod h1:6ujXUF1mwCKvgC/OVwRIn8Z3GIuOdjjhiTuRxc4jCb4= +github.com/riverqueue/river/rivertype v0.28.0 h1:JYSpY0DWg34bOKyxB/kWgGXeryjunckYgNNrgKYk4jg= +github.com/riverqueue/river/rivertype v0.28.0/go.mod h1:rWpgI59doOWS6zlVocROcwc00fZ1RbzRwsRTU8CDguw= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogeecn/fabfile v1.7.0 h1:qtwkqaBsJjWrggbvznbd0HGyJ0ebBTOBE893JvD5Tng= +github.com/rogeecn/fabfile v1.7.0/go.mod h1:EPwX7TtVcIWSLJkJAqxSzYjM/aV1Q0wymcaXqnMgzas= +github.com/rogeecn/swag v1.0.1 h1:s1yxLgopqO1m8sqGjVmt6ocMBRubMPIh2JtIPG4xjQE= +github.com/rogeecn/swag v1.0.1/go.mod h1:flG2NXERPxlRl2VdpU2VXTO8iBnQiERyowOXSkZVMOc= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4= +github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI= +github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw= +github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shamaton/msgpack/v2 v2.4.0 h1:O5Z08MRmbo0lA9o2xnQ4TXx6teJbPqEurqcCOQ8Oi/4= +github.com/shamaton/msgpack/v2 v2.4.0/go.mod 
h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU= +github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM= +github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty 
v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY= +github.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.68.0 h1:v12Nx16iepr8r9ySOwqI+5RBJ/DqTxhOy1HrHoDFnok= +github.com/valyala/fasthttp v1.68.0/go.mod h1:5EXiRfYQAoiO/khu4oU9VISC/eVY6JqmSpPJoHCKsz4= +github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= +github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.ipao.vip/atom v1.2.1 h1:7VlDLSkGNVEZLVM/JVcXXdMTO0+sFsxe1vfIM4Xz8uc= +go.ipao.vip/atom v1.2.1/go.mod h1:woAv+rZf0xd+7mEtKWv4PyazQARFLnrV/qA4qlAK008= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4= +go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4= +gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo= +gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= 
+gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= +modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ= +modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek= +modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= diff --git a/backend_v1/llm.txt b/backend_v1/llm.txt new file mode 100644 index 0000000..6d69a38 --- /dev/null +++ b/backend_v1/llm.txt @@ -0,0 +1,207 @@ +# Backend Dev Rules (HTTP API + Model) + +This file condenses `backend/docs/dev/http_api.md` + `backend/docs/dev/model.md` into a checklist/rule format for LLMs. + +--- + +## 0) Golden rules (DO / DO NOT) + +- DO follow existing module layout under `backend/app/http//`. +- DO keep controller methods thin: parse/bind → call `services.*` → return result/error. +- DO regenerate code after changes (routes/docs/models). +- DO NOT manually edit generated files: +- `backend/app/http/**/routes.gen.go` +- `backend/app/http/**/provider.gen.go` +- `backend/docs/docs.go` +- DO keep Swagger annotations consistent with actual Fiber route paths (including `:param`). + +--- + +## 1) Add a new HTTP API endpoint + +### 1.1 Where code lives + +- Controllers: `backend/app/http//*.go` +- Example module: `backend/app/http/super/tenant.go`, `backend/app/http/super/user.go` +- DTOs: `backend/app/http//dto/*` +- Routes (generated): `backend/app/http//routes.gen.go` +- Swagger output (generated): `backend/docs/swagger.yaml`, `backend/docs/swagger.json`, `backend/docs/docs.go` + +### 1.2 Controller method signatures + +- “Return data” endpoints: return `(, error)` +- Example: `(*requests.Pager, error)` for paginated list +- “No data” endpoints: return `error` + +### 1.3 Swagger annotations (minimum set) + +Place above the handler function: + +- `@Summary` +- `@Tags` +- `@Accept json` +- `@Produce json` +- `@Param` (query/path/body as needed) +- `@Success` for 200 responses +- `@Router [get|post|patch|delete|put]` +- `@Bind` for parameters (see below) + +Common `@Success` patterns: + +- Paginated list: `requests.Pager{items=dto.Item}` +- Single object: `dto.Item` +- Array: `{array} dto.Item` + +### 1.4 Parameter binding (@Bind) + +Format: + +`@Bind [key()] [model(|[:])]` + +Positions: + +- `path`, `query`, `body`, `header`, `cookie`, `local`, `file` + +Notes: + +- `paramName` MUST match function parameter name (case-sensitive). +- Default key name is `paramName` ; override via `key(...)`. +- Scalar types: `string/int/int32/int64/float32/float64/bool`. +- Pointer types are supported (framework will handle deref for most positions). + +#### Model binding (path-only) + +Used to bind a model instance from a path value: + +- `model(id)` (recommended) +- `model(id:int)` / `model(code:string)` +- `model(pkg.Type:field)` or `model(pkg.Type)` (default field is `id`) + +Behavior: + +- Generated binder queries by field and returns first row as the parameter value. +- Auto-imports field helper for query building. 
+ +### 1.5 Generate routes + providers + swagger docs + +Run from `backend/`: + +- Generate routes: `atomctl gen route` +- Generate providers: `atomctl gen provider` +- Generate swagger docs: `atomctl swag init` + +### 1.6 Local verify + +- Build/run: `make run` +- Use REST client examples: `backend/test/[module]/[controller].http` (extend it for new endpoints) + +### 1.7 Testing + +- Prefer existing test style under `backend/tests/e2e`. +- Run: `make test` + +--- + +## 2) Add / update a DB model + +Models live in: + +- `backend/database/models/*` (generated model code + optional manual extensions) + +### 2.1 Migration → model generation workflow + +1) Create migration: + +- `atomctl migrate create alter_table` or `atomctl migrate create create_table` + +2) Edit migration: + +- No explicit `BEGIN/COMMIT` needed (framework handles). +- Table name should be plural (e.g. `tenants`). + +3) Apply migration: + +- `atomctl migrate up` + +4) Map complex field types (JSON/ARRAY/UUID/…) via transform file: + +- `backend/database/.transform.yaml` → `field_type.` + +5) Generate models: + +- `atomctl gen model` + +### 2.2 Enum strategy + +- DO NOT use native DB ENUM. +- Define enums in Go under `backend/pkg/consts/
.go`, example: + +```go +// swagger:enum UserStatus +// ENUM(pending_verify, verified, banned, ) +type UserStatus string +``` + +- Generate enum code: `atomctl gen enum` + +### 2.3 Supported field types (`gen/types/`) + +`backend/database/.transform.yaml` typically imports `go.ipao.vip/gen` so you can use `types.*` in `field_type`. + +Common types: + +- JSON: `types.JSON`, `types.JSONMap`, `types.JSONType[T]`, `types.JSONSlice[T]` +- Array: `types.Array[T]` +- UUID: `types.UUID`, `types.BinUUID` +- Date/Time: `types.Date`, `types.Time` +- Money/XML/URL/Binary: `types.Money`, `types.XML`, `types.URL`, `types.HexBytes` +- Bit string: `types.BitString` +- Network: `types.Inet`, `types.CIDR`, `types.MACAddr` +- Ranges: `types.Int4Range`, `types.Int8Range`, `types.NumRange`, `types.TsRange`, `types.TstzRange`, `types.DateRange` +- Geometry: `types.Point`, `types.Polygon`, `types.Box`, `types.Circle`, `types.Path` +- Fulltext: `types.TSQuery`, `types.TSVector` +- Nullable: `types.Null[T]` and aliases (requires DB NULL) + +Reference: + +- Detailed examples: `gen/types/README.md` + +### 2.4 Relationships (GORM-aligned) via `.transform.yaml` + +Define in `field_relate.
.`: + +- `relation`: `belongs_to` | `has_one` | `has_many` | `many_to_many` +- `table`: target table +- `pivot`: join table (many_to_many only) +- `foreign_key`, `references` +- `join_foreign_key`, `join_references` (many_to_many only) +- `json`: JSON field name in API outputs + +Generator will convert snake_case columns to Go struct field names (e.g. `class_id` → `ClassID`). + +### 2.5 Extending generated models + +- Add manual methods/hooks by creating `backend/database/models/
.go`. +- Keep generated files untouched ; put custom logic only in your own file(s). + +--- + +## 3) Service layer injection (when adding services) + +- Services are in `backend/app/services`. +- After creating/updating a service provider, regenerate wiring: + - `atomctl gen service` + - `atomctl gen provider` +- Service call conventions: + - **Service-to-service (inside `services` package)**: call directly as `CamelCaseServiceStructName.Method()` (no `services.` prefix). + - **From outside (controllers/handlers/etc.)**: call via the package entrypoint `services.CamelCaseServiceStructName.Method()`. + +--- + +## 4) Quick command summary (run in `backend/`) + +- `make run` / `make build` / `make test` +- `atomctl gen route` / `atomctl gen provider` / `atomctl swag init` +- `atomctl migrate create ...` / `atomctl migrate up` +- `atomctl gen model` / `atomctl gen enum` / `atomctl gen service` +- `make init` (full refresh) diff --git a/backend_v1/main.go b/backend_v1/main.go new file mode 100644 index 0000000..a3996db --- /dev/null +++ b/backend_v1/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "quyun/v2/app/commands/http" + "quyun/v2/app/commands/migrate" + "quyun/v2/pkg/utils" + + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom" +) + +// @title ApiDoc +// @version 1.0 +// @description This is a sample server celler server. +// @termsOfService http://swagger.io/terms/ +// @contact.name UserName +// @contact.url http://www.swagger.io/support +// @contact.email support@swagger.io +// @license.name Apache 2.0 +// @license.url http://www.apache.org/licenses/LICENSE-2.0.html +// @host localhost:8080 +// @BasePath /api/v1 +// @securityDefinitions.basic BasicAuth +// @externalDocs.description OpenAPI +// @externalDocs.url https://swagger.io/resources/open-api/ + +func main() { + // 打印构建信息 + utils.PrintBuildInfo("v2") + + opts := []atom.Option{ + atom.Name("v2"), + http.Command(), + migrate.Command(), + } + + if err := atom.Serve(opts...); err != nil { + log.Fatal(err) + } +} diff --git a/backend_v1/main_test.go b/backend_v1/main_test.go new file mode 100644 index 0000000..06ab7d0 --- /dev/null +++ b/backend_v1/main_test.go @@ -0,0 +1 @@ +package main diff --git a/backend_v1/pkg/consts/consts.go b/backend_v1/pkg/consts/consts.go new file mode 100644 index 0000000..85fa520 --- /dev/null +++ b/backend_v1/pkg/consts/consts.go @@ -0,0 +1,8 @@ +package consts + +// Format +// +// // swagger:enum CacheKey +// // ENUM( +// // VerifyCode = "code:__CHANNEL__:%s", +// // ) diff --git a/backend_v1/pkg/proto/user/v1/user.pb.go b/backend_v1/pkg/proto/user/v1/user.pb.go new file mode 100644 index 0000000..796cb5c --- /dev/null +++ b/backend_v1/pkg/proto/user/v1/user.pb.go @@ -0,0 +1,387 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: user/v1/user.proto + +package userv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// User represents a user entity +type User struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + Phone string `protobuf:"bytes,4,opt,name=phone,proto3" json:"phone,omitempty"` + CreateTime string `protobuf:"bytes,5,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + UpdateTime string `protobuf:"bytes,6,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *User) Reset() { + *x = User{} + mi := &file_user_v1_user_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. +func (*User) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{0} +} + +func (x *User) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *User) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *User) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *User) GetPhone() string { + if x != nil { + return x.Phone + } + return "" +} + +func (x *User) GetCreateTime() string { + if x != nil { + return x.CreateTime + } + return "" +} + +func (x *User) GetUpdateTime() string { + if x != nil { + return x.UpdateTime + } + return "" +} + +type ListUsersRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageNumber int32 `protobuf:"varint,2,opt,name=page_number,json=pageNumber,proto3" json:"page_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListUsersRequest) Reset() { + *x = ListUsersRequest{} + mi := &file_user_v1_user_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUsersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersRequest) ProtoMessage() {} + +func (x *ListUsersRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersRequest.ProtoReflect.Descriptor instead. 
+func (*ListUsersRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{1} +} + +func (x *ListUsersRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListUsersRequest) GetPageNumber() int32 { + if x != nil { + return x.PageNumber + } + return 0 +} + +type ListUsersResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"` + Total int32 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListUsersResponse) Reset() { + *x = ListUsersResponse{} + mi := &file_user_v1_user_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListUsersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListUsersResponse) ProtoMessage() {} + +func (x *ListUsersResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListUsersResponse.ProtoReflect.Descriptor instead. +func (*ListUsersResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{2} +} + +func (x *ListUsersResponse) GetUsers() []*User { + if x != nil { + return x.Users + } + return nil +} + +func (x *ListUsersResponse) GetTotal() int32 { + if x != nil { + return x.Total + } + return 0 +} + +type GetUserRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetUserRequest) Reset() { + *x = GetUserRequest{} + mi := &file_user_v1_user_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserRequest) ProtoMessage() {} + +func (x *GetUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserRequest.ProtoReflect.Descriptor instead. 
+func (*GetUserRequest) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{3} +} + +func (x *GetUserRequest) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +type GetUserResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetUserResponse) Reset() { + *x = GetUserResponse{} + mi := &file_user_v1_user_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetUserResponse) ProtoMessage() {} + +func (x *GetUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_user_v1_user_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetUserResponse.ProtoReflect.Descriptor instead. +func (*GetUserResponse) Descriptor() ([]byte, []int) { + return file_user_v1_user_proto_rawDescGZIP(), []int{4} +} + +func (x *GetUserResponse) GetUser() *User { + if x != nil { + return x.User + } + return nil +} + +var File_user_v1_user_proto protoreflect.FileDescriptor + +const file_user_v1_user_proto_rawDesc = "" + + "\n" + + "\x12user/v1/user.proto\x12\auser.v1\"\xa0\x01\n" + + "\x04User\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1a\n" + + "\busername\x18\x02 \x01(\tR\busername\x12\x14\n" + + "\x05email\x18\x03 \x01(\tR\x05email\x12\x14\n" + + "\x05phone\x18\x04 \x01(\tR\x05phone\x12\x1f\n" + + "\vcreate_time\x18\x05 \x01(\tR\n" + + "createTime\x12\x1f\n" + + "\vupdate_time\x18\x06 \x01(\tR\n" + + "updateTime\"P\n" + + "\x10ListUsersRequest\x12\x1b\n" + + "\tpage_size\x18\x01 \x01(\x05R\bpageSize\x12\x1f\n" + + "\vpage_number\x18\x02 \x01(\x05R\n" + + "pageNumber\"N\n" + + "\x11ListUsersResponse\x12#\n" + + "\x05users\x18\x01 \x03(\v2\r.user.v1.UserR\x05users\x12\x14\n" + + "\x05total\x18\x02 \x01(\x05R\x05total\" \n" + + "\x0eGetUserRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\"4\n" + + "\x0fGetUserResponse\x12!\n" + + "\x04user\x18\x01 \x01(\v2\r.user.v1.UserR\x04user2\x93\x01\n" + + "\vUserService\x12D\n" + + "\tListUsers\x12\x19.user.v1.ListUsersRequest\x1a\x1a.user.v1.ListUsersResponse\"\x00\x12>\n" + + "\aGetUser\x12\x17.user.v1.GetUserRequest\x1a\x18.user.v1.GetUserResponse\"\x00Bx\n" + + "\vcom.user.v1B\tUserProtoP\x01Z!quyun/v2/pkg/proto/user/v1;userv1\xa2\x02\x03UXX\xaa\x02\aUser.V1\xca\x02\aUser\\V1\xe2\x02\x13User\\V1\\GPBMetadata\xea\x02\bUser::V1b\x06proto3" + +var ( + file_user_v1_user_proto_rawDescOnce sync.Once + file_user_v1_user_proto_rawDescData []byte +) + +func file_user_v1_user_proto_rawDescGZIP() []byte { + file_user_v1_user_proto_rawDescOnce.Do(func() { + file_user_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc))) + }) + return file_user_v1_user_proto_rawDescData +} + +var file_user_v1_user_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_user_v1_user_proto_goTypes = []any{ + (*User)(nil), // 0: user.v1.User + (*ListUsersRequest)(nil), // 1: user.v1.ListUsersRequest + (*ListUsersResponse)(nil), // 2: user.v1.ListUsersResponse + (*GetUserRequest)(nil), // 3: user.v1.GetUserRequest + (*GetUserResponse)(nil), 
// 4: user.v1.GetUserResponse +} +var file_user_v1_user_proto_depIdxs = []int32{ + 0, // 0: user.v1.ListUsersResponse.users:type_name -> user.v1.User + 0, // 1: user.v1.GetUserResponse.user:type_name -> user.v1.User + 1, // 2: user.v1.UserService.ListUsers:input_type -> user.v1.ListUsersRequest + 3, // 3: user.v1.UserService.GetUser:input_type -> user.v1.GetUserRequest + 2, // 4: user.v1.UserService.ListUsers:output_type -> user.v1.ListUsersResponse + 4, // 5: user.v1.UserService.GetUser:output_type -> user.v1.GetUserResponse + 4, // [4:6] is the sub-list for method output_type + 2, // [2:4] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_user_v1_user_proto_init() } +func file_user_v1_user_proto_init() { + if File_user_v1_user_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_user_v1_user_proto_rawDesc), len(file_user_v1_user_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_user_v1_user_proto_goTypes, + DependencyIndexes: file_user_v1_user_proto_depIdxs, + MessageInfos: file_user_v1_user_proto_msgTypes, + }.Build() + File_user_v1_user_proto = out.File + file_user_v1_user_proto_goTypes = nil + file_user_v1_user_proto_depIdxs = nil +} diff --git a/backend_v1/pkg/proto/user/v1/user_grpc.pb.go b/backend_v1/pkg/proto/user/v1/user_grpc.pb.go new file mode 100644 index 0000000..439436c --- /dev/null +++ b/backend_v1/pkg/proto/user/v1/user_grpc.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc (unknown) +// source: user/v1/user.proto + +package userv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + UserService_ListUsers_FullMethodName = "/user.v1.UserService/ListUsers" + UserService_GetUser_FullMethodName = "/user.v1.UserService/GetUser" +) + +// UserServiceClient is the client API for UserService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// UserService provides user-related operations +type UserServiceClient interface { + // ListUsers returns a list of users with pagination + ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) +} + +type userServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewUserServiceClient(cc grpc.ClientConnInterface) UserServiceClient { + return &userServiceClient{cc} +} + +func (c *userServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(ListUsersResponse) + err := c.cc.Invoke(ctx, UserService_ListUsers_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetUserResponse) + err := c.cc.Invoke(ctx, UserService_GetUser_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserServiceServer is the server API for UserService service. +// All implementations must embed UnimplementedUserServiceServer +// for forward compatibility. +// +// UserService provides user-related operations +type UserServiceServer interface { + // ListUsers returns a list of users with pagination + ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) + // GetUser returns detailed information about a specific user + GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) + mustEmbedUnimplementedUserServiceServer() +} + +// UnimplementedUserServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedUserServiceServer struct{} + +func (UnimplementedUserServiceServer) ListUsers(context.Context, *ListUsersRequest) (*ListUsersResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListUsers not implemented") +} +func (UnimplementedUserServiceServer) GetUser(context.Context, *GetUserRequest) (*GetUserResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetUser not implemented") +} +func (UnimplementedUserServiceServer) mustEmbedUnimplementedUserServiceServer() {} +func (UnimplementedUserServiceServer) testEmbeddedByValue() {} + +// UnsafeUserServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to UserServiceServer will +// result in compilation errors. +type UnsafeUserServiceServer interface { + mustEmbedUnimplementedUserServiceServer() +} + +func RegisterUserServiceServer(s grpc.ServiceRegistrar, srv UserServiceServer) { + // If the following call panics, it indicates UnimplementedUserServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&UserService_ServiceDesc, srv) +} + +func _UserService_ListUsers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUsersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).ListUsers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UserService_ListUsers_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).ListUsers(ctx, req.(*ListUsersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserService_GetUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserServiceServer).GetUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: UserService_GetUser_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserServiceServer).GetUser(ctx, req.(*GetUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// UserService_ServiceDesc is the grpc.ServiceDesc for UserService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var UserService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "user.v1.UserService", + HandlerType: (*UserServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUsers", + Handler: _UserService_ListUsers_Handler, + }, + { + MethodName: "GetUser", + Handler: _UserService_GetUser_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "user/v1/user.proto", +} diff --git a/backend_v1/pkg/utils/buffer.go b/backend_v1/pkg/utils/buffer.go new file mode 100644 index 0000000..5746d74 --- /dev/null +++ b/backend_v1/pkg/utils/buffer.go @@ -0,0 +1,26 @@ +package utils + +import ( + "bufio" + "io" +) + +// NewLogBuffer creates a buffer that can be used to capture output stream +// and write to a logger in real time +func NewLogBuffer(output func(string)) io.Writer { + reader, writer := io.Pipe() + + go func() { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + output(scanner.Text()) + } + }() + + return writer +} + +// NewCombinedBuffer combines multiple io.Writers +func NewCombinedBuffer(writers ...io.Writer) io.Writer { + return io.MultiWriter(writers...) 
+} diff --git a/backend_v1/pkg/utils/build_info.go b/backend_v1/pkg/utils/build_info.go new file mode 100644 index 0000000..8dfc5a7 --- /dev/null +++ b/backend_v1/pkg/utils/build_info.go @@ -0,0 +1,44 @@ +package utils + +import "fmt" + +// 构建信息变量,通过 ldflags 在构建时注入 +var ( + // Version 应用版本信息 + Version string + + // BuildAt 构建时间 + BuildAt string + + // GitHash Git 提交哈希 + GitHash string +) + +// GetBuildInfo 获取构建信息 +func GetBuildInfo() map[string]string { + return map[string]string{ + "version": Version, + "buildAt": BuildAt, + "gitHash": GitHash, + } +} + +// PrintBuildInfo 打印构建信息 +func PrintBuildInfo(appName string) { + buildInfo := GetBuildInfo() + + println("========================================") + printf("🚀 %s\n", appName) + println("========================================") + printf("📋 Version: %s\n", buildInfo["version"]) + printf("🕐 Build Time: %s\n", buildInfo["buildAt"]) + printf("🔗 Git Hash: %s\n", buildInfo["gitHash"]) + println("========================================") + println("🌟 Application is starting...") + println() +} + +// 为了避免导入 fmt 包,我们使用内置的 print 和 printf 函数 +func printf(format string, args ...interface{}) { + print(fmt.Sprintf(format, args...)) +} diff --git a/backend_v1/proto/user/v1/user.proto b/backend_v1/proto/user/v1/user.proto new file mode 100644 index 0000000..40be68d --- /dev/null +++ b/backend_v1/proto/user/v1/user.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +package user.v1; + +// User represents a user entity +message User { + int64 id = 1; + string username = 2; + string email = 3; + string phone = 4; + string create_time = 5; + string update_time = 6; +} + +message ListUsersRequest { + int32 page_size = 1; + int32 page_number = 2; +} + +message ListUsersResponse { + repeated User users = 1; + int32 total = 2; +} + +message GetUserRequest { + int64 id = 1; +} + +message GetUserResponse { + User user = 1; +} + +// UserService provides user-related operations +service UserService { + // ListUsers returns a list of users with pagination + rpc ListUsers(ListUsersRequest) returns (ListUsersResponse) {} + + // GetUser returns detailed information about a specific user + rpc GetUser(GetUserRequest) returns (GetUserResponse) {} +} diff --git a/backend_v1/providers/app/app.go b/backend_v1/providers/app/app.go new file mode 100644 index 0000000..d0a566e --- /dev/null +++ b/backend_v1/providers/app/app.go @@ -0,0 +1,18 @@ +package app + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*Config, error) { + return &config, nil + }, o.DiOptions()...) +} diff --git a/backend_v1/providers/app/config.gen.go b/backend_v1/providers/app/config.gen.go new file mode 100644 index 0000000..702160e --- /dev/null +++ b/backend_v1/providers/app/config.gen.go @@ -0,0 +1,179 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: - +// Revision: - +// Build Date: - +// Built By: - + +package app + +import ( + "database/sql/driver" + "errors" + "fmt" + "strings" +) + +const ( + // AppModeDevelopment is a AppMode of type development. + AppModeDevelopment AppMode = "development" + // AppModeRelease is a AppMode of type release. + AppModeRelease AppMode = "release" + // AppModeTest is a AppMode of type test. 
+ AppModeTest AppMode = "test" +) + +var ErrInvalidAppMode = fmt.Errorf("not a valid AppMode, try [%s]", strings.Join(_AppModeNames, ", ")) + +var _AppModeNames = []string{ + string(AppModeDevelopment), + string(AppModeRelease), + string(AppModeTest), +} + +// AppModeNames returns a list of possible string values of AppMode. +func AppModeNames() []string { + tmp := make([]string, len(_AppModeNames)) + copy(tmp, _AppModeNames) + return tmp +} + +// AppModeValues returns a list of the values for AppMode +func AppModeValues() []AppMode { + return []AppMode{ + AppModeDevelopment, + AppModeRelease, + AppModeTest, + } +} + +// String implements the Stringer interface. +func (x AppMode) String() string { + return string(x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x AppMode) IsValid() bool { + _, err := ParseAppMode(string(x)) + return err == nil +} + +var _AppModeValue = map[string]AppMode{ + "development": AppModeDevelopment, + "release": AppModeRelease, + "test": AppModeTest, +} + +// ParseAppMode attempts to convert a string to a AppMode. +func ParseAppMode(name string) (AppMode, error) { + if x, ok := _AppModeValue[name]; ok { + return x, nil + } + return AppMode(""), fmt.Errorf("%s is %w", name, ErrInvalidAppMode) +} + +var errAppModeNilPtr = errors.New("value pointer is nil") // one per type for package clashes + +// Scan implements the Scanner interface. +func (x *AppMode) Scan(value interface{}) (err error) { + if value == nil { + *x = AppMode("") + return + } + + // A wider range of scannable types. + // driver.Value values at the top of the list for expediency + switch v := value.(type) { + case string: + *x, err = ParseAppMode(v) + case []byte: + *x, err = ParseAppMode(string(v)) + case AppMode: + *x = v + case *AppMode: + if v == nil { + return errAppModeNilPtr + } + *x = *v + case *string: + if v == nil { + return errAppModeNilPtr + } + *x, err = ParseAppMode(*v) + default: + return errors.New("invalid type for AppMode") + } + + return +} + +// Value implements the driver Valuer interface. +func (x AppMode) Value() (driver.Value, error) { + return x.String(), nil +} + +// Set implements the Golang flag.Value interface func. +func (x *AppMode) Set(val string) error { + v, err := ParseAppMode(val) + *x = v + return err +} + +// Get implements the Golang flag.Getter interface func. +func (x *AppMode) Get() interface{} { + return *x +} + +// Type implements the github.com/spf13/pFlag Value interface. +func (x *AppMode) Type() string { + return "AppMode" +} + +type NullAppMode struct { + AppMode AppMode + Valid bool +} + +func NewNullAppMode(val interface{}) (x NullAppMode) { + err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value. + _ = err // make any errcheck linters happy + return +} + +// Scan implements the Scanner interface. +func (x *NullAppMode) Scan(value interface{}) (err error) { + if value == nil { + x.AppMode, x.Valid = AppMode(""), false + return + } + + err = x.AppMode.Scan(value) + x.Valid = (err == nil) + return +} + +// Value implements the driver Valuer interface. +func (x NullAppMode) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + // driver.Value accepts int64 for int values. + return string(x.AppMode), nil +} + +type NullAppModeStr struct { + NullAppMode +} + +func NewNullAppModeStr(val interface{}) (x NullAppModeStr) { + x.Scan(val) // yes, we ignore this error, it will just be an invalid value. 
+ return +} + +// Value implements the driver Valuer interface. +func (x NullAppModeStr) Value() (driver.Value, error) { + if !x.Valid { + return nil, nil + } + return x.AppMode.String(), nil +} diff --git a/backend_v1/providers/app/config.go b/backend_v1/providers/app/config.go new file mode 100644 index 0000000..1d1d204 --- /dev/null +++ b/backend_v1/providers/app/config.go @@ -0,0 +1,45 @@ +package app + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "App" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +// swagger:enum AppMode +// ENUM(development, release, test) +type AppMode string + +type Config struct { + Mode AppMode + Cert *Cert + BaseURI *string +} + +func (c *Config) IsDevMode() bool { + return c.Mode == AppModeDevelopment +} + +func (c *Config) IsReleaseMode() bool { + return c.Mode == AppModeRelease +} + +func (c *Config) IsTestMode() bool { + return c.Mode == AppModeTest +} + +type Cert struct { + CA string + Cert string + Key string +} diff --git a/backend_v1/providers/cmux/config.go b/backend_v1/providers/cmux/config.go new file mode 100644 index 0000000..c88aaba --- /dev/null +++ b/backend_v1/providers/cmux/config.go @@ -0,0 +1,109 @@ +package cmux + +import ( + "fmt" + "net" + "time" + + "quyun/v2/providers/grpc" + "quyun/v2/providers/http" + + log "github.com/sirupsen/logrus" + "github.com/soheilhy/cmux" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + "golang.org/x/sync/errgroup" +) + +const DefaultPrefix = "Cmux" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Host *string + Port uint +} + +func (h *Config) Address() string { + if h.Host == nil { + return fmt.Sprintf(":%d", h.Port) + } + return fmt.Sprintf("%s:%d", *h.Host, h.Port) +} + +type CMux struct { + Http *http.Service + Grpc *grpc.Grpc + Mux cmux.CMux + Base net.Listener +} + +func (c *CMux) Serve() error { + // Protect against slowloris connections when sniffing protocol + // Safe even if SetReadTimeout is a no-op in the cmux version in use + c.Mux.SetReadTimeout(1 * time.Second) + + addr := "" + if c.Base != nil && c.Base.Addr() != nil { + addr = c.Base.Addr().String() + } + log.WithFields(log.Fields{ + "addr": addr, + }).Info("cmux starting") + + // Route classic HTTP/1.x traffic to the HTTP service + httpL := c.Mux.Match(cmux.HTTP1Fast()) + + // Route gRPC (HTTP/2 with content-type application/grpc) to the gRPC service. + // Additionally, send other HTTP/2 traffic to gRPC since Fiber (HTTP) does not serve HTTP/2. 
+ grpcL := c.Mux.Match( + cmux.HTTP2HeaderField("content-type", "application/grpc"), + cmux.HTTP2(), + ) + + var eg errgroup.Group + eg.Go(func() error { + log.WithField("addr", addr).Info("grpc serving via cmux") + err := c.Grpc.ServeWithListener(grpcL) + if err != nil { + log.WithError(err).Error("grpc server exited with error") + } else { + log.Info("grpc server exited") + } + return err + }) + + eg.Go(func() error { + log.WithField("addr", addr).Info("http serving via cmux") + err := c.Http.Listener(httpL) + if err != nil { + log.WithError(err).Error("http server exited with error") + } else { + log.Info("http server exited") + } + return err + }) + + // Run cmux dispatcher; wait for the first error from any goroutine + eg.Go(func() error { + err := c.Mux.Serve() + if err != nil { + log.WithError(err).Error("cmux exited with error") + } else { + log.Info("cmux exited") + } + return err + }) + err := eg.Wait() + if err == nil { + log.Info("cmux and sub-servers exited cleanly") + } + return err +} diff --git a/backend_v1/providers/cmux/provider.go b/backend_v1/providers/cmux/provider.go new file mode 100644 index 0000000..8c10f55 --- /dev/null +++ b/backend_v1/providers/cmux/provider.go @@ -0,0 +1,37 @@ +package cmux + +import ( + "net" + + "quyun/v2/providers/grpc" + "quyun/v2/providers/http" + + "github.com/soheilhy/cmux" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func(http *http.Service, grpc *grpc.Grpc) (*CMux, error) { + l, err := net.Listen("tcp", config.Address()) + if err != nil { + return nil, err + } + + mux := &CMux{ + Http: http, + Grpc: grpc, + Mux: cmux.New(l), + Base: l, + } + // Ensure cmux stops accepting new connections on shutdown + container.AddCloseAble(func() { _ = l.Close() }) + + return mux, nil + }, o.DiOptions()...) 
+} diff --git a/backend_v1/providers/event/channel.go b/backend_v1/providers/event/channel.go new file mode 100644 index 0000000..f8f25f8 --- /dev/null +++ b/backend_v1/providers/event/channel.go @@ -0,0 +1,30 @@ +package event + +import "go.ipao.vip/atom/contracts" + +const ( + Go contracts.Channel = "go" + Kafka contracts.Channel = "kafka" + Redis contracts.Channel = "redis" + Sql contracts.Channel = "sql" +) + +type DefaultPublishTo struct{} + +func (d *DefaultPublishTo) PublishTo() (contracts.Channel, string) { + return Go, "event:processed" +} + +type DefaultChannel struct{} + +func (d *DefaultChannel) Channel() contracts.Channel { return Go } + +// kafka +type KafkaChannel struct{} + +func (k *KafkaChannel) Channel() contracts.Channel { return Kafka } + +// kafka +type RedisChannel struct{} + +func (k *RedisChannel) Channel() contracts.Channel { return Redis } diff --git a/backend_v1/providers/event/config.go b/backend_v1/providers/event/config.go new file mode 100644 index 0000000..ba5509f --- /dev/null +++ b/backend_v1/providers/event/config.go @@ -0,0 +1,99 @@ +package event + +import ( + "context" + + "github.com/ThreeDotsLabs/watermill" + "github.com/ThreeDotsLabs/watermill/message" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "Events" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: ProvideChannel, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Sql *ConfigSql + Kafka *ConfigKafka + Redis *ConfigRedis +} + +type ConfigSql struct { + ConsumerGroup string +} + +type ConfigRedis struct { + ConsumerGroup string + Streams []string +} + +type ConfigKafka struct { + ConsumerGroup string + Brokers []string +} + +type PubSub struct { + Router *message.Router + + publishers map[contracts.Channel]message.Publisher + subscribers map[contracts.Channel]message.Subscriber +} + +func (ps *PubSub) Serve(ctx context.Context) error { + if err := ps.Router.Run(ctx); err != nil { + return err + } + return nil +} + +// publish +func (ps *PubSub) Publish(e contracts.EventPublisher) error { + if e == nil { + return nil + } + + payload, err := e.Marshal() + if err != nil { + return err + } + + msg := message.NewMessage(watermill.NewUUID(), payload) + return ps.getPublisher(e.Channel()).Publish(e.Topic(), msg) +} + +// getPublisher returns the publisher for the specified channel. +func (ps *PubSub) getPublisher(channel contracts.Channel) message.Publisher { + if pub, ok := ps.publishers[channel]; ok { + return pub + } + return ps.publishers[Go] +} + +func (ps *PubSub) getSubscriber(channel contracts.Channel) message.Subscriber { + if sub, ok := ps.subscribers[channel]; ok { + return sub + } + return ps.subscribers[Go] +} + +func (ps *PubSub) Handle(handlerName string, sub contracts.EventHandler) { + publishToCh, publishToTopic := sub.PublishTo() + + ps.Router.AddHandler( + handlerName, + sub.Topic(), + ps.getSubscriber(sub.Channel()), + publishToTopic, + ps.getPublisher(publishToCh), + sub.Handler, + ) +} diff --git a/backend_v1/providers/event/logrus_adapter.go b/backend_v1/providers/event/logrus_adapter.go new file mode 100644 index 0000000..b4cdd41 --- /dev/null +++ b/backend_v1/providers/event/logrus_adapter.go @@ -0,0 +1,60 @@ +package event + +import ( + "github.com/ThreeDotsLabs/watermill" + "github.com/sirupsen/logrus" +) + +// LogrusLoggerAdapter is a watermill logger adapter for logrus. 
+type LogrusLoggerAdapter struct { + log *logrus.Logger + fields watermill.LogFields +} + +// NewLogrusLogger returns a LogrusLoggerAdapter that sends all logs to +// the passed logrus instance. +func LogrusAdapter() watermill.LoggerAdapter { + return &LogrusLoggerAdapter{log: logrus.StandardLogger()} +} + +// Error logs on level error with err as field and optional fields. +func (l *LogrusLoggerAdapter) Error(msg string, err error, fields watermill.LogFields) { + l.createEntry(fields.Add(watermill.LogFields{"err": err})).Error(msg) +} + +// Info logs on level info with optional fields. +func (l *LogrusLoggerAdapter) Info(msg string, fields watermill.LogFields) { + l.createEntry(fields).Info(msg) +} + +// Debug logs on level debug with optional fields. +func (l *LogrusLoggerAdapter) Debug(msg string, fields watermill.LogFields) { + l.createEntry(fields).Debug(msg) +} + +// Trace logs on level trace with optional fields. +func (l *LogrusLoggerAdapter) Trace(msg string, fields watermill.LogFields) { + l.createEntry(fields).Trace(msg) +} + +// With returns a new LogrusLoggerAdapter that includes fields +// to be re-used between logging statements. +func (l *LogrusLoggerAdapter) With(fields watermill.LogFields) watermill.LoggerAdapter { + return &LogrusLoggerAdapter{ + log: l.log, + fields: l.fields.Add(fields), + } +} + +// createEntry is a helper to add fields to a logrus entry if necessary. +func (l *LogrusLoggerAdapter) createEntry(fields watermill.LogFields) *logrus.Entry { + entry := logrus.NewEntry(l.log) + + allFields := fields.Add(l.fields) + + if len(allFields) > 0 { + entry = entry.WithFields(logrus.Fields(allFields)) + } + + return entry +} diff --git a/backend_v1/providers/event/provider.go b/backend_v1/providers/event/provider.go new file mode 100644 index 0000000..84cd980 --- /dev/null +++ b/backend_v1/providers/event/provider.go @@ -0,0 +1,109 @@ +package event + +import ( + sqlDB "database/sql" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" + + "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka" + "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream" + "github.com/ThreeDotsLabs/watermill-sql/v3/pkg/sql" + "github.com/ThreeDotsLabs/watermill/message" + "github.com/ThreeDotsLabs/watermill/pubsub/gochannel" + "github.com/redis/go-redis/v9" +) + +func ProvideChannel(opts ...opt.Option) error { + o := opt.New(opts...) 
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*PubSub, error) { + logger := LogrusAdapter() + + publishers := make(map[contracts.Channel]message.Publisher) + subscribers := make(map[contracts.Channel]message.Subscriber) + + // gochannel + client := gochannel.NewGoChannel(gochannel.Config{}, logger) + publishers[Go] = client + subscribers[Go] = client + + // kafka + if config.Kafka != nil { + kafkaPublisher, err := kafka.NewPublisher(kafka.PublisherConfig{ + Brokers: config.Kafka.Brokers, + Marshaler: kafka.DefaultMarshaler{}, + }, logger) + if err != nil { + return nil, err + } + publishers[Kafka] = kafkaPublisher + + kafkaSubscriber, err := kafka.NewSubscriber(kafka.SubscriberConfig{ + Brokers: config.Kafka.Brokers, + Unmarshaler: kafka.DefaultMarshaler{}, + ConsumerGroup: config.Kafka.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + subscribers[Kafka] = kafkaSubscriber + } + + // redis + if config.Redis != nil { + var rdb redis.UniversalClient + redisSubscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{ + Client: rdb, + Unmarshaller: redisstream.DefaultMarshallerUnmarshaller{}, + ConsumerGroup: config.Redis.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + subscribers[Redis] = redisSubscriber + + redisPublisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{ + Client: rdb, + Marshaller: redisstream.DefaultMarshallerUnmarshaller{}, + }, logger) + if err != nil { + return nil, err + } + publishers[Redis] = redisPublisher + } + + if config.Sql == nil { + var db *sqlDB.DB + sqlPublisher, err := sql.NewPublisher(db, sql.PublisherConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + AutoInitializeSchema: false, + }, logger) + if err != nil { + return nil, err + } + publishers[Sql] = sqlPublisher + + sqlSubscriber, err := sql.NewSubscriber(db, sql.SubscriberConfig{ + SchemaAdapter: sql.DefaultPostgreSQLSchema{}, + ConsumerGroup: config.Sql.ConsumerGroup, + }, logger) + if err != nil { + return nil, err + } + subscribers[Sql] = sqlSubscriber + } + + router, err := message.NewRouter(message.RouterConfig{}, logger) + if err != nil { + return nil, err + } + + return &PubSub{Router: router, publishers: publishers, subscribers: subscribers}, nil + }, o.DiOptions()...) 
+} diff --git a/backend_v1/providers/grpc/config.go b/backend_v1/providers/grpc/config.go new file mode 100644 index 0000000..b5dcbe2 --- /dev/null +++ b/backend_v1/providers/grpc/config.go @@ -0,0 +1,145 @@ +package grpc + +import ( + "fmt" + "net" + "time" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "google.golang.org/grpc" + "google.golang.org/grpc/health" + grpc_health_v1 "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" +) + +const DefaultPrefix = "Grpc" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Host *string + Port uint + // EnableReflection enables grpc/reflection registration when true + EnableReflection *bool + // EnableHealth enables gRPC health service registration when true + EnableHealth *bool + // ShutdownTimeoutSeconds controls graceful stop timeout; 0 uses default + ShutdownTimeoutSeconds uint +} + +func (h *Config) Address() string { + if h.Port == 0 { + h.Port = 8081 + } + + if h.Host == nil { + return fmt.Sprintf(":%d", h.Port) + } + return fmt.Sprintf("%s:%d", *h.Host, h.Port) +} + +type Grpc struct { + Server *grpc.Server + config *Config + + options []grpc.ServerOption + unaryInterceptors []grpc.UnaryServerInterceptor + streamInterceptors []grpc.StreamServerInterceptor +} + +func (g *Grpc) Init() error { + // merge options and build interceptor chains if provided + var srvOpts []grpc.ServerOption + if len(g.unaryInterceptors) > 0 { + srvOpts = append(srvOpts, grpc.ChainUnaryInterceptor(g.unaryInterceptors...)) + } + if len(g.streamInterceptors) > 0 { + srvOpts = append(srvOpts, grpc.ChainStreamInterceptor(g.streamInterceptors...)) + } + srvOpts = append(srvOpts, g.options...) + + g.Server = grpc.NewServer(srvOpts...) + + // optional reflection and health + if g.config.EnableReflection != nil && *g.config.EnableReflection { + reflection.Register(g.Server) + } + if g.config.EnableHealth != nil && *g.config.EnableHealth { + hs := health.NewServer() + grpc_health_v1.RegisterHealthServer(g.Server, hs) + } + + // graceful stop with timeout fallback to Stop() + container.AddCloseAble(func() { + timeout := g.config.ShutdownTimeoutSeconds + if timeout == 0 { + timeout = 10 + } + done := make(chan struct{}) + go func() { + g.Server.GracefulStop() + close(done) + }() + select { + case <-done: + // graceful stop finished + case <-time.After(time.Duration(timeout) * time.Second): + // timeout, force stop + g.Server.Stop() + } + }) + + return nil +} + +// Serve +func (g *Grpc) Serve() error { + if g.Server == nil { + if err := g.Init(); err != nil { + return err + } + } + + l, err := net.Listen("tcp", g.config.Address()) + if err != nil { + return err + } + + return g.Server.Serve(l) +} + +func (g *Grpc) ServeWithListener(ln net.Listener) error { + return g.Server.Serve(ln) +} + +// UseOptions appends gRPC ServerOptions to be applied when constructing the server. +func (g *Grpc) UseOptions(opts ...grpc.ServerOption) { + g.options = append(g.options, opts...) +} + +// UseUnaryInterceptors appends unary interceptors to be chained. +func (g *Grpc) UseUnaryInterceptors(inters ...grpc.UnaryServerInterceptor) { + g.unaryInterceptors = append(g.unaryInterceptors, inters...) +} + +// UseStreamInterceptors appends stream interceptors to be chained. 
+func (g *Grpc) UseStreamInterceptors(inters ...grpc.StreamServerInterceptor) { + g.streamInterceptors = append(g.streamInterceptors, inters...) +} + +// Reset clears all configured options and interceptors. +// Useful in tests to ensure isolation. +func (g *Grpc) Reset() { + g.options = nil + g.unaryInterceptors = nil + g.streamInterceptors = nil +} diff --git a/backend_v1/providers/grpc/options.md b/backend_v1/providers/grpc/options.md new file mode 100644 index 0000000..c721bbd --- /dev/null +++ b/backend_v1/providers/grpc/options.md @@ -0,0 +1,513 @@ +# gRPC Server Options & Interceptors Examples + +本文件给出一些可直接拷贝使用的示例,配合本包提供的注册函数: + +- `UseOptions(opts ...grpc.ServerOption)` +- `UseUnaryInterceptors(inters ...grpc.UnaryServerInterceptor)` +- `UseStreamInterceptors(inters ...grpc.StreamServerInterceptor)` + +建议在应用启动或 Provider 初始化阶段调用(在 gRPC 服务构造前)。 + +> 导入建议: +> +> ```go +> import ( +> pgrpc "test/providers/grpc" // 本包 +> grpc "google.golang.org/grpc" // 避免命名冲突 +> ) +> ``` + +## ServerOption 示例 + +最大消息大小限制: + +```go +pgrpc.UseOptions( +grpc.MaxRecvMsgSize(32<<20), // 32 MiB +grpc.MaxSendMsgSize(32<<20), // 32 MiB +) +``` + +限制最大并发流(对 HTTP/2 流并发施加上限): + +```go +pgrpc.UseOptions( +grpc.MaxConcurrentStreams(1024), +) +``` + +Keepalive 参数(需要 keepalive 包): + +```go +import ( +"time" +"google.golang.org/grpc/keepalive" +) + +pgrpc.UseOptions( +grpc.KeepaliveParams(keepalive.ServerParameters{ +MaxConnectionIdle: 5 * time.Minute, +MaxConnectionAge: 30 * time.Minute, +MaxConnectionAgeGrace: 5 * time.Minute, +Time: 2 * time.Minute, // ping 间隔 +Timeout: 20 * time.Second, +}), +grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ +MinTime: 1 * time.Minute, // 客户端 ping 最小间隔 +PermitWithoutStream: true, +}), +) +``` + +## UnaryServerInterceptor 示例 + +简单日志拦截器(logrus): + +```go +import ( +"context" +"time" +log "github.com/sirupsen/logrus" +"google.golang.org/grpc" +) + +func LoggingUnaryInterceptor( +ctx context.Context, +req any, +info *grpc.UnaryServerInfo, +handler grpc.UnaryHandler, +) (any, error) { +start := time.Now() +resp, err := handler(ctx, req) +dur := time.Since(start) +entry := log.WithFields(log.Fields{ +"grpc.method": info.FullMethod, +"grpc.duration_ms": dur.Milliseconds(), +}) +if err != nil { +entry.WithError(err).Warn("grpc unary request failed") +} else { +entry.Info("grpc unary request finished") +} +return resp, err +} + +// 注册 +pgrpc.UseUnaryInterceptors(LoggingUnaryInterceptor) +``` + +恢复拦截器(panic 捕获): + +```go +import ( +"context" +"fmt" +"runtime/debug" +log "github.com/sirupsen/logrus" +"google.golang.org/grpc" +"google.golang.org/grpc/status" +"google.golang.org/grpc/codes" +) + +func RecoveryUnaryInterceptor( +ctx context.Context, +req any, +info *grpc.UnaryServerInfo, +handler grpc.UnaryHandler, +) (any, error) { +defer func() { +if r := recover() ; r != nil { +log.WithField("grpc.method", info.FullMethod).Errorf("panic: %v\n%s", r, debug.Stack()) +} +}() +return handler(ctx, req) +} + +// 或者向客户端返回内部错误: +func RecoveryUnaryInterceptorWithError( +ctx context.Context, +req any, +info *grpc.UnaryServerInfo, +handler grpc.UnaryHandler, +) (any, error) { +defer func() { +if r := recover() ; r != nil { +log.WithField("grpc.method", info.FullMethod).Errorf("panic: %v\n%s", r, debug.Stack()) +} +}() +resp, err := handler(ctx, req) +if rec := recover() ; rec != nil { +return nil, status.Error(codes.Internal, fmt.Sprint(rec)) +} +return resp, err +} + +pgrpc.UseUnaryInterceptors(RecoveryUnaryInterceptor) +``` + +链式调用(与其它拦截器共同使用): + +```go 
+pgrpc.UseUnaryInterceptors(LoggingUnaryInterceptor, RecoveryUnaryInterceptor) +``` + +## StreamServerInterceptor 示例 + +简单日志拦截器: + +```go +import ( +"time" +log "github.com/sirupsen/logrus" +"google.golang.org/grpc" +) + +func LoggingStreamInterceptor( +srv any, +ss grpc.ServerStream, +info *grpc.StreamServerInfo, +handler grpc.StreamHandler, +) error { +start := time.Now() +err := handler(srv, ss) +dur := time.Since(start) +entry := log.WithFields(log.Fields{ +"grpc.method": info.FullMethod, +"grpc.is_client_stream": info.IsClientStream, +"grpc.is_server_stream": info.IsServerStream, +"grpc.duration_ms": dur.Milliseconds(), +}) +if err != nil { +entry.WithError(err).Warn("grpc stream request failed") +} else { +entry.Info("grpc stream request finished") +} +return err +} + +pgrpc.UseStreamInterceptors(LoggingStreamInterceptor) +``` + +恢复拦截器(panic 捕获): + +```go +import ( +"runtime/debug" +log "github.com/sirupsen/logrus" +"google.golang.org/grpc" +) + +func RecoveryStreamInterceptor( +srv any, +ss grpc.ServerStream, +info *grpc.StreamServerInfo, +handler grpc.StreamHandler, +) (err error) { +defer func() { +if r := recover() ; r != nil { +log.WithField("grpc.method", info.FullMethod).Errorf("panic: %v\n%s", r, debug.Stack()) +} +}() +return handler(srv, ss) +} + +pgrpc.UseStreamInterceptors(RecoveryStreamInterceptor) +``` + +## 组合与测试小贴士 + +- 可以多次调用 `UseOptions/UseUnaryInterceptors/UseStreamInterceptors`,最终会在服务构造时链式生效。 +- 单元测试中如需隔离,建议使用 `pgrpc.Reset()` 清理已注册的选项和拦截器。 +- 若要启用健康检查或反射,请在配置中设置: +- `EnableHealth = true` +- `EnableReflection = true` + +## 更多 ServerOption 示例 + +TLS(服务端或 mTLS): + +```go +import ( +"crypto/tls" +grpcCredentials "google.golang.org/grpc/credentials" +) + +// 使用自定义 tls.Config(可配置 mTLS) +var tlsConfig *tls.Config = &tls.Config{ /* ... 
*/ } +pgrpc.UseOptions( +grpc.Creds(grpcCredentials.NewTLS(tlsConfig)), +) + +// 或者从证书文件加载(仅服务端 TLS) +// pgrpc.UseOptions(grpc.Creds(grpcCredentials.NewServerTLSFromFile(certFile, keyFile))) +``` + +OpenTelemetry 统计/追踪(StatsHandler): + +```go +import ( +otelgrpc "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +) + +pgrpc.UseOptions( +grpc.StatsHandler(otelgrpc.NewServerHandler()), +) +``` + +流控/缓冲区调优: + +```go +pgrpc.UseOptions( +grpc.InitialWindowSize(1<<20), // 每个流初始窗口(字节) +grpc.InitialConnWindowSize(1<<21), // 连接级窗口 +grpc.ReadBufferSize(64<<10), // 读缓冲 64 KiB +grpc.WriteBufferSize(64<<10), // 写缓冲 64 KiB +) +``` + +连接超时与 Tap Handle(早期拦截): + +```go +import ( +"context" +"time" +"google.golang.org/grpc/tap" +) + +pgrpc.UseOptions( +grpc.ConnectionTimeout(5 * time.Second), +grpc.InTapHandle(func(ctx context.Context, info *tap.Info) (context.Context, error) { +// 在真正的 RPC 处理前进行快速拒绝(如黑名单、IP 检查等) +return ctx, nil +}), +) +``` + +未知服务处理与工作池: + +```go +pgrpc.UseOptions( +grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error { +// 统一记录未注册方法,或返回自定义错误 +return status.Error(codes.Unimplemented, "unknown service/method") +}), +grpc.NumStreamWorkers(8), // 针对 CPU 密集流处理的工作池 +) +``` + +## 更多 Unary 拦截器示例 + +基于 Metadata 的鉴权: + +```go +import ( +"context" +"strings" +"google.golang.org/grpc/metadata" +"google.golang.org/grpc/status" +"google.golang.org/grpc/codes" +) + +func AuthUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { +md, _ := metadata.FromIncomingContext(ctx) +token := "" +if vals := md.Get("authorization"); len(vals) > 0 { +token = vals[0] +} +if token == "" || !strings.HasPrefix(strings.ToLower(token), "bearer ") { +return nil, status.Error(codes.Unauthenticated, "missing or invalid token") +} +// TODO: 验证 JWT / API-Key +return handler(ctx, req) +} + +pgrpc.UseUnaryInterceptors(AuthUnaryInterceptor) +``` + +方法粒度速率限制(x/time/rate): + +```go +import ( +"context" +"sync" +"golang.org/x/time/rate" +"google.golang.org/grpc/status" +"google.golang.org/grpc/codes" +) + +var ( +rlmu sync.RWMutex +rlm = map[string]*rate.Limiter{} +) + +func limitFor(method string) *rate.Limiter { +rlmu.RLock() ; l := rlm[method]; rlmu.RUnlock() +if l != nil { return l } +rlmu.Lock() ; defer rlmu.Unlock() +if rlm[method] == nil { rlm[method] = rate.NewLimiter(100, 200) } // 100 rps, burst 200 +return rlm[method] +} + +func RateLimitUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { +l := limitFor(info.FullMethod) +if !l.Allow() { +return nil, status.Error(codes.ResourceExhausted, "rate limited") +} +return handler(ctx, req) +} + +pgrpc.UseUnaryInterceptors(RateLimitUnaryInterceptor) +``` + +Request-ID 注入与日志关联: + +```go +import ( +"context" +"github.com/google/uuid" +"google.golang.org/grpc/metadata" +) + +type ctxKey string +const requestIDKey ctxKey = "request_id" + +func RequestIDUnaryInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { +md, _ := metadata.FromIncomingContext(ctx) +var rid string +if v := md.Get("x-request-id"); len(v) > 0 { rid = v[0] } +if rid == "" { rid = uuid.New().String() } +ctx = context.WithValue(ctx, requestIDKey, rid) +return handler(ctx, req) +} + +pgrpc.UseUnaryInterceptors(RequestIDUnaryInterceptor) +``` + +无超时/超长请求治理(默认超时/拒绝超长): + +```go +import ( +"context" +"time" +"google.golang.org/grpc/status" +"google.golang.org/grpc/codes" +) + +func 
DeadlineUnaryInterceptor(max time.Duration) grpc.UnaryServerInterceptor { +return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { +if _, ok := ctx.Deadline() ; !ok { // 未设置超时 +var cancel context.CancelFunc +ctx, cancel = context.WithTimeout(ctx, max) +defer cancel() +} +resp, err := handler(ctx, req) +if err != nil && ctx.Err() == context.DeadlineExceeded { +return nil, status.Error(codes.DeadlineExceeded, "deadline exceeded") +} +return resp, err +} +} + +pgrpc.UseUnaryInterceptors(DeadlineUnaryInterceptor(5*time.Second)) +``` + +## 更多 Stream 拦截器示例 + +基于 Metadata 的鉴权(流): + +```go +import ( +"google.golang.org/grpc/metadata" +"google.golang.org/grpc/status" +"google.golang.org/grpc/codes" +) + +func AuthStreamInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +md, _ := metadata.FromIncomingContext(ss.Context()) +if len(md.Get("authorization")) == 0 { +return status.Error(codes.Unauthenticated, "missing token") +} +return handler(srv, ss) +} + +pgrpc.UseStreamInterceptors(AuthStreamInterceptor) +``` + +流级限流(示例:简单 Allow 检查): + +```go +func RateLimitStreamInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +l := limitFor(info.FullMethod) +if !l.Allow() { +return status.Error(codes.ResourceExhausted, "rate limited") +} +return handler(srv, ss) +} + +pgrpc.UseStreamInterceptors(RateLimitStreamInterceptor) +``` + +## 压缩与编码 + +注册 gzip 压缩器后,客户端可按需协商使用(新版本通过 encoding 注册): + +```go +import ( +_ "google.golang.org/grpc/encoding/gzip" // 注册 gzip 编解码器 +) + +// 仅需 import 即可,无额外 ServerOption +``` + +## OpenTelemetry 集成(推荐) + +使用 StatsHandler(推荐,不与拦截器同时使用,避免重复埋点): + +```go +import ( +otelgrpc "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +) + +// 基本接入:使用全局 Tracer/Meter(由 OTEL Provider 初始化) +handler := otelgrpc.NewServerHandler( +otelgrpc.WithTraceEvents(), // 在 span 中记录消息事件 +) +pgrpc.UseOptions(grpc.StatsHandler(handler)) + +// 忽略某些方法(如健康检查),避免噪声: +handler = otelgrpc.NewServerHandler( +otelgrpc.WithFilter(func(ctx context.Context, fullMethod string) bool { +return fullMethod != "/grpc.health.v1.Health/Check" +}), +) +pgrpc.UseOptions(grpc.StatsHandler(handler)) +``` + +使用拦截器版本(如你更偏好 Interceptor 方案;与 StatsHandler 二选一): + +```go +import ( +otelgrpc "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" +) + +pgrpc.UseUnaryInterceptors(otelgrpc.UnaryServerInterceptor()) +pgrpc.UseStreamInterceptors(otelgrpc.StreamServerInterceptor()) +``` + +> 注意:不要同时启用 StatsHandler 和拦截器,否则会重复生成 span/metrics。 + +## OpenTracing(Jaeger)集成 + +当使用 Tracing Provider(Jaeger + OpenTracing)时,可使用 opentracing 的 gRPC 拦截器: + +```go +import ( +opentracing "github.com/opentracing/opentracing-go" +otgrpc "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" +) + +pgrpc.UseUnaryInterceptors(otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer())) +pgrpc.UseStreamInterceptors(otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer())) +``` + +> 与 OTEL 方案互斥:如果已启用 OTEL,请不要再开启 OpenTracing 拦截器,以免重复埋点。 diff --git a/backend_v1/providers/grpc/provider.go b/backend_v1/providers/grpc/provider.go new file mode 100644 index 0000000..b76ff40 --- /dev/null +++ b/backend_v1/providers/grpc/provider.go @@ -0,0 +1,18 @@ +package grpc + +import ( + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
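+	// Bind the options, decode the gRPC provider's config section, then register *Grpc with the DI container.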
+ var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + + return container.Container.Provide(func() (*Grpc, error) { + return &Grpc{config: &config}, nil + }, o.DiOptions()...) +} diff --git a/backend_v1/providers/http/config.go b/backend_v1/providers/http/config.go new file mode 100644 index 0000000..611f210 --- /dev/null +++ b/backend_v1/providers/http/config.go @@ -0,0 +1,38 @@ +package http + +import ( + "fmt" +) + +const DefaultPrefix = "Http" + +type Config struct { + StaticPath *string + StaticRoute *string + BaseURI *string + Port uint + Tls *Tls + Cors *Cors +} + +type Tls struct { + Cert string + Key string +} + +type Cors struct { + Mode string + Whitelist []Whitelist +} + +type Whitelist struct { + AllowOrigin string + AllowHeaders string + AllowMethods string + ExposeHeaders string + AllowCredentials bool +} + +func (h *Config) Address() string { + return fmt.Sprintf(":%d", h.Port) +} diff --git a/backend_v1/providers/http/engine.go b/backend_v1/providers/http/engine.go new file mode 100644 index 0000000..40e15b6 --- /dev/null +++ b/backend_v1/providers/http/engine.go @@ -0,0 +1,203 @@ +package http + +import ( + "context" + "errors" + "fmt" + "net" + "runtime/debug" + "time" + + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/compress" + "github.com/gofiber/fiber/v3/middleware/cors" + "github.com/gofiber/fiber/v3/middleware/helmet" + "github.com/gofiber/fiber/v3/middleware/limiter" + "github.com/gofiber/fiber/v3/middleware/logger" + "github.com/gofiber/fiber/v3/middleware/recover" + "github.com/gofiber/fiber/v3/middleware/requestid" + "github.com/samber/lo" +) + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Service struct { + conf *Config + Engine *fiber.App +} + +func (svc *Service) listenerConfig() fiber.ListenConfig { + listenConfig := fiber.ListenConfig{ + EnablePrintRoutes: true, + // DisableStartupMessage: true, + } + + if svc.conf.Tls != nil { + if svc.conf.Tls.Cert == "" || svc.conf.Tls.Key == "" { + panic(errors.New("tls cert and key must be set")) + } + listenConfig.CertFile = svc.conf.Tls.Cert + listenConfig.CertKeyFile = svc.conf.Tls.Key + } + container.AddCloseAble(func() { + svc.Engine.ShutdownWithTimeout(time.Second * 10) + }) + return listenConfig +} + +func (svc *Service) Listener(ln net.Listener) error { + return svc.Engine.Listener(ln, svc.listenerConfig()) +} + +func (svc *Service) Serve(ctx context.Context) error { + ln, err := net.Listen("tcp", svc.conf.Address()) + if err != nil { + return err + } + + // Run the server in a goroutine so we can listen for context cancellation + serverErr := make(chan error, 1) + go func() { + serverErr <- svc.Engine.Listener(ln, svc.listenerConfig()) + }() + + select { + case <-ctx.Done(): + // Shutdown the server gracefully + if shutdownErr := svc.Engine.Shutdown(); shutdownErr != nil { + return shutdownErr + } + // treat context cancellation as graceful shutdown + return nil + case err := <-serverErr: + return err + } +} + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) 
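+	// Decode the Http config section; the CORS, TLS and listen-address settings below are driven by it.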
+	var config Config
+	if err := o.UnmarshalConfig(&config); err != nil {
+		return err
+	}
+
+	return container.Container.Provide(func() (*Service, error) {
+		engine := fiber.New(fiber.Config{
+			StrictRouting:      true,
+			CaseSensitive:      true,
+			BodyLimit:          10 * 1024 * 1024, // 10 MiB
+			ReadTimeout:        10 * time.Second,
+			WriteTimeout:       10 * time.Second,
+			IdleTimeout:        60 * time.Second,
+			ProxyHeader:        fiber.HeaderXForwardedFor,
+			EnableIPValidation: true,
+		})
+
+		// request id first for correlation
+		engine.Use(requestid.New())
+
+		// recover with stack + request id
+		engine.Use(recover.New(recover.Config{
+			EnableStackTrace: true,
+			StackTraceHandler: func(c fiber.Ctx, e any) {
+				rid := c.Get(fiber.HeaderXRequestID)
+				log.WithField("request_id", rid).Error(fmt.Sprintf("panic: %v\n%s\n", e, debug.Stack()))
+			},
+		}))
+
+		// basic security + compression
+		engine.Use(helmet.New())
+		engine.Use(compress.New(compress.Config{Level: compress.LevelDefault}))
+
+		// optional CORS based on config
+		if config.Cors != nil {
+			corsCfg := buildCORSConfig(config.Cors)
+			if corsCfg != nil {
+				engine.Use(cors.New(*corsCfg))
+			}
+		}
+
+		// logging with request id and latency
+		engine.Use(logger.New(logger.Config{
+			// requestid middleware stores ctx.Locals("requestid"); interpreted string so \n is a real newline
+			Format:     "${time} [${ip}] ${method} ${status} ${path} ${latency} rid=${locals:requestid} \"${ua}\"\n",
+			TimeFormat: time.RFC3339,
+			TimeZone:   "Asia/Shanghai",
+		}))
+
+		// rate limit placeholder; set an explicit Max via config before relying on it in production
+		engine.Use(limiter.New(limiter.Config{Max: 0}))
+
+		// static files (Fiber v3 Static helper moved; enable via filesystem middleware later)
+		// if config.StaticRoute != nil && config.StaticPath != nil { ... }
+
+		// health endpoints
+		engine.Get("/healthz", func(c fiber.Ctx) error { return c.SendStatus(fiber.StatusNoContent) })
+		engine.Get("/readyz", func(c fiber.Ctx) error { return c.SendStatus(fiber.StatusNoContent) })
+
+		engine.Hooks().OnPostShutdown(func(err error) error {
+			if err != nil {
+				log.Error("http server shutdown error: ", err)
+			}
+			log.Info("http server shut down successfully")
+			return nil
+		})
+
+		return &Service{
+			Engine: engine,
+			conf:   &config,
+		}, nil
+	}, o.DiOptions()...)
+} + +// buildCORSConfig converts provider Cors config into fiber cors.Config +func buildCORSConfig(c *Cors) *cors.Config { + if c == nil { + return nil + } + if c.Mode == "disabled" { + return nil + } + var ( + origins []string + headers []string + methods []string + exposes []string + allowCreds bool + ) + for _, w := range c.Whitelist { + if w.AllowOrigin != "" { + origins = append(origins, w.AllowOrigin) + } + if w.AllowHeaders != "" { + headers = append(headers, w.AllowHeaders) + } + if w.AllowMethods != "" { + methods = append(methods, w.AllowMethods) + } + if w.ExposeHeaders != "" { + exposes = append(exposes, w.ExposeHeaders) + } + allowCreds = allowCreds || w.AllowCredentials + } + + cfg := cors.Config{ + AllowOrigins: lo.Uniq(origins), + AllowHeaders: lo.Uniq(headers), + AllowMethods: lo.Uniq(methods), + ExposeHeaders: lo.Uniq(exposes), + AllowCredentials: allowCreds, + } + return &cfg +} diff --git a/backend_v1/providers/http/swagger/config.go b/backend_v1/providers/http/swagger/config.go new file mode 100644 index 0000000..4b535a7 --- /dev/null +++ b/backend_v1/providers/http/swagger/config.go @@ -0,0 +1,317 @@ +package swagger + +import ( + "html/template" +) + +// Config stores SwaggerUI configuration variables +type Config struct { + // This parameter can be used to name different swagger document instances. + // default: "" + InstanceName string `json:"-"` + + // Title pointing to title of HTML page. + // default: "Swagger UI" + Title string `json:"-"` + + // URL to fetch external configuration document from. + // default: "" + ConfigURL string `json:"configUrl,omitempty"` + + // The URL pointing to API definition (normally swagger.json or swagger.yaml). + // default: "doc.json" + URL string `json:"url,omitempty"` + + // Enables overriding configuration parameters via URL search params. + // default: false + QueryConfigEnabled bool `json:"queryConfigEnabled,omitempty"` + + // The name of a component available via the plugin system to use as the top-level layout for Swagger UI. + // default: "StandaloneLayout" + Layout string `json:"layout,omitempty"` + + // An array of plugin functions to use in Swagger UI. + // default: [SwaggerUIBundle.plugins.DownloadUrl] + Plugins []template.JS `json:"-"` + + // An array of presets to use in Swagger UI. Usually, you'll want to include ApisPreset if you use this option. + // default: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset] + Presets []template.JS `json:"-"` + + // If set to true, enables deep linking for tags and operations. + // default: true + DeepLinking bool `json:"deepLinking"` + + // Controls the display of operationId in operations list. + // default: false + DisplayOperationId bool `json:"displayOperationId,omitempty"` + + // The default expansion depth for models (set to -1 completely hide the models). + // default: 1 + DefaultModelsExpandDepth int `json:"defaultModelsExpandDepth,omitempty"` + + // The default expansion depth for the model on the model-example section. + // default: 1 + DefaultModelExpandDepth int `json:"defaultModelExpandDepth,omitempty"` + + // Controls how the model is shown when the API is first rendered. + // The user can always switch the rendering for a given model by clicking the 'Model' and 'Example Value' links. + // default: "example" + DefaultModelRendering string `json:"defaultModelRendering,omitempty"` + + // Controls the display of the request duration (in milliseconds) for "Try it out" requests. 
+ // default: false + DisplayRequestDuration bool `json:"displayRequestDuration,omitempty"` + + // Controls the default expansion setting for the operations and tags. + // 'list' (default, expands only the tags), + // 'full' (expands the tags and operations), + // 'none' (expands nothing) + DocExpansion string `json:"docExpansion,omitempty"` + + // If set, enables filtering. The top bar will show an edit box that you can use to filter the tagged operations that are shown. + // Can be Boolean to enable or disable, or a string, in which case filtering will be enabled using that string as the filter expression. + // Filtering is case sensitive matching the filter expression anywhere inside the tag. + // default: false + Filter FilterConfig `json:"-"` + + // If set, limits the number of tagged operations displayed to at most this many. The default is to show all operations. + // default: 0 + MaxDisplayedTags int `json:"maxDisplayedTags,omitempty"` + + // Controls the display of vendor extension (x-) fields and values for Operations, Parameters, Responses, and Schema. + // default: false + ShowExtensions bool `json:"showExtensions,omitempty"` + + // Controls the display of extensions (pattern, maxLength, minLength, maximum, minimum) fields and values for Parameters. + // default: false + ShowCommonExtensions bool `json:"showCommonExtensions,omitempty"` + + // Apply a sort to the tag list of each API. It can be 'alpha' (sort by paths alphanumerically) or a function (see Array.prototype.sort(). + // to learn how to write a sort function). Two tag name strings are passed to the sorter for each pass. + // default: "" -> Default is the order determined by Swagger UI. + TagsSorter template.JS `json:"-"` + + // Provides a mechanism to be notified when Swagger UI has finished rendering a newly provided definition. + // default: "" -> Function=NOOP + OnComplete template.JS `json:"-"` + + // An object with the activate and theme properties. + SyntaxHighlight *SyntaxHighlightConfig `json:"-"` + + // Controls whether the "Try it out" section should be enabled by default. + // default: false + TryItOutEnabled bool `json:"tryItOutEnabled,omitempty"` + + // Enables the request snippet section. When disabled, the legacy curl snippet will be used. + // default: false + RequestSnippetsEnabled bool `json:"requestSnippetsEnabled,omitempty"` + + // OAuth redirect URL. + // default: "" + OAuth2RedirectUrl string `json:"oauth2RedirectUrl,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 requests. + // Accepts one argument requestInterceptor(request) and must return the modified request, or a Promise that resolves to the modified request. + // default: "" + RequestInterceptor template.JS `json:"-"` + + // If set, MUST be an array of command line options available to the curl command. This can be set on the mutated request in the requestInterceptor function. + // For example request.curlOptions = ["-g", "--limit-rate 20k"] + // default: nil + RequestCurlOptions []string `json:"request.curlOptions,omitempty"` + + // MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 responses. + // Accepts one argument responseInterceptor(response) and must return the modified response, or a Promise that resolves to the modified response. 
+ // default: "" + ResponseInterceptor template.JS `json:"-"` + + // If set to true, uses the mutated request returned from a requestInterceptor to produce the curl command in the UI, + // otherwise the request before the requestInterceptor was applied is used. + // default: true + ShowMutatedRequest bool `json:"showMutatedRequest"` + + // List of HTTP methods that have the "Try it out" feature enabled. An empty array disables "Try it out" for all operations. + // This does not filter the operations from the display. + // Possible values are ["get", "put", "post", "delete", "options", "head", "patch", "trace"] + // default: nil + SupportedSubmitMethods []string `json:"supportedSubmitMethods,omitempty"` + + // By default, Swagger UI attempts to validate specs against swagger.io's online validator. You can use this parameter to set a different validator URL. + // For example for locally deployed validators (https://github.com/swagger-api/validator-badge). + // Setting it to either none, 127.0.0.1 or localhost will disable validation. + // default: "" + ValidatorUrl string `json:"validatorUrl,omitempty"` + + // If set to true, enables passing credentials, as defined in the Fetch standard, in CORS requests that are sent by the browser. + // Note that Swagger UI cannot currently set cookies cross-domain (see https://github.com/swagger-api/swagger-js/issues/1163). + // as a result, you will have to rely on browser-supplied cookies (which this setting enables sending) that Swagger UI cannot control. + // default: false + WithCredentials bool `json:"withCredentials,omitempty"` + + // Function to set default values to each property in model. Accepts one argument modelPropertyMacro(property), property is immutable. + // default: "" + ModelPropertyMacro template.JS `json:"-"` + + // Function to set default value to parameters. Accepts two arguments parameterMacro(operation, parameter). + // Operation and parameter are objects passed for context, both remain immutable. + // default: "" + ParameterMacro template.JS `json:"-"` + + // If set to true, it persists authorization data and it would not be lost on browser close/refresh. + // default: false + PersistAuthorization bool `json:"persistAuthorization,omitempty"` + + // Configuration information for OAuth2, optional if using OAuth2 + OAuth *OAuthConfig `json:"-"` + + // (authDefinitionKey, username, password) => action + // Programmatically set values for a Basic authorization scheme. + // default: "" + PreauthorizeBasic template.JS `json:"-"` + + // (authDefinitionKey, apiKeyValue) => action + // Programmatically set values for an API key or Bearer authorization scheme. + // In case of OpenAPI 3.0 Bearer scheme, apiKeyValue must contain just the token itself without the Bearer prefix. + // default: "" + PreauthorizeApiKey template.JS `json:"-"` + + // Applies custom CSS styles. + // default: "" + CustomStyle template.CSS `json:"-"` + + // Applies custom JavaScript scripts. + // default "" + CustomScript template.JS `json:"-"` +} + +type FilterConfig struct { + Enabled bool + Expression string +} + +func (fc FilterConfig) Value() interface{} { + if fc.Expression != "" { + return fc.Expression + } + return fc.Enabled +} + +type SyntaxHighlightConfig struct { + // Whether syntax highlighting should be activated or not. + // default: true + Activate bool `json:"activate"` + // Highlight.js syntax coloring theme to use. 
+	// Possible values are ["agate", "arta", "monokai", "nord", "obsidian", "tomorrow-night"]
+	// default: "agate"
+	Theme string `json:"theme,omitempty"`
+}
+
+func (shc SyntaxHighlightConfig) Value() interface{} {
+	if shc.Activate {
+		return shc
+	}
+	return false
+}
+
+type OAuthConfig struct {
+	// ID of the client sent to the OAuth2 provider.
+	// default: ""
+	ClientId string `json:"clientId,omitempty"`
+
+	// Never use this parameter in your production environment.
+	// It exposes crucial security information. This feature is intended for dev/test environments only.
+	// Secret of the client sent to the OAuth2 provider.
+	// default: ""
+	ClientSecret string `json:"clientSecret,omitempty"`
+
+	// Application name, displayed in authorization popup.
+	// default: ""
+	AppName string `json:"appName,omitempty"`
+
+	// Realm query parameter (for oauth1) added to authorizationUrl and tokenUrl.
+	// default: ""
+	Realm string `json:"realm,omitempty"`
+
+	// String array of initially selected oauth scopes
+	// default: nil
+	Scopes []string `json:"scopes,omitempty"`
+
+	// Additional query parameters added to authorizationUrl and tokenUrl.
+	// default: nil
+	AdditionalQueryStringParams map[string]string `json:"additionalQueryStringParams,omitempty"`
+
+	// Unavailable. Only activated for the accessCode flow.
+	// During the authorization_code request to the tokenUrl, pass the Client Password using the HTTP Basic Authentication scheme
+	// (Authorization header with Basic base64encode(client_id + client_secret)).
+	// default: false
+	UseBasicAuthenticationWithAccessCodeGrant bool `json:"useBasicAuthenticationWithAccessCodeGrant,omitempty"`
+
+	// Only applies to authorizationCode flows.
+	// Proof Key for Code Exchange brings enhanced security for OAuth public clients.
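+	// See RFC 7636 (PKCE) for background.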
+ // default: false + UsePkceWithAuthorizationCodeGrant bool `json:"usePkceWithAuthorizationCodeGrant,omitempty"` +} + +var ConfigDefault = Config{ + Title: "Swagger UI", + Layout: "StandaloneLayout", + Plugins: []template.JS{ + template.JS("SwaggerUIBundle.plugins.DownloadUrl"), + }, + Presets: []template.JS{ + template.JS("SwaggerUIBundle.presets.apis"), + template.JS("SwaggerUIStandalonePreset"), + }, + DeepLinking: true, + DefaultModelsExpandDepth: 1, + DefaultModelExpandDepth: 1, + DefaultModelRendering: "example", + DocExpansion: "list", + SyntaxHighlight: &SyntaxHighlightConfig{ + Activate: true, + Theme: "agate", + }, + ShowMutatedRequest: true, +} + +// Helper function to set default values +func configDefault(config ...Config) Config { + // Return default config if nothing provided + if len(config) < 1 { + return ConfigDefault + } + + // Override default config + cfg := config[0] + + if cfg.Title == "" { + cfg.Title = ConfigDefault.Title + } + + if cfg.Layout == "" { + cfg.Layout = ConfigDefault.Layout + } + + if cfg.DefaultModelRendering == "" { + cfg.DefaultModelRendering = ConfigDefault.DefaultModelRendering + } + + if cfg.DocExpansion == "" { + cfg.DocExpansion = ConfigDefault.DocExpansion + } + + if cfg.Plugins == nil { + cfg.Plugins = ConfigDefault.Plugins + } + + if cfg.Presets == nil { + cfg.Presets = ConfigDefault.Presets + } + + if cfg.SyntaxHighlight == nil { + cfg.SyntaxHighlight = ConfigDefault.SyntaxHighlight + } + + return cfg +} diff --git a/backend_v1/providers/http/swagger/swagger.go b/backend_v1/providers/http/swagger/swagger.go new file mode 100644 index 0000000..0722e61 --- /dev/null +++ b/backend_v1/providers/http/swagger/swagger.go @@ -0,0 +1,103 @@ +package swagger + +import ( + "fmt" + "html/template" + "path" + "strings" + "sync" + + "github.com/gofiber/fiber/v3" + "github.com/gofiber/fiber/v3/middleware/static" + "github.com/gofiber/utils/v2" + "github.com/rogeecn/swag" + swaggerFiles "github.com/swaggo/files/v2" +) + +const ( + defaultDocURL = "doc.json" + defaultIndex = "index.html" +) + +var HandlerDefault = New() + +// New returns custom handler +func New(config ...Config) fiber.Handler { + cfg := configDefault(config...) 
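+	// Parse the embedded Swagger UI index template once; each request below only executes it with cfg.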
+ + index, err := template.New("swagger_index.html").Parse(indexTmpl) + if err != nil { + panic(fmt.Errorf("fiber: swagger middleware error -> %w", err)) + } + + var ( + prefix string + once sync.Once + ) + + return func(c fiber.Ctx) error { + // Set prefix + once.Do( + func() { + prefix = strings.ReplaceAll(c.Route().Path, "*", "") + + forwardedPrefix := getForwardedPrefix(c) + if forwardedPrefix != "" { + prefix = forwardedPrefix + prefix + } + + // Set doc url + if len(cfg.URL) == 0 { + cfg.URL = path.Join(prefix, defaultDocURL) + } + }, + ) + + p := c.Path(utils.CopyString(c.Params("*"))) + + switch p { + case defaultIndex: + c.Type("html") + return index.Execute(c, cfg) + case defaultDocURL: + var doc string + if doc, err = swag.ReadDoc(cfg.InstanceName); err != nil { + return err + } + return c.Type("json").SendString(doc) + case "", "/": + return c.Redirect().To(path.Join(prefix, defaultIndex)) + default: + // return fs(c) + return static.New("/swagger", static.Config{ + FS: swaggerFiles.FS, + Browse: true, + })(c) + } + } +} + +func getForwardedPrefix(c fiber.Ctx) string { + header := c.GetReqHeaders()["X-Forwarded-Prefix"] + + if len(header) == 0 { + return "" + } + + prefix := "" + + for _, rawPrefix := range header { + endIndex := len(rawPrefix) + for endIndex > 1 && rawPrefix[endIndex-1] == '/' { + endIndex-- + } + + if endIndex != len(rawPrefix) { + prefix += rawPrefix[:endIndex] + } else { + prefix += rawPrefix + } + } + + return prefix +} diff --git a/backend_v1/providers/http/swagger/template.go b/backend_v1/providers/http/swagger/template.go new file mode 100644 index 0000000..d90607f --- /dev/null +++ b/backend_v1/providers/http/swagger/template.go @@ -0,0 +1,107 @@ +package swagger + +const indexTmpl string = ` + + + + + + {{.Title}} + + + + + {{- if .CustomStyle}} + + {{- end}} + {{- if .CustomScript}} + + {{- end}} + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +` diff --git a/backend_v1/providers/job/config.go b/backend_v1/providers/job/config.go new file mode 100644 index 0000000..cf54cc8 --- /dev/null +++ b/backend_v1/providers/job/config.go @@ -0,0 +1,67 @@ +package job + +import ( + "github.com/riverqueue/river" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "Job" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + // Optional per-queue worker concurrency. If empty, defaults apply. + QueueWorkers QueueWorkersConfig +} + +// QueueWorkers allows configuring worker concurrency per queue. +// Key is the queue name, value is MaxWorkers. If empty, defaults are used. +// Example TOML: +// +// [Job] +// # high=20, default=10, low=5 +// # QueueWorkers = { high = 20, default = 10, low = 5 } +type QueueWorkersConfig map[string]int + +const ( + PriorityDefault = river.PriorityDefault + PriorityLow = 2 + PriorityMiddle = 3 + PriorityHigh = 3 +) + +const ( + QueueHigh = "high" + QueueDefault = river.QueueDefault + QueueLow = "low" +) + +// queueConfig returns a river.QueueConfig map built from QueueWorkers or defaults. +func (c *Config) queueConfig() map[string]river.QueueConfig { + cfg := map[string]river.QueueConfig{} + if c == nil || len(c.QueueWorkers) == 0 { + cfg[QueueHigh] = river.QueueConfig{MaxWorkers: 10} + cfg[QueueDefault] = river.QueueConfig{MaxWorkers: 10} + cfg[QueueLow] = river.QueueConfig{MaxWorkers: 10} + return cfg + } + for name, n := range c.QueueWorkers { + if n <= 0 { + n = 1 + } + cfg[name] = river.QueueConfig{MaxWorkers: n} + } + + if _, ok := cfg[QueueDefault]; !ok { + cfg[QueueDefault] = river.QueueConfig{MaxWorkers: 10} + } + return cfg +} diff --git a/backend_v1/providers/job/provider.go b/backend_v1/providers/job/provider.go new file mode 100644 index 0000000..1c37353 --- /dev/null +++ b/backend_v1/providers/job/provider.go @@ -0,0 +1,207 @@ +package job + +import ( + "context" + "fmt" + "sync" + "time" + + "quyun/v2/providers/postgres" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/pkg/errors" + "github.com/riverqueue/river" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivertype" + "github.com/samber/lo" + log "github.com/sirupsen/logrus" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/contracts" + "go.ipao.vip/atom/opt" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func(ctx context.Context, dbConf *postgres.Config) (*Job, error) { + workers := river.NewWorkers() + + dbPoolConfig, err := pgxpool.ParseConfig(dbConf.DSN()) + if err != nil { + return nil, err + } + + dbPool, err := pgxpool.NewWithConfig(ctx, dbPoolConfig) + if err != nil { + return nil, err + } + // health check ping with timeout + pingCtx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + if err := dbPool.Ping(pingCtx); err != nil { + return nil, fmt.Errorf("job provider: db ping failed: %w", err) + } + container.AddCloseAble(dbPool.Close) + pool := riverpgxv5.New(dbPool) + + queue := &Job{ + Workers: workers, + driver: pool, + ctx: ctx, + conf: &config, + periodicJobs: make(map[string]rivertype.PeriodicJobHandle), + } + container.AddCloseAble(queue.Close) + + return queue, nil + }, o.DiOptions()...) 
+} + +type Job struct { + ctx context.Context + conf *Config + Workers *river.Workers + driver *riverpgxv5.Driver + + l sync.Mutex + client *river.Client[pgx.Tx] + + periodicJobs map[string]rivertype.PeriodicJobHandle +} + +func (q *Job) Close() { + if q.client == nil { + return + } + + if err := q.client.StopAndCancel(q.ctx); err != nil { + log.Errorf("Failed to stop and cancel client: %s", err) + } + // clear references + q.l.Lock() + q.periodicJobs = map[string]rivertype.PeriodicJobHandle{} + q.l.Unlock() +} + +func (q *Job) Client() (*river.Client[pgx.Tx], error) { + q.l.Lock() + defer q.l.Unlock() + + if q.client == nil { + var err error + q.client, err = river.NewClient(q.driver, &river.Config{ + Workers: q.Workers, + Queues: q.conf.queueConfig(), + }) + if err != nil { + return nil, err + } + } + + return q.client, nil +} + +func (q *Job) Start(ctx context.Context) error { + client, err := q.Client() + if err != nil { + return errors.Wrap(err, "get client failed") + } + + if err := client.Start(ctx); err != nil { + return err + } + defer client.StopAndCancel(ctx) + + <-ctx.Done() + + return nil +} + +func (q *Job) StopAndCancel(ctx context.Context) error { + client, err := q.Client() + if err != nil { + return errors.Wrap(err, "get client failed") + } + + return client.StopAndCancel(ctx) +} + +func (q *Job) AddPeriodicJobs(job contracts.CronJob) error { + for _, job := range job.Args() { + if err := q.AddPeriodicJob(job); err != nil { + return err + } + } + return nil +} + +func (q *Job) AddPeriodicJob(job contracts.CronJobArg) error { + client, err := q.Client() + if err != nil { + return err + } + q.l.Lock() + defer q.l.Unlock() + + q.periodicJobs[job.Arg.UniqueID()] = client.PeriodicJobs().Add(river.NewPeriodicJob( + job.PeriodicInterval, + func() (river.JobArgs, *river.InsertOpts) { + return job.Arg, lo.ToPtr(job.Arg.InsertOpts()) + }, + &river.PeriodicJobOpts{ + RunOnStart: job.RunOnStart, + }, + )) + + return nil +} + +func (q *Job) Cancel(id string) error { + client, err := q.Client() + if err != nil { + return err + } + + q.l.Lock() + defer q.l.Unlock() + + if h, ok := q.periodicJobs[id]; ok { + client.PeriodicJobs().Remove(h) + delete(q.periodicJobs, id) + } + return nil +} + +// CancelContext is like Cancel but allows passing a context. 
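+// Note: the context parameter is currently not used by the removal logic below.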
+func (q *Job) CancelContext(ctx context.Context, id string) error { + client, err := q.Client() + if err != nil { + return err + } + q.l.Lock() + defer q.l.Unlock() + if h, ok := q.periodicJobs[id]; ok { + client.PeriodicJobs().Remove(h) + delete(q.periodicJobs, id) + return nil + } + + return nil +} + +func (q *Job) Add(job contracts.JobArgs) error { + client, err := q.Client() + if err != nil { + return err + } + + q.l.Lock() + defer q.l.Unlock() + + _, err = client.Insert(q.ctx, job, lo.ToPtr(job.InsertOpts())) + return err +} diff --git a/backend_v1/providers/jwt/config.go b/backend_v1/providers/jwt/config.go new file mode 100644 index 0000000..dc227d4 --- /dev/null +++ b/backend_v1/providers/jwt/config.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "time" + + log "github.com/sirupsen/logrus" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" +) + +const DefaultPrefix = "JWT" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + SigningKey string // jwt签名 + ExpiresTime string // 过期时间 + Issuer string // 签发者 +} + +func (c *Config) ExpiresTimeDuration() time.Duration { + d, err := time.ParseDuration(c.ExpiresTime) + if err != nil { + log.Fatal(err) + } + return d +} diff --git a/backend_v1/providers/jwt/jwt.go b/backend_v1/providers/jwt/jwt.go new file mode 100644 index 0000000..dd94465 --- /dev/null +++ b/backend_v1/providers/jwt/jwt.go @@ -0,0 +1,118 @@ +package jwt + +import ( + "errors" + "strings" + "time" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + + jwt "github.com/golang-jwt/jwt/v4" + "golang.org/x/sync/singleflight" +) + +const ( + CtxKey = "claims" + HttpHeader = "Authorization" +) + +type BaseClaims struct { + OpenID string `json:"open_id,omitempty"` + Tenant string `json:"tenant,omitempty"` + UserID int64 `json:"user_id,omitempty"` + TenantID int64 `json:"tenant_id,omitempty"` +} + +// Custom claims structure +type Claims struct { + BaseClaims + jwt.RegisteredClaims +} + +const TokenPrefix = "Bearer " + +type JWT struct { + singleflight *singleflight.Group + config *Config + SigningKey []byte +} + +var ( + TokenExpired = errors.New("Token is expired") + TokenNotValidYet = errors.New("Token not active yet") + TokenMalformed = errors.New("That's not even a token") + TokenInvalid = errors.New("Couldn't handle this token:") +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var config Config + if err := o.UnmarshalConfig(&config); err != nil { + return err + } + return container.Container.Provide(func() (*JWT, error) { + return &JWT{ + singleflight: &singleflight.Group{}, + config: &config, + SigningKey: []byte(config.SigningKey), + }, nil + }, o.DiOptions()...) 
+} + +func (j *JWT) CreateClaims(baseClaims BaseClaims) *Claims { + ep, _ := time.ParseDuration(j.config.ExpiresTime) + claims := Claims{ + BaseClaims: baseClaims, + RegisteredClaims: jwt.RegisteredClaims{ + NotBefore: jwt.NewNumericDate(time.Now().Add(-time.Second * 10)), // 签名生效时间 + ExpiresAt: jwt.NewNumericDate(time.Now().Add(ep)), // 过期时间 7天 配置文件 + Issuer: j.config.Issuer, // 签名的发行者 + }, + } + return &claims +} + +// 创建一个token +func (j *JWT) CreateToken(claims *Claims) (string, error) { + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString(j.SigningKey) +} + +// CreateTokenByOldToken 旧token 换新token 使用归并回源避免并发问题 +func (j *JWT) CreateTokenByOldToken(oldToken string, claims *Claims) (string, error) { + v, err, _ := j.singleflight.Do("JWT:"+oldToken, func() (interface{}, error) { + return j.CreateToken(claims) + }) + return v.(string), err +} + +// 解析 token +func (j *JWT) Parse(tokenString string) (*Claims, error) { + tokenString = strings.TrimPrefix(tokenString, TokenPrefix) + token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (i interface{}, e error) { + return j.SigningKey, nil + }) + if err != nil { + if ve, ok := err.(*jwt.ValidationError); ok { + if ve.Errors&jwt.ValidationErrorMalformed != 0 { + return nil, TokenMalformed + } else if ve.Errors&jwt.ValidationErrorExpired != 0 { + // Token is expired + return nil, TokenExpired + } else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 { + return nil, TokenNotValidYet + } else { + return nil, TokenInvalid + } + } + } + if token != nil { + if claims, ok := token.Claims.(*Claims); ok && token.Valid { + return claims, nil + } + return nil, TokenInvalid + } else { + return nil, TokenInvalid + } +} diff --git a/backend_v1/providers/postgres/config.go b/backend_v1/providers/postgres/config.go new file mode 100644 index 0000000..de20ce8 --- /dev/null +++ b/backend_v1/providers/postgres/config.go @@ -0,0 +1,136 @@ +package postgres + +import ( + "fmt" + "strconv" + "time" + + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + "gorm.io/gorm/logger" +) + +const DefaultPrefix = "Database" + +func DefaultProvider() container.ProviderContainer { + return container.ProviderContainer{ + Provider: Provide, + Options: []opt.Option{ + opt.Prefix(DefaultPrefix), + }, + } +} + +type Config struct { + Username string + Password string + Database string + Schema string + Host string + Port uint + SslMode string + TimeZone string + Prefix string // 表前缀 + Singular bool // 是否开启全局禁用复数,true表示开启 + MaxIdleConns int // 空闲中的最大连接数 + MaxOpenConns int // 打开到数据库的最大连接数 + // 可选:连接生命周期配置(0 表示不设置) + ConnMaxLifetimeSeconds uint + ConnMaxIdleTimeSeconds uint + + // 可选:GORM 日志与行为配置 + LogLevel string // silent|error|warn|info(默认info) + SlowThresholdMs uint // 慢查询阈值(毫秒)默认200 + ParameterizedQueries bool // 占位符输出,便于日志安全与查询归并 + PrepareStmt bool // 预编译语句缓存 + SkipDefaultTransaction bool // 跳过默认事务 + + // 可选:DSN 增强 + UseSearchPath bool // 在 DSN 中附带 search_path + ApplicationName string // application_name +} + +func (m Config) GormSlowThreshold() time.Duration { + if m.SlowThresholdMs == 0 { + return 200 * time.Millisecond // 默认200ms + } + return time.Duration(m.SlowThresholdMs) * time.Millisecond +} + +func (m Config) GormLogLevel() logger.LogLevel { + switch m.LogLevel { + case "silent": + return logger.Silent + case "error": + return logger.Error + case "warn": + return logger.Warn + case "info", "": + return logger.Info + default: + return logger.Info + } +} + +func (m *Config) checkDefault() { + if m.MaxIdleConns 
== 0 { + m.MaxIdleConns = 10 + } + + if m.MaxOpenConns == 0 { + m.MaxOpenConns = 100 + } + + if m.Username == "" { + m.Username = "postgres" + } + + if m.SslMode == "" { + m.SslMode = "disable" + } + + if m.TimeZone == "" { + m.TimeZone = "Asia/Shanghai" + } + + if m.Port == 0 { + m.Port = 5432 + } + + if m.Schema == "" { + m.Schema = "public" + } +} + +func (m *Config) EmptyDsn() string { + // 基本 DSN + dsnTpl := "host=%s user=%s password=%s port=%d dbname=%s sslmode=%s TimeZone=%s" + m.checkDefault() + base := fmt.Sprintf(dsnTpl, m.Host, m.Username, m.Password, m.Port, m.Database, m.SslMode, m.TimeZone) + // 附加可选参数 + extras := "" + if m.UseSearchPath && m.Schema != "" { + extras += " search_path=" + m.Schema + } + if m.ApplicationName != "" { + extras += " application_name=" + strconv.Quote(m.ApplicationName) + } + return base + extras +} + +// DSN connection dsn +func (m *Config) DSN() string { + // 基本 DSN + dsnTpl := "host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=%s" + m.checkDefault() + base := fmt.Sprintf(dsnTpl, m.Host, m.Username, m.Password, m.Database, m.Port, m.SslMode, m.TimeZone) + // 附加可选参数 + extras := "" + if m.UseSearchPath && m.Schema != "" { + extras += " search_path=" + m.Schema + } + if m.ApplicationName != "" { + extras += " application_name=" + strconv.Quote(m.ApplicationName) + } + return base + extras +} diff --git a/backend_v1/providers/postgres/postgres.go b/backend_v1/providers/postgres/postgres.go new file mode 100644 index 0000000..4abe796 --- /dev/null +++ b/backend_v1/providers/postgres/postgres.go @@ -0,0 +1,91 @@ +package postgres + +import ( + "context" + "database/sql" + "time" + + "github.com/sirupsen/logrus" + "go.ipao.vip/atom/container" + "go.ipao.vip/atom/opt" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + "gorm.io/gorm/schema" +) + +func Provide(opts ...opt.Option) error { + o := opt.New(opts...) + var conf Config + if err := o.UnmarshalConfig(&conf); err != nil { + return err + } + + return container.Container.Provide(func() (*gorm.DB, *sql.DB, *Config, error) { + dbConfig := postgres.Config{DSN: conf.DSN()} + + // 安全日志:不打印密码,仅输出关键连接信息 + logrus. + WithFields( + logrus.Fields{ + "host": conf.Host, + "port": conf.Port, + "db": conf.Database, + "schema": conf.Schema, + "ssl": conf.SslMode, + }, + ). 
+ Info("opening PostgreSQL connection") + + // 映射日志等级 + lvl := conf.GormLogLevel() + slow := conf.GormSlowThreshold() + + gormConfig := gorm.Config{ + NamingStrategy: schema.NamingStrategy{ + TablePrefix: conf.Prefix, + SingularTable: conf.Singular, + }, + DisableForeignKeyConstraintWhenMigrating: true, + PrepareStmt: conf.PrepareStmt, + SkipDefaultTransaction: conf.SkipDefaultTransaction, + Logger: logger.New(logrus.StandardLogger(), logger.Config{ + SlowThreshold: slow, + LogLevel: lvl, + IgnoreRecordNotFoundError: true, + Colorful: false, + ParameterizedQueries: conf.ParameterizedQueries, + }), + } + + db, err := gorm.Open(postgres.New(dbConfig), &gormConfig) + if err != nil { + return nil, nil, nil, err + } + + sqlDB, err := db.DB() + if err != nil { + return nil, sqlDB, nil, err + } + sqlDB.SetMaxIdleConns(conf.MaxIdleConns) + sqlDB.SetMaxOpenConns(conf.MaxOpenConns) + if conf.ConnMaxLifetimeSeconds > 0 { + sqlDB.SetConnMaxLifetime(time.Duration(conf.ConnMaxLifetimeSeconds) * time.Second) + } + if conf.ConnMaxIdleTimeSeconds > 0 { + sqlDB.SetConnMaxIdleTime(time.Duration(conf.ConnMaxIdleTimeSeconds) * time.Second) + } + + // Ping 校验 + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := sqlDB.PingContext(ctx); err != nil { + return nil, sqlDB, nil, err + } + + // 关闭钩子 + container.AddCloseAble(func() { _ = sqlDB.Close() }) + + return db, sqlDB, &conf, nil + }, o.DiOptions()...) +} diff --git a/backend_v1/tests/README.md b/backend_v1/tests/README.md new file mode 100644 index 0000000..0756793 --- /dev/null +++ b/backend_v1/tests/README.md @@ -0,0 +1,288 @@ +# 测试指南 + +本项目的测试使用 **Convey 框架**,分为三个层次:单元测试、集成测试和端到端测试。 + +## 测试结构 + +``` +tests/ +├── setup_test.go # 测试设置和通用工具 +├── unit/ # 单元测试 +│ ├── config_test.go # 配置测试 +│ └── ... # 其他单元测试 +├── integration/ # 集成测试 +│ ├── database_test.go # 数据库集成测试 +│ └── ... # 其他集成测试 +└── e2e/ # 端到端测试 + ├── api_test.go # API 测试 + └── ... # 其他 E2E 测试 +``` + +## Convey 框架概述 + +Convey 是一个 BDD 风格的 Go 测试框架,提供直观的语法和丰富的断言。 + +### 核心概念 + +- **Convey**: 定义测试上下文,类似于 `Describe` 或 `Context` +- **So**: 断言函数,验证预期结果 +- **Reset**: 清理函数,在每个测试后执行 + +### 基本语法 + +```go +Convey("测试场景描述", t, func() { + Convey("当某个条件发生时", func() { + // 准备测试数据 + result := SomeFunction() + + Convey("那么应该得到预期结果", func() { + So(result, ShouldEqual, "expected") + }) + }) + + Reset(func() { + // 清理测试数据 + }) +}) +``` + +## 运行测试 + +### 运行所有测试 +```bash +go test ./tests/... -v +``` + +### 运行特定类型的测试 +```bash +# 单元测试 +go test ./tests/unit/... -v + +# 集成测试 +go test ./tests/integration/... -v + +# 端到端测试 +go test ./tests/e2e/... -v +``` + +### 运行带覆盖率报告的测试 +```bash +go test ./tests/... -v -coverprofile=coverage.out +go tool cover -html=coverage.out -o coverage.html +``` + +### 运行基准测试 +```bash +go test ./tests/... -bench=. -v +``` + +## 测试环境配置 + +### 单元测试 +- 不需要外部依赖 +- 使用内存数据库或模拟对象 +- 快速执行 + +### 集成测试 +- 需要数据库连接 +- 使用测试数据库 `v2_test` +- 需要启动 Redis 等服务 + +### 端到端测试 +- 需要完整的应用环境 +- 测试真实的 HTTP 请求 +- 可能需要 Docker 环境 + +## Convey 测试最佳实践 + +### 1. 
测试结构设计 +- 使用描述性的中文场景描述 +- 遵循 `当...那么...` 的语义结构 +- 嵌套 Convey 块来组织复杂测试逻辑 + +```go +Convey("用户认证测试", t, func() { + var user *User + var token string + + Convey("当用户注册时", func() { + user = &User{Name: "测试用户", Email: "test@example.com"} + err := user.Register() + So(err, ShouldBeNil) + + Convey("那么用户应该被创建", func() { + So(user.ID, ShouldBeGreaterThan, 0) + }) + }) + + Convey("当用户登录时", func() { + token, err := user.Login("password") + So(err, ShouldBeNil) + So(token, ShouldNotBeEmpty) + + Convey("那么应该获得有效的访问令牌", func() { + So(len(token), ShouldBeGreaterThan, 0) + }) + }) +}) +``` + +### 2. 断言使用 +- 使用丰富的 So 断言函数 +- 提供有意义的错误消息 +- 验证所有重要的方面 + +### 3. 数据管理 +- 使用 `Reset` 函数进行清理 +- 每个测试独立准备数据 +- 确保测试间不相互影响 + +### 4. 异步测试 +- 使用适当的超时设置 +- 处理并发测试 +- 使用 channel 进行同步 + +### 5. 错误处理 +- 测试错误情况 +- 验证错误消息 +- 确保错误处理逻辑正确 + +## 常用 Convey 断言 + +### 相等性断言 +```go +So(value, ShouldEqual, expected) +So(value, ShouldNotEqual, expected) +So(value, ShouldResemble, expected) // 深度比较 +So(value, ShouldNotResemble, expected) +``` + +### 类型断言 +```go +So(value, ShouldBeNil) +So(value, ShouldNotBeNil) +So(value, ShouldBeTrue) +So(value, ShouldBeFalse) +So(value, ShouldBeZeroValue) +``` + +### 数值断言 +```go +So(value, ShouldBeGreaterThan, expected) +So(value, ShouldBeLessThan, expected) +So(value, ShouldBeBetween, lower, upper) +``` + +### 集合断言 +```go +So(slice, ShouldHaveLength, expected) +So(slice, ShouldContain, expected) +So(slice, ShouldNotContain, expected) +So(map, ShouldContainKey, key) +``` + +### 字符串断言 +```go +So(str, ShouldContainSubstring, substr) +So(str, ShouldStartWith, prefix) +So(str, ShouldEndWith, suffix) +So(str, ShouldMatch, regexp) +``` + +### 错误断言 +```go +So(err, ShouldBeNil) +So(err, ShouldNotBeNil) +So(err, ShouldError, expectedError) +``` + +## 测试工具 + +- `goconvey/convey` - BDD 测试框架 +- `gomock` - Mock 生成器 +- `httptest` - HTTP 测试 +- `sqlmock` - 数据库 mock +- `testify` - 辅助测试工具(可选) + +## 测试示例 + +### 配置测试示例 +```go +Convey("配置加载测试", t, func() { + var config *Config + + Convey("当从文件加载配置时", func() { + config, err := LoadConfig("config.toml") + So(err, ShouldBeNil) + So(config, ShouldNotBeNil) + + Convey("那么配置应该正确加载", func() { + So(config.App.Mode, ShouldEqual, "development") + So(config.Http.Port, ShouldEqual, 8080) + }) + }) +}) +``` + +### 数据库测试示例 +```go +Convey("数据库操作测试", t, func() { + var db *gorm.DB + + Convey("当连接数据库时", func() { + db = SetupTestDB() + So(db, ShouldNotBeNil) + + Convey("那么应该能够创建记录", func() { + user := User{Name: "测试用户", Email: "test@example.com"} + result := db.Create(&user) + So(result.Error, ShouldBeNil) + So(user.ID, ShouldBeGreaterThan, 0) + }) + }) + + Reset(func() { + if db != nil { + CleanupTestDB(db) + } + }) +}) +``` + +### API 测试示例 +```go +Convey("API 端点测试", t, func() { + var server *httptest.Server + + Convey("当启动测试服务器时", func() { + server = httptest.NewServer(NewApp()) + So(server, ShouldNotBeNil) + + Convey("那么健康检查端点应该正常工作", func() { + resp, err := http.Get(server.URL + "/health") + So(err, ShouldBeNil) + So(resp.StatusCode, ShouldEqual, http.StatusOK) + + var result map[string]interface{} + json.NewDecoder(resp.Body).Decode(&result) + So(result["status"], ShouldEqual, "ok") + }) + }) + + Reset(func() { + if server != nil { + server.Close() + } + }) +}) +``` + +## CI/CD 集成 + +测试会在以下情况下自动运行: +- 代码提交时 +- 创建 Pull Request 时 +- 合并到主分支时 + +测试结果会影响代码合并决策。Convey 的详细输出有助于快速定位问题。 \ No newline at end of file diff --git a/backend_v1/tests/e2e/api_test.go b/backend_v1/tests/e2e/api_test.go new file mode 100644 index 0000000..26393a8 --- /dev/null +++ b/backend_v1/tests/e2e/api_test.go @@ -0,0 
+1,419 @@ +package e2e + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" + "quyun/v2/app" + "quyun/v2/app/config" +) + +// TestAPIHealth 测试 API 健康检查 +func TestAPIHealth(t *testing.T) { + Convey("API 健康检查测试", t, func() { + var server *httptest.Server + var testConfig *config.Config + + Convey("当启动测试服务器时", func() { + testConfig = &config.Config{ + App: config.AppConfig{ + Mode: "test", + BaseURI: "http://localhost:8080", + }, + Http: config.HttpConfig{ + Port: 8080, + }, + Log: config.LogConfig{ + Level: "debug", + Format: "text", + EnableCaller: true, + }, + } + + app := app.New(testConfig) + server = httptest.NewServer(app) + + Convey("服务器应该成功启动", func() { + So(server, ShouldNotBeNil) + So(server.URL, ShouldNotBeEmpty) + }) + }) + + Convey("当访问健康检查端点时", func() { + resp, err := http.Get(server.URL + "/health") + So(err, ShouldBeNil) + So(resp.StatusCode, ShouldEqual, http.StatusOK) + + defer resp.Body.Close() + + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + So(err, ShouldBeNil) + + Convey("响应应该包含正确的状态", func() { + So(result["status"], ShouldEqual, "ok") + }) + + Convey("响应应该包含时间戳", func() { + So(result, ShouldContainKey, "timestamp") + }) + + Convey("响应应该是 JSON 格式", func() { + So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8") + }) + }) + + Convey("当访问不存在的端点时", func() { + resp, err := http.Get(server.URL + "/api/nonexistent") + So(err, ShouldBeNil) + So(resp.StatusCode, ShouldEqual, http.StatusNotFound) + + defer resp.Body.Close() + + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + So(err, ShouldBeNil) + + Convey("响应应该包含错误信息", func() { + So(result, ShouldContainKey, "error") + }) + }) + + Convey("当测试 CORS 支持", func() { + req, err := http.NewRequest("OPTIONS", server.URL+"/api/test", nil) + So(err, ShouldBeNil) + + req.Header.Set("Origin", "http://localhost:3000") + req.Header.Set("Access-Control-Request-Method", "POST") + req.Header.Set("Access-Control-Request-Headers", "Content-Type,Authorization") + + resp, err := http.DefaultClient.Do(req) + So(err, ShouldBeNil) + defer resp.Body.Close() + + Convey("应该返回正确的 CORS 头", func() { + So(resp.StatusCode, ShouldEqual, http.StatusOK) + So(resp.Header.Get("Access-Control-Allow-Origin"), ShouldContainSubstring, "localhost") + So(resp.Header.Get("Access-Control-Allow-Methods"), ShouldContainSubstring, "POST") + }) + }) + + Reset(func() { + if server != nil { + server.Close() + } + }) + }) +} + +// TestAPIPerformance 测试 API 性能 +func TestAPIPerformance(t *testing.T) { + Convey("API 性能测试", t, func() { + var server *httptest.Server + var testConfig *config.Config + + Convey("当准备性能测试时", func() { + testConfig = &config.Config{ + App: config.AppConfig{ + Mode: "test", + BaseURI: "http://localhost:8080", + }, + Http: config.HttpConfig{ + Port: 8080, + }, + Log: config.LogConfig{ + Level: "error", // 减少日志输出以提升性能 + Format: "text", + }, + } + + app := app.New(testConfig) + server = httptest.NewServer(app) + }) + + Convey("当测试响应时间时", func() { + start := time.Now() + resp, err := http.Get(server.URL + "/health") + So(err, ShouldBeNil) + defer resp.Body.Close() + + duration := time.Since(start) + So(resp.StatusCode, ShouldEqual, http.StatusOK) + + Convey("响应时间应该在合理范围内", func() { + So(duration, ShouldBeLessThan, 100*time.Millisecond) + }) + }) + + Convey("当测试并发请求时", func() { + const numRequests = 50 + const maxConcurrency = 10 + const 
timeout = 5 * time.Second + + var wg sync.WaitGroup + successCount := 0 + errorCount := 0 + var mu sync.Mutex + + // 使用信号量控制并发数 + sem := make(chan struct{}, maxConcurrency) + + start := time.Now() + + for i := 0; i < numRequests; i++ { + wg.Add(1) + go func(requestID int) { + defer wg.Done() + + // 获取信号量 + sem <- struct{}{} + defer func() { <-sem }() + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", server.URL+"/health", nil) + if err != nil { + mu.Lock() + errorCount++ + mu.Unlock() + return + } + + client := &http.Client{ + Timeout: timeout, + } + + resp, err := client.Do(req) + if err != nil { + mu.Lock() + errorCount++ + mu.Unlock() + return + } + defer resp.Body.Close() + + mu.Lock() + if resp.StatusCode == http.StatusOK { + successCount++ + } else { + errorCount++ + } + mu.Unlock() + }(i) + } + + wg.Wait() + duration := time.Since(start) + + Convey("所有请求都应该完成", func() { + So(successCount+errorCount, ShouldEqual, numRequests) + }) + + Convey("所有请求都应该成功", func() { + So(errorCount, ShouldEqual, 0) + }) + + Convey("总耗时应该在合理范围内", func() { + So(duration, ShouldBeLessThan, 10*time.Second) + }) + + Convey("并发性能应该良好", func() { + avgTime := duration / numRequests + So(avgTime, ShouldBeLessThan, 200*time.Millisecond) + }) + }) + + Reset(func() { + if server != nil { + server.Close() + } + }) + }) +} + +// TestAPIBehavior 测试 API 行为 +func TestAPIBehavior(t *testing.T) { + Convey("API 行为测试", t, func() { + var server *httptest.Server + var testConfig *config.Config + + Convey("当准备行为测试时", func() { + testConfig = &config.Config{ + App: config.AppConfig{ + Mode: "test", + BaseURI: "http://localhost:8080", + }, + Http: config.HttpConfig{ + Port: 8080, + }, + Log: config.LogConfig{ + Level: "debug", + Format: "text", + EnableCaller: true, + }, + } + + app := app.New(testConfig) + server = httptest.NewServer(app) + }) + + Convey("当测试不同 HTTP 方法时", func() { + testURL := server.URL + "/health" + + Convey("GET 请求应该成功", func() { + resp, err := http.Get(testURL) + So(err, ShouldBeNil) + defer resp.Body.Close() + So(resp.StatusCode, ShouldEqual, http.StatusOK) + }) + + Convey("POST 请求应该被处理", func() { + resp, err := http.Post(testURL, "application/json", bytes.NewBuffer([]byte{})) + So(err, ShouldBeNil) + defer resp.Body.Close() + // 健康检查端点通常支持所有方法 + So(resp.StatusCode, ShouldBeIn, []int{http.StatusOK, http.StatusMethodNotAllowed}) + }) + + Convey("PUT 请求应该被处理", func() { + req, err := http.NewRequest("PUT", testURL, bytes.NewBuffer([]byte{})) + So(err, ShouldBeNil) + resp, err := http.DefaultClient.Do(req) + So(err, ShouldBeNil) + defer resp.Body.Close() + So(resp.StatusCode, ShouldBeIn, []int{http.StatusOK, http.StatusMethodNotAllowed}) + }) + + Convey("DELETE 请求应该被处理", func() { + req, err := http.NewRequest("DELETE", testURL, nil) + So(err, ShouldBeNil) + resp, err := http.DefaultClient.Do(req) + So(err, ShouldBeNil) + defer resp.Body.Close() + So(resp.StatusCode, ShouldBeIn, []int{http.StatusOK, http.StatusMethodNotAllowed}) + }) + }) + + Convey("当测试自定义请求头时", func() { + req, err := http.NewRequest("GET", server.URL+"/health", nil) + So(err, ShouldBeNil) + + // 设置各种请求头 + req.Header.Set("User-Agent", "E2E-Test-Agent/1.0") + req.Header.Set("Accept", "application/json") + req.Header.Set("Accept-Language", "zh-CN,zh;q=0.9,en;q=0.8") + req.Header.Set("X-Custom-Header", "test-value") + req.Header.Set("X-Request-ID", "test-request-123") + req.Header.Set("Authorization", "Bearer test-token") + + resp, err := 
http.DefaultClient.Do(req) + So(err, ShouldBeNil) + defer resp.Body.Close() + + Convey("请求应该成功", func() { + So(resp.StatusCode, ShouldEqual, http.StatusOK) + }) + + Convey("响应应该是 JSON 格式", func() { + So(resp.Header.Get("Content-Type"), ShouldEqual, "application/json; charset=utf-8") + }) + }) + + Convey("当测试错误处理时", func() { + Convey("访问不存在的路径应该返回 404", func() { + resp, err := http.Get(server.URL + "/api/v1/nonexistent") + So(err, ShouldBeNil) + defer resp.Body.Close() + So(resp.StatusCode, ShouldEqual, http.StatusNotFound) + }) + + Convey("访问非法路径应该返回 404", func() { + resp, err := http.Get(server.URL + "/../etc/passwd") + So(err, ShouldBeNil) + defer resp.Body.Close() + So(resp.StatusCode, ShouldEqual, http.StatusNotFound) + }) + }) + + Reset(func() { + if server != nil { + server.Close() + } + }) + }) +} + +// TestAPIDocumentation 测试 API 文档 +func TestAPIDocumentation(t *testing.T) { + Convey("API 文档测试", t, func() { + var server *httptest.Server + var testConfig *config.Config + + Convey("当准备文档测试时", func() { + testConfig = &config.Config{ + App: config.AppConfig{ + Mode: "test", + BaseURI: "http://localhost:8080", + }, + Http: config.HttpConfig{ + Port: 8080, + }, + Log: config.LogConfig{ + Level: "debug", + Format: "text", + EnableCaller: true, + }, + } + + app := app.New(testConfig) + server = httptest.NewServer(app) + }) + + Convey("当访问 Swagger UI 时", func() { + resp, err := http.Get(server.URL + "/swagger/index.html") + So(err, ShouldBeNil) + defer resp.Body.Close() + + Convey("应该能够访问 Swagger UI", func() { + So(resp.StatusCode, ShouldEqual, http.StatusOK) + }) + + Convey("响应应该是 HTML 格式", func() { + contentType := resp.Header.Get("Content-Type") + So(contentType, ShouldContainSubstring, "text/html") + }) + }) + + Convey("当访问 OpenAPI 规范时", func() { + resp, err := http.Get(server.URL + "/swagger/doc.json") + So(err, ShouldBeNil) + defer resp.Body.Close() + + Convey("应该能够访问 OpenAPI 规范", func() { + // 如果存在则返回 200,不存在则返回 404 + So(resp.StatusCode, ShouldBeIn, []int{http.StatusOK, http.StatusNotFound}) + }) + + Convey("如果存在,响应应该是 JSON 格式", func() { + if resp.StatusCode == http.StatusOK { + contentType := resp.Header.Get("Content-Type") + So(contentType, ShouldContainSubstring, "application/json") + } + }) + }) + + Reset(func() { + if server != nil { + server.Close() + } + }) + }) +} diff --git a/backend_v1/tests/integration/database_test.go b/backend_v1/tests/integration/database_test.go new file mode 100644 index 0000000..1794dab --- /dev/null +++ b/backend_v1/tests/integration/database_test.go @@ -0,0 +1,364 @@ +package integration + +import ( + "context" + "database/sql" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "quyun/v2/app/config" + "quyun/v2/app/database" +) + +// TestUser 测试用户模型 +type TestUser struct { + ID int `gorm:"primaryKey"` + Name string `gorm:"size:100;not null"` + Email string `gorm:"size:100;unique;not null"` + CreatedAt time.Time `gorm:"autoCreateTime"` + UpdatedAt time.Time `gorm:"autoUpdateTime"` +} + +// TestDatabaseConnection 测试数据库连接 +func TestDatabaseConnection(t *testing.T) { + Convey("数据库连接测试", t, func() { + var db *gorm.DB + var sqlDB *sql.DB + var testConfig *config.Config + var testDBName string + + Convey("当准备测试数据库时", func() { + testDBName = "v2_test_integration" + testConfig = &config.Config{ + Database: config.DatabaseConfig{ + Host: "localhost", + Port: 5432, + Database: testDBName, + Username: "postgres", + Password: "password", + SslMode: "disable", + MaxIdleConns: 5, + MaxOpenConns: 20, + ConnMaxLifetime: 30 * time.Minute, + }, + } + + Convey("应该能够连接到数据库", func() { + dsn := testConfig.Database.GetDSN() + var err error + db, err = gorm.Open(postgres.Open(dsn), &gorm.Config{}) + So(err, ShouldBeNil) + So(db, ShouldNotBeNil) + + sqlDB, err = db.DB() + So(err, ShouldBeNil) + So(sqlDB, ShouldNotBeNil) + + // 设置连接池 + sqlDB.SetMaxIdleConns(testConfig.Database.MaxIdleConns) + sqlDB.SetMaxOpenConns(testConfig.Database.MaxOpenConns) + sqlDB.SetConnMaxLifetime(testConfig.Database.ConnMaxLifetime) + + // 测试连接 + err = sqlDB.Ping() + So(err, ShouldBeNil) + }) + + Convey("应该能够创建测试表", func() { + err := db.Exec(` + CREATE TABLE IF NOT EXISTS integration_test_users ( + id SERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL, + email VARCHAR(100) UNIQUE NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + `).Error + So(err, ShouldBeNil) + }) + }) + + Convey("当测试数据库操作时", func() { + Convey("应该能够创建记录", func() { + user := TestUser{ + Name: "Integration Test User", + Email: "integration@example.com", + } + + result := db.Create(&user) + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 1) + So(user.ID, ShouldBeGreaterThan, 0) + }) + + Convey("应该能够查询记录", func() { + // 先插入测试数据 + user := TestUser{ + Name: "Query Test User", + Email: "query@example.com", + } + db.Create(&user) + + // 查询记录 + var result TestUser + err := db.First(&result, "email = ?", "query@example.com").Error + So(err, ShouldBeNil) + So(result.Name, ShouldEqual, "Query Test User") + So(result.Email, ShouldEqual, "query@example.com") + }) + + Convey("应该能够更新记录", func() { + // 先插入测试数据 + user := TestUser{ + Name: "Update Test User", + Email: "update@example.com", + } + db.Create(&user) + + // 更新记录 + result := db.Model(&user).Update("name", "Updated Integration User") + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 1) + + // 验证更新 + var updatedUser TestUser + err := db.First(&updatedUser, user.ID).Error + So(err, ShouldBeNil) + So(updatedUser.Name, ShouldEqual, "Updated Integration User") + }) + + Convey("应该能够删除记录", func() { + // 先插入测试数据 + user := TestUser{ + Name: "Delete Test User", + Email: "delete@example.com", + } + db.Create(&user) + + // 删除记录 + result := db.Delete(&user) + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 1) + + // 验证删除 + var deletedUser TestUser + err := db.First(&deletedUser, user.ID).Error + So(err, ShouldEqual, gorm.ErrRecordNotFound) + }) + }) + + Convey("当测试事务时", func() { + Convey("应该能够执行事务操作", func() { + // 开始事务 + tx := db.Begin() + So(tx, ShouldNotBeNil) + + // 在事务中插入数据 + user := TestUser{ + 
Name: "Transaction Test User", + Email: "transaction@example.com", + } + result := tx.Create(&user) + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 1) + + // 查询事务中的数据 + var count int64 + tx.Model(&TestUser{}).Count(&count) + So(count, ShouldEqual, 1) + + // 提交事务 + err := tx.Commit().Error + So(err, ShouldBeNil) + + // 验证数据已提交 + db.Model(&TestUser{}).Count(&count) + So(count, ShouldBeGreaterThan, 0) + }) + + Convey("应该能够回滚事务", func() { + // 开始事务 + tx := db.Begin() + + // 在事务中插入数据 + user := TestUser{ + Name: "Rollback Test User", + Email: "rollback@example.com", + } + tx.Create(&user) + + // 回滚事务 + err := tx.Rollback().Error + So(err, ShouldBeNil) + + // 验证数据已回滚 + var count int64 + db.Model(&TestUser{}).Where("email = ?", "rollback@example.com").Count(&count) + So(count, ShouldEqual, 0) + }) + }) + + Convey("当测试批量操作时", func() { + Convey("应该能够批量插入记录", func() { + users := []TestUser{ + {Name: "Batch User 1", Email: "batch1@example.com"}, + {Name: "Batch User 2", Email: "batch2@example.com"}, + {Name: "Batch User 3", Email: "batch3@example.com"}, + } + + result := db.Create(&users) + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 3) + + // 验证批量插入 + var count int64 + db.Model(&TestUser{}).Where("email LIKE ?", "batch%@example.com").Count(&count) + So(count, ShouldEqual, 3) + }) + + Convey("应该能够批量更新记录", func() { + // 先插入测试数据 + users := []TestUser{ + {Name: "Batch Update 1", Email: "batchupdate1@example.com"}, + {Name: "Batch Update 2", Email: "batchupdate2@example.com"}, + } + db.Create(&users) + + // 批量更新 + result := db.Model(&TestUser{}). + Where("email LIKE ?", "batchupdate%@example.com"). + Update("name", "Batch Updated User") + So(result.Error, ShouldBeNil) + So(result.RowsAffected, ShouldEqual, 2) + + // 验证更新 + var updatedCount int64 + db.Model(&TestUser{}). + Where("name = ?", "Batch Updated User"). + Count(&updatedCount) + So(updatedCount, ShouldEqual, 2) + }) + }) + + Convey("当测试查询条件时", func() { + Convey("应该能够使用各种查询条件", func() { + // 插入测试数据 + testUsers := []TestUser{ + {Name: "Alice", Email: "alice@example.com"}, + {Name: "Bob", Email: "bob@example.com"}, + {Name: "Charlie", Email: "charlie@example.com"}, + {Name: "Alice Smith", Email: "alice.smith@example.com"}, + } + db.Create(&testUsers) + + Convey("应该能够使用 LIKE 查询", func() { + var users []TestUser + err := db.Where("name LIKE ?", "Alice%").Find(&users).Error + So(err, ShouldBeNil) + So(len(users), ShouldEqual, 2) + }) + + Convey("应该能够使用 IN 查询", func() { + var users []TestUser + err := db.Where("name IN ?", []string{"Alice", "Bob"}).Find(&users).Error + So(err, ShouldBeNil) + So(len(users), ShouldEqual, 2) + }) + + Convey("应该能够使用 BETWEEN 查询", func() { + var users []TestUser + err := db.Where("id BETWEEN ? AND ?", 1, 3).Find(&users).Error + So(err, ShouldBeNil) + So(len(users), ShouldBeGreaterThan, 0) + }) + + Convey("应该能够使用多条件查询", func() { + var users []TestUser + err := db.Where("name LIKE ? 
AND email LIKE ?", "%Alice%", "%example.com").Find(&users).Error + So(err, ShouldBeNil) + So(len(users), ShouldEqual, 2) + }) + }) + }) + + Reset(func() { + // 清理测试表 + if db != nil { + db.Exec("DROP TABLE IF EXISTS integration_test_users") + } + // 关闭数据库连接 + if sqlDB != nil { + sqlDB.Close() + } + }) + }) +} + +// TestDatabaseConnectionPool 测试数据库连接池 +func TestDatabaseConnectionPool(t *testing.T) { + Convey("数据库连接池测试", t, func() { + var db *gorm.DB + var sqlDB *sql.DB + + Convey("当配置连接池时", func() { + testConfig := &config.Config{ + Database: config.DatabaseConfig{ + Host: "localhost", + Port: 5432, + Database: "v2_test_pool", + Username: "postgres", + Password: "password", + SslMode: "disable", + MaxIdleConns: 5, + MaxOpenConns: 10, + ConnMaxLifetime: 5 * time.Minute, + }, + } + + dsn := testConfig.Database.GetDSN() + var err error + db, err = gorm.Open(postgres.Open(dsn), &gorm.Config{}) + So(err, ShouldBeNil) + + sqlDB, err = db.DB() + So(err, ShouldBeNil) + + Convey("应该能够设置连接池参数", func() { + sqlDB.SetMaxIdleConns(testConfig.Database.MaxIdleConns) + sqlDB.SetMaxOpenConns(testConfig.Database.MaxOpenConns) + sqlDB.SetConnMaxLifetime(testConfig.Database.ConnMaxLifetime) + + // 验证设置 + stats := sqlDB.Stats() + So(stats.MaxOpenConns, ShouldEqual, testConfig.Database.MaxOpenConns) + So(stats.MaxIdleConns, ShouldEqual, testConfig.Database.MaxIdleConns) + }) + + Convey("应该能够监控连接池状态", func() { + // 获取初始状态 + initialStats := sqlDB.Stats() + So(initialStats.OpenConnections, ShouldEqual, 0) + + // 执行一些查询来创建连接 + for i := 0; i < 3; i++ { + sqlDB.Ping() + } + + // 获取使用后的状态 + afterStats := sqlDB.Stats() + So(afterStats.OpenConnections, ShouldBeGreaterThan, 0) + So(afterStats.InUse, ShouldBeGreaterThan, 0) + }) + }) + + Reset(func() { + // 关闭数据库连接 + if sqlDB != nil { + sqlDB.Close() + } + }) + }) +} diff --git a/backend_v1/tests/setup_test.go b/backend_v1/tests/setup_test.go new file mode 100644 index 0000000..19d5047 --- /dev/null +++ b/backend_v1/tests/setup_test.go @@ -0,0 +1,161 @@ +package tests + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +// TestMain 测试入口点 +func TestMain(m *testing.M) { + // 运行测试 + m.Run() +} + +// TestSetup 测试基础设置 +func TestSetup(t *testing.T) { + Convey("测试基础设置", t, func() { + Convey("当初始化测试环境时", func() { + // 初始化测试环境 + testEnv := &TestEnvironment{ + Name: "test-env", + Version: "1.0.0", + } + + Convey("那么测试环境应该被正确创建", func() { + So(testEnv.Name, ShouldEqual, "test-env") + So(testEnv.Version, ShouldEqual, "1.0.0") + }) + }) + }) +} + +// TestEnvironment 测试环境结构 +type TestEnvironment struct { + Name string + Version string + Config map[string]interface{} +} + +// NewTestEnvironment 创建新的测试环境 +func NewTestEnvironment(name string) *TestEnvironment { + return &TestEnvironment{ + Name: name, + Config: make(map[string]interface{}), + } +} + +// WithConfig 设置配置 +func (e *TestEnvironment) WithConfig(key string, value interface{}) *TestEnvironment { + e.Config[key] = value + return e +} + +// GetConfig 获取配置 +func (e *TestEnvironment) GetConfig(key string) interface{} { + return e.Config[key] +} + +// Setup 设置测试环境 +func (e *TestEnvironment) Setup() *TestEnvironment { + // 初始化测试环境 + e.Config["initialized"] = true + return e +} + +// Cleanup 清理测试环境 +func (e *TestEnvironment) Cleanup() { + // 清理测试环境 + e.Config = make(map[string]interface{}) +} + +// TestEnvironmentManagement 测试环境管理 +func TestEnvironmentManagement(t *testing.T) { + Convey("测试环境管理", t, func() { + var env *TestEnvironment + + Convey("当创建新测试环境时", func() { + env = NewTestEnvironment("test-app") + + Convey("那么环境应该有正确的名称", func() { + So(env.Name, ShouldEqual, "test-app") + So(env.Version, ShouldBeEmpty) + So(env.Config, ShouldNotBeNil) + }) + }) + + Convey("当设置配置时", func() { + env.WithConfig("debug", true) + env.WithConfig("port", 8080) + + Convey("那么配置应该被正确设置", func() { + So(env.GetConfig("debug"), ShouldEqual, true) + So(env.GetConfig("port"), ShouldEqual, 8080) + }) + }) + + Convey("当初始化环境时", func() { + env.Setup() + + Convey("那么环境应该被标记为已初始化", func() { + So(env.GetConfig("initialized"), ShouldEqual, true) + }) + }) + + Reset(func() { + if env != nil { + env.Cleanup() + } + }) + }) +} + +// TestConveyBasicUsage 测试 Convey 基础用法 +func TestConveyBasicUsage(t *testing.T) { + Convey("Convey 基础用法测试", t, func() { + Convey("数字操作", func() { + num := 42 + + Convey("应该能够进行基本比较", func() { + So(num, ShouldEqual, 42) + So(num, ShouldBeGreaterThan, 0) + So(num, ShouldBeLessThan, 100) + }) + }) + + Convey("字符串操作", func() { + str := "hello world" + + Convey("应该能够进行字符串比较", func() { + So(str, ShouldEqual, "hello world") + So(str, ShouldContainSubstring, "hello") + So(str, ShouldStartWith, "hello") + So(str, ShouldEndWith, "world") + }) + }) + + Convey("切片操作", func() { + slice := []int{1, 2, 3, 4, 5} + + Convey("应该能够进行切片操作", func() { + So(slice, ShouldHaveLength, 5) + So(slice, ShouldContain, 3) + So(slice, ShouldNotContain, 6) + }) + }) + + Convey("Map 操作", func() { + m := map[string]interface{}{ + "name": "test", + "value": 123, + } + + Convey("应该能够进行 Map 操作", func() { + So(m, ShouldContainKey, "name") + So(m, ShouldContainKey, "value") + So(m["name"], ShouldEqual, "test") + So(m["value"], ShouldEqual, 123) + }) + }) + }) +} diff --git a/backend_v1/tests/unit/config_test.go b/backend_v1/tests/unit/config_test.go new file mode 100644 index 0000000..87336a1 --- /dev/null +++ b/backend_v1/tests/unit/config_test.go @@ -0,0 +1,287 @@ +package unit + +import ( + "os" + "path/filepath" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + "quyun/v2/app/config" +) + +// TestConfigLoading 测试配置加载功能 +func TestConfigLoading(t *testing.T) { + Convey("配置加载测试", t, func() { + var testConfig *config.Config + var configPath string + var testDir string + + Convey("当准备测试配置文件时", func() { + originalWd, _ := os.Getwd() + testDir = filepath.Join(originalWd, "..", "..", "fixtures", "test_config") + + Convey("应该创建测试配置目录", func() { + err := os.MkdirAll(testDir, 0o755) + So(err, ShouldBeNil) + }) + + Convey("应该创建测试配置文件", func() { + testConfigContent := `App: + Mode: "test" + BaseURI: "http://localhost:8080" +Http: + Port: 8080 +Database: + Host: "localhost" + Port: 5432 + Database: "test_db" + Username: "test_user" + Password: "test_password" + SslMode: "disable" +Log: + Level: "debug" + Format: "text" + EnableCaller: true` + + configPath = filepath.Join(testDir, "config.toml") + err := os.WriteFile(configPath, []byte(testConfigContent), 0o644) + So(err, ShouldBeNil) + }) + + Convey("应该成功加载配置", func() { + var err error + testConfig, err = config.Load(configPath) + So(err, ShouldBeNil) + So(testConfig, ShouldNotBeNil) + }) + }) + + Convey("验证配置内容", func() { + So(testConfig, ShouldNotBeNil) + + Convey("应用配置应该正确", func() { + So(testConfig.App.Mode, ShouldEqual, "test") + So(testConfig.App.BaseURI, ShouldEqual, "http://localhost:8080") + }) + + Convey("HTTP配置应该正确", func() { + So(testConfig.Http.Port, ShouldEqual, 8080) + }) + + Convey("数据库配置应该正确", func() { + So(testConfig.Database.Host, ShouldEqual, "localhost") + So(testConfig.Database.Port, ShouldEqual, 5432) + So(testConfig.Database.Database, ShouldEqual, "test_db") + So(testConfig.Database.Username, ShouldEqual, "test_user") + So(testConfig.Database.Password, ShouldEqual, "test_password") + So(testConfig.Database.SslMode, ShouldEqual, "disable") + }) + + Convey("日志配置应该正确", func() { + So(testConfig.Log.Level, ShouldEqual, "debug") + So(testConfig.Log.Format, ShouldEqual, "text") + So(testConfig.Log.EnableCaller, ShouldBeTrue) + }) + }) + + Reset(func() { + // 清理测试文件 + if testDir != "" { + os.RemoveAll(testDir) + } + }) + }) +} + +// TestConfigFromEnvironment 测试从环境变量加载配置 +func TestConfigFromEnvironment(t *testing.T) { + Convey("环境变量配置测试", t, func() { + var originalEnvVars map[string]string + + Convey("当设置环境变量时", func() { + // 保存原始环境变量 + originalEnvVars = map[string]string{ + "APP_MODE": os.Getenv("APP_MODE"), + "HTTP_PORT": os.Getenv("HTTP_PORT"), + "DB_HOST": os.Getenv("DB_HOST"), + } + + // 设置测试环境变量 + os.Setenv("APP_MODE", "test") + os.Setenv("HTTP_PORT", "9090") + os.Setenv("DB_HOST", "test-host") + + Convey("环境变量应该被正确设置", func() { + So(os.Getenv("APP_MODE"), ShouldEqual, "test") + So(os.Getenv("HTTP_PORT"), ShouldEqual, "9090") + So(os.Getenv("DB_HOST"), ShouldEqual, "test-host") + }) + }) + + Convey("当从环境变量加载配置时", func() { + originalWd, _ := os.Getwd() + testDir := filepath.Join(originalWd, "..", "..", "fixtures", "test_config_env") + + Convey("应该创建测试配置目录", func() { + err := os.MkdirAll(testDir, 0o755) + So(err, ShouldBeNil) + }) + + Convey("应该创建基础配置文件", func() { + testConfigContent := `App: + Mode: "development" + BaseURI: "http://localhost:3000" +Http: + Port: 3000 +Database: + Host: "localhost" + Port: 5432 + Database: "default_db" + Username: "default_user" + Password: "default_password" + SslMode: "disable"` + + configPath := filepath.Join(testDir, "config.toml") + err := os.WriteFile(configPath, []byte(testConfigContent), 0o644) + So(err, ShouldBeNil) + }) + + Convey("应该成功加载并合并配置", func() { + configPath := filepath.Join(testDir, 
"config.toml") + loadedConfig, err := config.Load(configPath) + + So(err, ShouldBeNil) + So(loadedConfig, ShouldNotBeNil) + + Convey("环境变量应该覆盖配置文件", func() { + So(loadedConfig.App.Mode, ShouldEqual, "test") + So(loadedConfig.Http.Port, ShouldEqual, 9090) + So(loadedConfig.Database.Host, ShouldEqual, "test-host") + }) + + Convey("配置文件的默认值应该保留", func() { + So(loadedConfig.App.BaseURI, ShouldEqual, "http://localhost:3000") + So(loadedConfig.Database.Database, ShouldEqual, "default_db") + }) + }) + + Reset(func() { + // 清理测试目录 + os.RemoveAll(testDir) + }) + }) + + Reset(func() { + // 恢复原始环境变量 + if originalEnvVars != nil { + for key, value := range originalEnvVars { + if value == "" { + os.Unsetenv(key) + } else { + os.Setenv(key, value) + } + } + } + }) + }) +} + +// TestConfigValidation 测试配置验证 +func TestConfigValidation(t *testing.T) { + Convey("配置验证测试", t, func() { + Convey("当配置为空时", func() { + config := &config.Config{} + + Convey("应该检测到缺失的必需配置", func() { + So(config.App.Mode, ShouldBeEmpty) + So(config.Http.Port, ShouldEqual, 0) + So(config.Database.Host, ShouldBeEmpty) + }) + }) + + Convey("当配置端口无效时", func() { + config := &config.Config{ + Http: config.HttpConfig{ + Port: -1, + }, + } + + Convey("应该检测到无效端口", func() { + So(config.Http.Port, ShouldBeLessThan, 0) + }) + }) + + Convey("当配置模式有效时", func() { + validModes := []string{"development", "production", "testing"} + + for _, mode := range validModes { + config := &config.Config{ + App: config.AppConfig{ + Mode: mode, + }, + } + + Convey("模式 "+mode+" 应该是有效的", func() { + So(config.App.Mode, ShouldBeIn, validModes) + }) + } + }) + }) +} + +// TestConfigDefaults 测试配置默认值 +func TestConfigDefaults(t *testing.T) { + Convey("配置默认值测试", t, func() { + Convey("当创建新配置时", func() { + config := &config.Config{} + + Convey("应该有合理的默认值", func() { + // 测试应用的默认值 + So(config.App.Mode, ShouldEqual, "development") + + // 测试HTTP的默认值 + So(config.Http.Port, ShouldEqual, 8080) + + // 测试数据库的默认值 + So(config.Database.Port, ShouldEqual, 5432) + So(config.Database.SslMode, ShouldEqual, "disable") + + // 测试日志的默认值 + So(config.Log.Level, ShouldEqual, "info") + So(config.Log.Format, ShouldEqual, "json") + }) + }) + }) +} + +// TestConfigHelpers 测试配置辅助函数 +func TestConfigHelpers(t *testing.T) { + Convey("配置辅助函数测试", t, func() { + Convey("当使用配置辅助函数时", func() { + config := &config.Config{ + App: config.AppConfig{ + Mode: "production", + BaseURI: "https://api.example.com", + }, + Http: config.HttpConfig{ + Port: 443, + }, + } + + Convey("应该能够获取应用环境", func() { + env := config.App.Mode + So(env, ShouldEqual, "production") + }) + + Convey("应该能够构建完整URL", func() { + fullURL := config.App.BaseURI + "/api/v1/users" + So(fullURL, ShouldEqual, "https://api.example.com/api/v1/users") + }) + + Convey("应该能够判断HTTPS", func() { + isHTTPS := config.Http.Port == 443 + So(isHTTPS, ShouldBeTrue) + }) + }) + }) +} diff --git a/backend_v1/utils/build_info.go b/backend_v1/utils/build_info.go new file mode 100644 index 0000000..8dfc5a7 --- /dev/null +++ b/backend_v1/utils/build_info.go @@ -0,0 +1,44 @@ +package utils + +import "fmt" + +// 构建信息变量,通过 ldflags 在构建时注入 +var ( + // Version 应用版本信息 + Version string + + // BuildAt 构建时间 + BuildAt string + + // GitHash Git 提交哈希 + GitHash string +) + +// GetBuildInfo 获取构建信息 +func GetBuildInfo() map[string]string { + return map[string]string{ + "version": Version, + "buildAt": BuildAt, + "gitHash": GitHash, + } +} + +// PrintBuildInfo 打印构建信息 +func PrintBuildInfo(appName string) { + buildInfo := GetBuildInfo() + + println("========================================") + 
printf("🚀 %s\n", appName) + println("========================================") + printf("📋 Version: %s\n", buildInfo["version"]) + printf("🕐 Build Time: %s\n", buildInfo["buildAt"]) + printf("🔗 Git Hash: %s\n", buildInfo["gitHash"]) + println("========================================") + println("🌟 Application is starting...") + println() +} + +// 为了避免导入 fmt 包,我们使用内置的 print 和 printf 函数 +func printf(format string, args ...interface{}) { + print(fmt.Sprintf(format, args...)) +}