feat: add backend_v1 migration
Some checks failed
build quyun / Build (push) Has been cancelled

This commit is contained in:
2025-12-19 14:46:58 +08:00
parent 218eb4689c
commit 24bd161df9
119 changed files with 12259 additions and 0 deletions


@@ -0,0 +1,30 @@
package event

import "go.ipao.vip/atom/contracts"

// Channels supported by the event package.
const (
    Go    contracts.Channel = "go"
    Kafka contracts.Channel = "kafka"
    Redis contracts.Channel = "redis"
    Sql   contracts.Channel = "sql"
)

// DefaultPublishTo routes handler output to the in-process Go channel on the
// "event:processed" topic.
type DefaultPublishTo struct{}

func (d *DefaultPublishTo) PublishTo() (contracts.Channel, string) {
    return Go, "event:processed"
}

// DefaultChannel selects the in-process Go channel.
type DefaultChannel struct{}

func (d *DefaultChannel) Channel() contracts.Channel { return Go }

// KafkaChannel selects the Kafka channel.
type KafkaChannel struct{}

func (k *KafkaChannel) Channel() contracts.Channel { return Kafka }

// RedisChannel selects the Redis channel.
type RedisChannel struct{}

func (r *RedisChannel) Channel() contracts.Channel { return Redis }
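
These marker types are meant to be embedded by concrete event types. The sketch below is hypothetical and not part of this commit; it assumes contracts.EventPublisher is satisfied by Channel, Topic, and Marshal, which is what PubSub.Publish (next file) calls on an event.

package event

import "encoding/json"

// UserCreated is a hypothetical event that publishes on the in-process Go
// channel by embedding DefaultChannel; ID is an illustrative payload field.
type UserCreated struct {
    DefaultChannel
    ID string `json:"id"`
}

// Topic names the stream the event is published to.
func (e *UserCreated) Topic() string { return "user:created" }

// Marshal encodes the payload that becomes the message body.
func (e *UserCreated) Marshal() ([]byte, error) { return json.Marshal(e) }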


@@ -0,0 +1,99 @@
package event

import (
    "context"

    "github.com/ThreeDotsLabs/watermill"
    "github.com/ThreeDotsLabs/watermill/message"
    "go.ipao.vip/atom/container"
    "go.ipao.vip/atom/contracts"
    "go.ipao.vip/atom/opt"
)

// DefaultPrefix is the configuration prefix used by DefaultProvider.
const DefaultPrefix = "Events"

// DefaultProvider wires ProvideChannel into the container under DefaultPrefix.
func DefaultProvider() container.ProviderContainer {
    return container.ProviderContainer{
        Provider: ProvideChannel,
        Options: []opt.Option{
            opt.Prefix(DefaultPrefix),
        },
    }
}
// Config selects which message backends are enabled; a nil section leaves the
// corresponding backend disabled.
type Config struct {
    Sql   *ConfigSql
    Kafka *ConfigKafka
    Redis *ConfigRedis
}

type ConfigSql struct {
    ConsumerGroup string
}

type ConfigRedis struct {
    ConsumerGroup string
    Streams       []string
}

type ConfigKafka struct {
    ConsumerGroup string
    Brokers       []string
}

// PubSub bundles a watermill router with per-channel publishers and
// subscribers.
type PubSub struct {
    Router      *message.Router
    publishers  map[contracts.Channel]message.Publisher
    subscribers map[contracts.Channel]message.Subscriber
}
// Serve runs the router until the context is cancelled.
func (ps *PubSub) Serve(ctx context.Context) error {
    return ps.Router.Run(ctx)
}

// Publish marshals the event and publishes it on the event's channel and
// topic; a nil event is a no-op.
func (ps *PubSub) Publish(e contracts.EventPublisher) error {
    if e == nil {
        return nil
    }
    payload, err := e.Marshal()
    if err != nil {
        return err
    }
    msg := message.NewMessage(watermill.NewUUID(), payload)
    return ps.getPublisher(e.Channel()).Publish(e.Topic(), msg)
}

// getPublisher returns the publisher for the specified channel, falling back
// to the in-process Go channel.
func (ps *PubSub) getPublisher(channel contracts.Channel) message.Publisher {
    if pub, ok := ps.publishers[channel]; ok {
        return pub
    }
    return ps.publishers[Go]
}

// getSubscriber returns the subscriber for the specified channel, falling back
// to the in-process Go channel.
func (ps *PubSub) getSubscriber(channel contracts.Channel) message.Subscriber {
    if sub, ok := ps.subscribers[channel]; ok {
        return sub
    }
    return ps.subscribers[Go]
}

// Handle registers an event handler on the router: messages are consumed from
// the handler's channel and topic, and the handler's results are republished
// to the channel and topic returned by PublishTo.
func (ps *PubSub) Handle(handlerName string, sub contracts.EventHandler) {
    publishToCh, publishToTopic := sub.PublishTo()
    ps.Router.AddHandler(
        handlerName,
        sub.Topic(),
        ps.getSubscriber(sub.Channel()),
        publishToTopic,
        ps.getPublisher(publishToCh),
        sub.Handler,
    )
}
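
To show how these pieces fit together, here is a hypothetical handler and wiring function, not part of this commit. It assumes contracts.EventHandler is satisfied by the Channel, Topic, PublishTo, and Handler methods that Handle uses, and it reuses the illustrative UserCreated event sketched earlier.

package event

import (
    "context"

    "github.com/ThreeDotsLabs/watermill/message"
)

// WelcomeEmailHandler is a hypothetical handler: it consumes UserCreated
// events from the in-process Go channel (DefaultChannel) and republishes its
// results to "event:processed" (DefaultPublishTo).
type WelcomeEmailHandler struct {
    DefaultChannel
    DefaultPublishTo
}

func (h *WelcomeEmailHandler) Topic() string { return "user:created" }

func (h *WelcomeEmailHandler) Handler(msg *message.Message) ([]*message.Message, error) {
    // Real work would go here; returning the message forwards it to the
    // PublishTo topic.
    return []*message.Message{msg}, nil
}

// wireEvents registers the handler, publishes an example event from a
// goroutine, and runs the router until ctx is cancelled.
func wireEvents(ctx context.Context, ps *PubSub) error {
    ps.Handle("user-created-welcome", &WelcomeEmailHandler{})
    go func() { _ = ps.Publish(&UserCreated{ID: "42"}) }()
    return ps.Serve(ctx)
}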


@@ -0,0 +1,60 @@
package event

import (
    "github.com/ThreeDotsLabs/watermill"
    "github.com/sirupsen/logrus"
)

// LogrusLoggerAdapter is a watermill logger adapter for logrus.
type LogrusLoggerAdapter struct {
    log    *logrus.Logger
    fields watermill.LogFields
}

// LogrusAdapter returns a LoggerAdapter that sends all logs to the standard
// logrus logger.
func LogrusAdapter() watermill.LoggerAdapter {
    return &LogrusLoggerAdapter{log: logrus.StandardLogger()}
}
// Error logs on level error with err as field and optional fields.
func (l *LogrusLoggerAdapter) Error(msg string, err error, fields watermill.LogFields) {
    l.createEntry(fields.Add(watermill.LogFields{"err": err})).Error(msg)
}

// Info logs on level info with optional fields.
func (l *LogrusLoggerAdapter) Info(msg string, fields watermill.LogFields) {
    l.createEntry(fields).Info(msg)
}

// Debug logs on level debug with optional fields.
func (l *LogrusLoggerAdapter) Debug(msg string, fields watermill.LogFields) {
    l.createEntry(fields).Debug(msg)
}

// Trace logs on level trace with optional fields.
func (l *LogrusLoggerAdapter) Trace(msg string, fields watermill.LogFields) {
    l.createEntry(fields).Trace(msg)
}

// With returns a new LogrusLoggerAdapter that includes fields to be re-used
// between logging statements.
func (l *LogrusLoggerAdapter) With(fields watermill.LogFields) watermill.LoggerAdapter {
    return &LogrusLoggerAdapter{
        log:    l.log,
        fields: l.fields.Add(fields),
    }
}

// createEntry is a helper to add fields to a logrus entry if necessary.
func (l *LogrusLoggerAdapter) createEntry(fields watermill.LogFields) *logrus.Entry {
    entry := logrus.NewEntry(l.log)
    allFields := fields.Add(l.fields)
    if len(allFields) > 0 {
        entry = entry.WithFields(logrus.Fields(allFields))
    }
    return entry
}
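
For illustration, a hypothetical helper, not part of this commit, that hands the adapter to a watermill component with a persistent field attached via With:

package event

import (
    "github.com/ThreeDotsLabs/watermill"
    "github.com/ThreeDotsLabs/watermill/pubsub/gochannel"
)

// newLoggedGoChannel builds an in-process pub/sub whose log entries all carry
// a "component" field.
func newLoggedGoChannel() *gochannel.GoChannel {
    logger := LogrusAdapter().With(watermill.LogFields{"component": "events"})
    return gochannel.NewGoChannel(gochannel.Config{}, logger)
}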


@@ -0,0 +1,109 @@
package event

import (
    sqlDB "database/sql"

    "github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka"
    "github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream"
    "github.com/ThreeDotsLabs/watermill-sql/v3/pkg/sql"
    "github.com/ThreeDotsLabs/watermill/message"
    "github.com/ThreeDotsLabs/watermill/pubsub/gochannel"
    "github.com/redis/go-redis/v9"
    "go.ipao.vip/atom/container"
    "go.ipao.vip/atom/contracts"
    "go.ipao.vip/atom/opt"
)

// ProvideChannel registers a *PubSub in the container. The in-process Go
// channel is always available; the Kafka, Redis, and SQL transports are added
// only when the corresponding config section is present.
func ProvideChannel(opts ...opt.Option) error {
    o := opt.New(opts...)

    var config Config
    if err := o.UnmarshalConfig(&config); err != nil {
        return err
    }

    return container.Container.Provide(func() (*PubSub, error) {
        logger := LogrusAdapter()

        publishers := make(map[contracts.Channel]message.Publisher)
        subscribers := make(map[contracts.Channel]message.Subscriber)

        // In-process Go channel: always enabled.
        client := gochannel.NewGoChannel(gochannel.Config{}, logger)
        publishers[Go] = client
        subscribers[Go] = client
        // Kafka: enabled when configured.
        if config.Kafka != nil {
            kafkaPublisher, err := kafka.NewPublisher(kafka.PublisherConfig{
                Brokers:   config.Kafka.Brokers,
                Marshaler: kafka.DefaultMarshaler{},
            }, logger)
            if err != nil {
                return nil, err
            }
            publishers[Kafka] = kafkaPublisher

            kafkaSubscriber, err := kafka.NewSubscriber(kafka.SubscriberConfig{
                Brokers:       config.Kafka.Brokers,
                Unmarshaler:   kafka.DefaultMarshaler{},
                ConsumerGroup: config.Kafka.ConsumerGroup,
            }, logger)
            if err != nil {
                return nil, err
            }
            subscribers[Kafka] = kafkaSubscriber
        }
        // Redis streams: enabled when configured.
        if config.Redis != nil {
            // NOTE: rdb is never initialized here; a concrete
            // redis.UniversalClient (e.g. resolved from the container or built
            // from configuration) must be supplied before these are usable.
            var rdb redis.UniversalClient

            redisSubscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{
                Client:        rdb,
                Unmarshaller:  redisstream.DefaultMarshallerUnmarshaller{},
                ConsumerGroup: config.Redis.ConsumerGroup,
            }, logger)
            if err != nil {
                return nil, err
            }
            subscribers[Redis] = redisSubscriber

            redisPublisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{
                Client:     rdb,
                Marshaller: redisstream.DefaultMarshallerUnmarshaller{},
            }, logger)
            if err != nil {
                return nil, err
            }
            publishers[Redis] = redisPublisher
        }
        // SQL (PostgreSQL schema): enabled when configured.
        if config.Sql != nil {
            // NOTE: db is never initialized here; a concrete *sql.DB (e.g.
            // resolved from the container) must be supplied before these are
            // usable.
            var db *sqlDB.DB

            sqlPublisher, err := sql.NewPublisher(db, sql.PublisherConfig{
                SchemaAdapter:        sql.DefaultPostgreSQLSchema{},
                AutoInitializeSchema: false,
            }, logger)
            if err != nil {
                return nil, err
            }
            publishers[Sql] = sqlPublisher

            sqlSubscriber, err := sql.NewSubscriber(db, sql.SubscriberConfig{
                SchemaAdapter:  sql.DefaultPostgreSQLSchema{},
                OffsetsAdapter: sql.DefaultPostgreSQLOffsetsAdapter{}, // subscribers require an offsets adapter
                ConsumerGroup:  config.Sql.ConsumerGroup,
            }, logger)
            if err != nil {
                return nil, err
            }
            subscribers[Sql] = sqlSubscriber
        }
        router, err := message.NewRouter(message.RouterConfig{}, logger)
        if err != nil {
            return nil, err
        }

        return &PubSub{Router: router, publishers: publishers, subscribers: subscribers}, nil
    }, o.DiOptions()...)
}
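
For reference, a hypothetical configuration value and registration call, not part of this commit. The broker address and consumer group are placeholders; exampleConfig only illustrates the shape UnmarshalConfig is expected to produce under the Events prefix, and with it only the Kafka branch above would be enabled since the Redis and SQL sections stay nil.

package event

import "go.ipao.vip/atom/opt"

// exampleConfig enables the Kafka transport alongside the always-available
// in-process Go channel; nil sections leave Redis and SQL disabled.
var exampleConfig = Config{
    Kafka: &ConfigKafka{
        Brokers:       []string{"localhost:9092"}, // placeholder broker
        ConsumerGroup: "backend_v1",               // placeholder group
    },
}

// registerEvents mirrors what DefaultProvider wires up, calling the provider
// directly with the default configuration prefix.
func registerEvents() error {
    return ProvideChannel(opt.Prefix(DefaultPrefix))
}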