feat: Update project structure and configuration files
This commit is contained in:
18
backend/providers/app/app.go
Normal file
18
backend/providers/app/app.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func() (*Config, error) {
|
||||
return &config, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
179
backend/providers/app/config.gen.go
Normal file
179
backend/providers/app/config.gen.go
Normal file
@@ -0,0 +1,179 @@
|
||||
// Code generated by go-enum DO NOT EDIT.
|
||||
// Version: -
|
||||
// Revision: -
|
||||
// Build Date: -
|
||||
// Built By: -
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NOTE(review): this file is generated by go-enum ("DO NOT EDIT");
// regenerate instead of hand-editing. Comments below are review notes only.
const (
	// AppModeDevelopment is a AppMode of type development.
	AppModeDevelopment AppMode = "development"
	// AppModeRelease is a AppMode of type release.
	AppModeRelease AppMode = "release"
	// AppModeTest is a AppMode of type test.
	AppModeTest AppMode = "test"
)

// ErrInvalidAppMode is returned by ParseAppMode when the input matches no
// declared AppMode value; it lists all accepted names.
var ErrInvalidAppMode = fmt.Errorf("not a valid AppMode, try [%s]", strings.Join(_AppModeNames, ", "))

// _AppModeNames holds the string form of every AppMode, in declaration order.
var _AppModeNames = []string{
	string(AppModeDevelopment),
	string(AppModeRelease),
	string(AppModeTest),
}

// AppModeNames returns a list of possible string values of AppMode.
func AppModeNames() []string {
	// Copy so callers cannot mutate the package-level slice.
	tmp := make([]string, len(_AppModeNames))
	copy(tmp, _AppModeNames)
	return tmp
}

// AppModeValues returns a list of the values for AppMode
func AppModeValues() []AppMode {
	return []AppMode{
		AppModeDevelopment,
		AppModeRelease,
		AppModeTest,
	}
}

// String implements the Stringer interface.
func (x AppMode) String() string {
	return string(x)
}

// IsValid provides a quick way to determine if the typed value is
// part of the allowed enumerated values
func (x AppMode) IsValid() bool {
	_, err := ParseAppMode(string(x))
	return err == nil
}

// _AppModeValue maps each accepted string to its AppMode value.
var _AppModeValue = map[string]AppMode{
	"development": AppModeDevelopment,
	"release":     AppModeRelease,
	"test":        AppModeTest,
}

// ParseAppMode attempts to convert a string to a AppMode.
func ParseAppMode(name string) (AppMode, error) {
	if x, ok := _AppModeValue[name]; ok {
		return x, nil
	}
	return AppMode(""), fmt.Errorf("%s is %w", name, ErrInvalidAppMode)
}
|
||||
|
||||
// errAppModeNilPtr is returned by Scan when handed a typed nil pointer.
var errAppModeNilPtr = errors.New("value pointer is nil") // one per type for package clashes

// Scan implements the Scanner interface.
// It accepts the driver types plus AppMode / *AppMode / *string for convenience.
func (x *AppMode) Scan(value interface{}) (err error) {
	if value == nil {
		// SQL NULL scans to the zero AppMode without error.
		*x = AppMode("")
		return
	}

	// A wider range of scannable types.
	// driver.Value values at the top of the list for expediency
	switch v := value.(type) {
	case string:
		*x, err = ParseAppMode(v)
	case []byte:
		*x, err = ParseAppMode(string(v))
	case AppMode:
		*x = v
	case *AppMode:
		if v == nil {
			return errAppModeNilPtr
		}
		*x = *v
	case *string:
		if v == nil {
			return errAppModeNilPtr
		}
		*x, err = ParseAppMode(*v)
	default:
		return errors.New("invalid type for AppMode")
	}

	return
}

// Value implements the driver Valuer interface.
func (x AppMode) Value() (driver.Value, error) {
	return x.String(), nil
}

// Set implements the Golang flag.Value interface func.
func (x *AppMode) Set(val string) error {
	// On parse failure x is set to the zero AppMode and the error returned.
	v, err := ParseAppMode(val)
	*x = v
	return err
}

// Get implements the Golang flag.Getter interface func.
func (x *AppMode) Get() interface{} {
	return *x
}

// Type implements the github.com/spf13/pFlag Value interface.
func (x *AppMode) Type() string {
	return "AppMode"
}
|
||||
|
||||
// NullAppMode is a nullable AppMode for database columns that allow NULL.
type NullAppMode struct {
	AppMode AppMode
	Valid   bool // Valid is true if AppMode is not NULL
}

// NewNullAppMode builds a NullAppMode from any scannable value; invalid
// input yields Valid == false rather than an error.
func NewNullAppMode(val interface{}) (x NullAppMode) {
	err := x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	_ = err // make any errcheck linters happy
	return
}

// Scan implements the Scanner interface.
func (x *NullAppMode) Scan(value interface{}) (err error) {
	if value == nil {
		x.AppMode, x.Valid = AppMode(""), false
		return
	}

	err = x.AppMode.Scan(value)
	x.Valid = (err == nil)
	return
}

// Value implements the driver Valuer interface.
func (x NullAppMode) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	// driver.Value accepts int64 for int values.
	return string(x.AppMode), nil
}

// NullAppModeStr embeds NullAppMode; its Value returns the enum's String form.
type NullAppModeStr struct {
	NullAppMode
}

// NewNullAppModeStr builds a NullAppModeStr from any scannable value;
// invalid input yields Valid == false.
func NewNullAppModeStr(val interface{}) (x NullAppModeStr) {
	x.Scan(val) // yes, we ignore this error, it will just be an invalid value.
	return
}

// Value implements the driver Valuer interface.
func (x NullAppModeStr) Value() (driver.Value, error) {
	if !x.Valid {
		return nil, nil
	}
	return x.AppMode.String(), nil
}
|
||||
46
backend/providers/app/config.go
Normal file
46
backend/providers/app/config.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "App"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// swagger:enum AppMode
// ENUM(development, release, test)
type AppMode string

// Config is the application-level configuration, bound under the "App" prefix.
type Config struct {
	Mode        AppMode // runtime mode; see the AppMode constants
	Cert        *Cert   // optional TLS certificate material
	BaseURI     *string // optional externally-visible base URI
	StoragePath string  // filesystem path for application storage
}

// IsDevMode reports whether the application runs in development mode.
func (c *Config) IsDevMode() bool {
	return c.Mode == AppModeDevelopment
}

// IsReleaseMode reports whether the application runs in release mode.
func (c *Config) IsReleaseMode() bool {
	return c.Mode == AppModeRelease
}

// IsTestMode reports whether the application runs in test mode.
func (c *Config) IsTestMode() bool {
	return c.Mode == AppModeTest
}

// Cert groups CA/cert/key material.
// NOTE(review): whether these are file paths or inline PEM is not visible
// here — confirm at the point of use.
type Cert struct {
	CA   string
	Cert string
	Key  string
}
|
||||
61
backend/providers/cmux/config.go
Normal file
61
backend/providers/cmux/config.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"quyun/providers/grpc"
|
||||
"quyun/providers/http"
|
||||
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Cmux"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds the shared-listener settings for cmux.
type Config struct {
	Host *string // optional bind host; nil means all interfaces
	Port uint    // TCP port to listen on
}

// Address renders the "host:port" string handed to net.Listen.
func (h *Config) Address() string {
	host := ""
	if h.Host != nil {
		host = *h.Host
	}
	return fmt.Sprintf("%s:%d", host, h.Port)
}
|
||||
|
||||
type CMux struct {
|
||||
Http *http.Service
|
||||
Grpc *grpc.Grpc
|
||||
Mux cmux.CMux
|
||||
}
|
||||
|
||||
func (c *CMux) Serve() error {
|
||||
// grpcL := c.Mux.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
|
||||
// httpL := c.Mux.Match(cmux.HTTP1Fast())
|
||||
// httpL := c.Mux.Match(cmux.Any())
|
||||
httpL := c.Mux.Match(cmux.HTTP1Fast())
|
||||
grpcL := c.Mux.Match(cmux.Any())
|
||||
|
||||
var eg errgroup.Group
|
||||
eg.Go(func() error {
|
||||
return c.Grpc.ServeWithListener(grpcL)
|
||||
})
|
||||
|
||||
eg.Go(func() error {
|
||||
return c.Http.Listener(httpL)
|
||||
})
|
||||
|
||||
return c.Mux.Serve()
|
||||
}
|
||||
32
backend/providers/cmux/provider.go
Normal file
32
backend/providers/cmux/provider.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"quyun/providers/grpc"
|
||||
"quyun/providers/http"
|
||||
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
return container.Container.Provide(func(http *http.Service, grpc *grpc.Grpc) (*CMux, error) {
|
||||
l, err := net.Listen("tcp", config.Address())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &CMux{
|
||||
Http: http,
|
||||
Grpc: grpc,
|
||||
Mux: cmux.New(l),
|
||||
}, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
65
backend/providers/event/config.go
Normal file
65
backend/providers/event/config.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ThreeDotsLabs/watermill"
|
||||
"github.com/ThreeDotsLabs/watermill/message"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/contracts"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Events"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds broker settings shared by every PubSub backend
// (gochannel / kafka / redis / sql providers in this package).
type Config struct {
	// ConsumerGroup names the consumer group for backends that support one.
	ConsumerGroup string

	// Brokers lists broker addresses (used by the Kafka provider).
	Brokers []string
}

// PubSub bundles a watermill publisher, subscriber and message router.
type PubSub struct {
	Publisher  message.Publisher
	Subscriber message.Subscriber
	Router     *message.Router
}
|
||||
|
||||
func (ps *PubSub) Serve(ctx context.Context) error {
|
||||
if err := ps.Router.Run(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ps *PubSub) Handle(
|
||||
handlerName string,
|
||||
consumerTopic string,
|
||||
publisherTopic string,
|
||||
handler message.HandlerFunc,
|
||||
) {
|
||||
ps.Router.AddHandler(handlerName, consumerTopic, ps.Subscriber, publisherTopic, ps.Publisher, handler)
|
||||
}
|
||||
|
||||
// publish
|
||||
func (ps *PubSub) Publish(e contracts.EventPublisher) error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
payload, err := e.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := message.NewMessage(watermill.NewUUID(), payload)
|
||||
return ps.Publisher.Publish(e.Topic(), msg)
|
||||
}
|
||||
60
backend/providers/event/logrus_adapter.go
Normal file
60
backend/providers/event/logrus_adapter.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"github.com/ThreeDotsLabs/watermill"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// LogrusLoggerAdapter is a watermill logger adapter for logrus.
type LogrusLoggerAdapter struct {
	log    *logrus.Logger      // destination logger
	fields watermill.LogFields // persistent fields added to every entry
}

// LogrusAdapter returns a LogrusLoggerAdapter that sends all logs to
// the logrus standard logger.
func LogrusAdapter() watermill.LoggerAdapter {
	return &LogrusLoggerAdapter{log: logrus.StandardLogger()}
}
|
||||
|
||||
// Error logs on level error with err as field and optional fields.
|
||||
func (l *LogrusLoggerAdapter) Error(msg string, err error, fields watermill.LogFields) {
|
||||
l.createEntry(fields.Add(watermill.LogFields{"err": err})).Error(msg)
|
||||
}
|
||||
|
||||
// Info logs on level info with optional fields.
|
||||
func (l *LogrusLoggerAdapter) Info(msg string, fields watermill.LogFields) {
|
||||
l.createEntry(fields).Info(msg)
|
||||
}
|
||||
|
||||
// Debug logs on level debug with optional fields.
|
||||
func (l *LogrusLoggerAdapter) Debug(msg string, fields watermill.LogFields) {
|
||||
l.createEntry(fields).Debug(msg)
|
||||
}
|
||||
|
||||
// Trace logs on level trace with optional fields.
|
||||
func (l *LogrusLoggerAdapter) Trace(msg string, fields watermill.LogFields) {
|
||||
l.createEntry(fields).Trace(msg)
|
||||
}
|
||||
|
||||
// With returns a new LogrusLoggerAdapter that includes fields
|
||||
// to be re-used between logging statements.
|
||||
func (l *LogrusLoggerAdapter) With(fields watermill.LogFields) watermill.LoggerAdapter {
|
||||
return &LogrusLoggerAdapter{
|
||||
log: l.log,
|
||||
fields: l.fields.Add(fields),
|
||||
}
|
||||
}
|
||||
|
||||
// createEntry is a helper to add fields to a logrus entry if necessary.
func (l *LogrusLoggerAdapter) createEntry(fields watermill.LogFields) *logrus.Entry {
	entry := logrus.NewEntry(l.log)

	// NOTE(review): fields.Add(l.fields) means the adapter's persistent
	// fields win over per-call fields on a key collision; confirm that
	// precedence is intended — the reverse (call-site wins) is more common.
	allFields := fields.Add(l.fields)

	if len(allFields) > 0 {
		entry = entry.WithFields(logrus.Fields(allFields))
	}

	return entry
}
|
||||
33
backend/providers/event/provider.go
Normal file
33
backend/providers/event/provider.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/ThreeDotsLabs/watermill/message"
|
||||
"github.com/ThreeDotsLabs/watermill/pubsub/gochannel"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func() (*PubSub, error) {
|
||||
logger := LogrusAdapter()
|
||||
|
||||
client := gochannel.NewGoChannel(gochannel.Config{}, logger)
|
||||
router, err := message.NewRouter(message.RouterConfig{}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PubSub{
|
||||
Publisher: client,
|
||||
Subscriber: client,
|
||||
Router: router,
|
||||
}, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
49
backend/providers/event/provider_kafka.go
Normal file
49
backend/providers/event/provider_kafka.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/ThreeDotsLabs/watermill-kafka/v3/pkg/kafka"
|
||||
"github.com/ThreeDotsLabs/watermill/message"
|
||||
)
|
||||
|
||||
func ProvideKafka(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func() (*PubSub, error) {
|
||||
logger := LogrusAdapter()
|
||||
|
||||
publisher, err := kafka.NewPublisher(kafka.PublisherConfig{
|
||||
Brokers: config.Brokers,
|
||||
Marshaler: kafka.DefaultMarshaler{},
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subscriber, err := kafka.NewSubscriber(kafka.SubscriberConfig{
|
||||
Brokers: config.Brokers,
|
||||
Unmarshaler: kafka.DefaultMarshaler{},
|
||||
ConsumerGroup: config.ConsumerGroup,
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
router, err := message.NewRouter(message.RouterConfig{}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PubSub{
|
||||
Publisher: publisher,
|
||||
Subscriber: subscriber,
|
||||
Router: router,
|
||||
}, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
50
backend/providers/event/provider_redis.go
Normal file
50
backend/providers/event/provider_redis.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/ThreeDotsLabs/watermill-redisstream/pkg/redisstream"
|
||||
"github.com/ThreeDotsLabs/watermill/message"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func ProvideRedis(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func(rdb redis.UniversalClient) (*PubSub, error) {
|
||||
logger := LogrusAdapter()
|
||||
|
||||
subscriber, err := redisstream.NewSubscriber(redisstream.SubscriberConfig{
|
||||
Client: rdb,
|
||||
Unmarshaller: redisstream.DefaultMarshallerUnmarshaller{},
|
||||
ConsumerGroup: config.ConsumerGroup,
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
publisher, err := redisstream.NewPublisher(redisstream.PublisherConfig{
|
||||
Client: rdb,
|
||||
Marshaller: redisstream.DefaultMarshallerUnmarshaller{},
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
router, err := message.NewRouter(message.RouterConfig{}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PubSub{
|
||||
Publisher: publisher,
|
||||
Subscriber: subscriber,
|
||||
Router: router,
|
||||
}, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
50
backend/providers/event/provider_sql.go
Normal file
50
backend/providers/event/provider_sql.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
sqlDB "database/sql"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/ThreeDotsLabs/watermill-sql/v3/pkg/sql"
|
||||
"github.com/ThreeDotsLabs/watermill/message"
|
||||
)
|
||||
|
||||
func ProvideSQL(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func(db *sqlDB.DB) (*PubSub, error) {
|
||||
logger := LogrusAdapter()
|
||||
|
||||
publisher, err := sql.NewPublisher(db, sql.PublisherConfig{
|
||||
SchemaAdapter: sql.DefaultPostgreSQLSchema{},
|
||||
AutoInitializeSchema: false,
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subscriber, err := sql.NewSubscriber(db, sql.SubscriberConfig{
|
||||
SchemaAdapter: sql.DefaultPostgreSQLSchema{},
|
||||
ConsumerGroup: config.ConsumerGroup,
|
||||
}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
router, err := message.NewRouter(message.RouterConfig{}, logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PubSub{
|
||||
Publisher: publisher,
|
||||
Subscriber: subscriber,
|
||||
Router: router,
|
||||
}, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
53
backend/providers/grpc/config.go
Normal file
53
backend/providers/grpc/config.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Grpc"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds the gRPC listener settings.
type Config struct {
	Host *string // optional bind host; nil means all interfaces
	Port uint    // TCP port to listen on
}

// Address renders the "host:port" string handed to net.Listen.
func (h *Config) Address() string {
	host := ""
	if h.Host != nil {
		host = *h.Host
	}
	return fmt.Sprintf("%s:%d", host, h.Port)
}
|
||||
|
||||
type Grpc struct {
|
||||
Server *grpc.Server
|
||||
config *Config
|
||||
}
|
||||
|
||||
// Serve
|
||||
func (g *Grpc) Serve() error {
|
||||
l, err := net.Listen("tcp", g.config.Address())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return g.Server.Serve(l)
|
||||
}
|
||||
|
||||
func (g *Grpc) ServeWithListener(ln net.Listener) error {
|
||||
return g.Server.Serve(ln)
|
||||
}
|
||||
27
backend/providers/grpc/provider.go
Normal file
27
backend/providers/grpc/provider.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
return container.Container.Provide(func() (*Grpc, error) {
|
||||
server := grpc.NewServer()
|
||||
|
||||
grpc := &Grpc{
|
||||
Server: server,
|
||||
config: &config,
|
||||
}
|
||||
container.AddCloseAble(grpc.Server.GracefulStop)
|
||||
|
||||
return grpc, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
23
backend/providers/hashids/config.go
Normal file
23
backend/providers/hashids/config.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package hashids
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "HashIDs"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config controls hashid generation; empty fields fall back to defaults
// applied in Provide.
type Config struct {
	Alphabet  string // character set used for encoding
	Salt      string // secret salt; SHOULD be set per deployment
	MinLength uint   // minimum length of generated ids
}
|
||||
35
backend/providers/hashids/hashids.go
Normal file
35
backend/providers/hashids/hashids.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package hashids
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/speps/go-hashids/v2"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
return container.Container.Provide(func() (*hashids.HashID, error) {
|
||||
data := hashids.NewData()
|
||||
data.MinLength = int(config.MinLength)
|
||||
if data.MinLength == 0 {
|
||||
data.MinLength = 10
|
||||
}
|
||||
|
||||
data.Salt = config.Salt
|
||||
if data.Salt == "" {
|
||||
data.Salt = "default-salt-key"
|
||||
}
|
||||
|
||||
data.Alphabet = config.Alphabet
|
||||
if config.Alphabet == "" {
|
||||
data.Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
}
|
||||
|
||||
return hashids.NewWithData(data)
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
38
backend/providers/http/config.go
Normal file
38
backend/providers/http/config.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the configuration prefix for the HTTP provider.
const DefaultPrefix = "Http"

// Config holds the HTTP server settings, bound under the "Http" prefix.
type Config struct {
	StaticPath  *string // optional filesystem path for static assets
	StaticRoute *string // optional route under which static assets are served
	BaseURI     *string // optional externally-visible base URI
	Port        uint    // TCP port to listen on
	Tls         *Tls    // optional TLS material; nil disables TLS
	Cors        *Cors   // optional CORS policy
}

// Tls holds the certificate/key pair used by the listener.
// NOTE(review): whether these are file paths or inline PEM is decided by
// fiber's ListenConfig (CertFile/CertKeyFile suggest paths).
type Tls struct {
	Cert string
	Key  string
}

// Cors describes the CORS policy mode and its per-origin whitelist.
type Cors struct {
	Mode      string
	Whitelist []Whitelist
}

// Whitelist is one CORS whitelist entry (headers/methods as header values).
type Whitelist struct {
	AllowOrigin      string
	AllowHeaders     string
	AllowMethods     string
	ExposeHeaders    string
	AllowCredentials bool
}

// Address renders the ":port" listen address (binds on all interfaces).
func (h *Config) Address() string {
	return fmt.Sprintf(":%d", h.Port)
}
|
||||
100
backend/providers/http/engine.go
Normal file
100
backend/providers/http/engine.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/gofiber/fiber/v3"
|
||||
"github.com/gofiber/fiber/v3/middleware/logger"
|
||||
"github.com/gofiber/fiber/v3/middleware/recover"
|
||||
)
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Service wraps the fiber application together with its configuration.
type Service struct {
	conf   *Config    // resolved HTTP configuration
	Engine *fiber.App // the underlying fiber application
}
|
||||
|
||||
func (svc *Service) listenerConfig() fiber.ListenConfig {
|
||||
listenConfig := fiber.ListenConfig{
|
||||
EnablePrintRoutes: true,
|
||||
OnShutdownSuccess: func() {
|
||||
log.Info("http server shutdown success")
|
||||
},
|
||||
OnShutdownError: func(err error) {
|
||||
log.Error("http server shutdown error: ", err)
|
||||
},
|
||||
|
||||
// DisableStartupMessage: true,
|
||||
}
|
||||
|
||||
if svc.conf.Tls != nil {
|
||||
if svc.conf.Tls.Cert == "" || svc.conf.Tls.Key == "" {
|
||||
panic(errors.New("tls cert and key must be set"))
|
||||
}
|
||||
listenConfig.CertFile = svc.conf.Tls.Cert
|
||||
listenConfig.CertKeyFile = svc.conf.Tls.Key
|
||||
}
|
||||
container.AddCloseAble(func() {
|
||||
svc.Engine.Shutdown()
|
||||
})
|
||||
return listenConfig
|
||||
}
|
||||
|
||||
func (svc *Service) Listener(ln net.Listener) error {
|
||||
return svc.Engine.Listener(ln, svc.listenerConfig())
|
||||
}
|
||||
|
||||
func (svc *Service) Serve() error {
|
||||
return svc.Engine.Listen(svc.conf.Address(), svc.listenerConfig())
|
||||
}
|
||||
|
||||
// Provide registers the fiber-based *Service in the DI container, with
// panic recovery, optional static serving, and request logging installed.
func Provide(opts ...opt.Option) error {
	o := opt.New(opts...)
	var config Config
	if err := o.UnmarshalConfig(&config); err != nil {
		return err
	}

	return container.Container.Provide(func() (*Service, error) {
		engine := fiber.New(fiber.Config{
			StrictRouting: true,
		})
		// Recover from handler panics and log the stack instead of crashing.
		engine.Use(recover.New(recover.Config{
			EnableStackTrace: true,
			StackTraceHandler: func(c fiber.Ctx, e any) {
				log.Error(fmt.Sprintf("panic: %v\n%s\n", e, debug.Stack()))
			},
		}))

		// NOTE(review): this passes *string values straight to engine.Use;
		// fiber expects a route string and a handler/middleware — confirm
		// this behaves as intended (likely should dereference the pointers
		// and mount the static middleware instead).
		if config.StaticRoute != nil && config.StaticPath != nil {
			engine.Use(config.StaticRoute, config.StaticPath)
		}

		engine.Use(logger.New(logger.Config{
			Format:     `[${ip}:${port}] - [${time}] - ${method} - ${status} - ${path} ${latency} "${ua}"` + "\n",
			TimeFormat: time.RFC1123,
			TimeZone:   "Asia/Shanghai",
		}))

		return &Service{
			Engine: engine,
			conf:   &config,
		}, nil
	}, o.DiOptions()...)
}
|
||||
317
backend/providers/http/swagger/config.go
Normal file
317
backend/providers/http/swagger/config.go
Normal file
@@ -0,0 +1,317 @@
|
||||
package swagger
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
)
|
||||
|
||||
// Config stores SwaggerUI configuration variables
|
||||
type Config struct {
|
||||
// This parameter can be used to name different swagger document instances.
|
||||
// default: ""
|
||||
InstanceName string `json:"-"`
|
||||
|
||||
// Title pointing to title of HTML page.
|
||||
// default: "Swagger UI"
|
||||
Title string `json:"-"`
|
||||
|
||||
// URL to fetch external configuration document from.
|
||||
// default: ""
|
||||
ConfigURL string `json:"configUrl,omitempty"`
|
||||
|
||||
// The URL pointing to API definition (normally swagger.json or swagger.yaml).
|
||||
// default: "doc.json"
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
// Enables overriding configuration parameters via URL search params.
|
||||
// default: false
|
||||
QueryConfigEnabled bool `json:"queryConfigEnabled,omitempty"`
|
||||
|
||||
// The name of a component available via the plugin system to use as the top-level layout for Swagger UI.
|
||||
// default: "StandaloneLayout"
|
||||
Layout string `json:"layout,omitempty"`
|
||||
|
||||
// An array of plugin functions to use in Swagger UI.
|
||||
// default: [SwaggerUIBundle.plugins.DownloadUrl]
|
||||
Plugins []template.JS `json:"-"`
|
||||
|
||||
// An array of presets to use in Swagger UI. Usually, you'll want to include ApisPreset if you use this option.
|
||||
// default: [SwaggerUIBundle.presets.apis, SwaggerUIStandalonePreset]
|
||||
Presets []template.JS `json:"-"`
|
||||
|
||||
// If set to true, enables deep linking for tags and operations.
|
||||
// default: true
|
||||
DeepLinking bool `json:"deepLinking"`
|
||||
|
||||
// Controls the display of operationId in operations list.
|
||||
// default: false
|
||||
DisplayOperationId bool `json:"displayOperationId,omitempty"`
|
||||
|
||||
// The default expansion depth for models (set to -1 completely hide the models).
|
||||
// default: 1
|
||||
DefaultModelsExpandDepth int `json:"defaultModelsExpandDepth,omitempty"`
|
||||
|
||||
// The default expansion depth for the model on the model-example section.
|
||||
// default: 1
|
||||
DefaultModelExpandDepth int `json:"defaultModelExpandDepth,omitempty"`
|
||||
|
||||
// Controls how the model is shown when the API is first rendered.
|
||||
// The user can always switch the rendering for a given model by clicking the 'Model' and 'Example Value' links.
|
||||
// default: "example"
|
||||
DefaultModelRendering string `json:"defaultModelRendering,omitempty"`
|
||||
|
||||
// Controls the display of the request duration (in milliseconds) for "Try it out" requests.
|
||||
// default: false
|
||||
DisplayRequestDuration bool `json:"displayRequestDuration,omitempty"`
|
||||
|
||||
// Controls the default expansion setting for the operations and tags.
|
||||
// 'list' (default, expands only the tags),
|
||||
// 'full' (expands the tags and operations),
|
||||
// 'none' (expands nothing)
|
||||
DocExpansion string `json:"docExpansion,omitempty"`
|
||||
|
||||
// If set, enables filtering. The top bar will show an edit box that you can use to filter the tagged operations that are shown.
|
||||
// Can be Boolean to enable or disable, or a string, in which case filtering will be enabled using that string as the filter expression.
|
||||
// Filtering is case sensitive matching the filter expression anywhere inside the tag.
|
||||
// default: false
|
||||
Filter FilterConfig `json:"-"`
|
||||
|
||||
// If set, limits the number of tagged operations displayed to at most this many. The default is to show all operations.
|
||||
// default: 0
|
||||
MaxDisplayedTags int `json:"maxDisplayedTags,omitempty"`
|
||||
|
||||
// Controls the display of vendor extension (x-) fields and values for Operations, Parameters, Responses, and Schema.
|
||||
// default: false
|
||||
ShowExtensions bool `json:"showExtensions,omitempty"`
|
||||
|
||||
// Controls the display of extensions (pattern, maxLength, minLength, maximum, minimum) fields and values for Parameters.
|
||||
// default: false
|
||||
ShowCommonExtensions bool `json:"showCommonExtensions,omitempty"`
|
||||
|
||||
// Apply a sort to the tag list of each API. It can be 'alpha' (sort by paths alphanumerically) or a function (see Array.prototype.sort().
|
||||
// to learn how to write a sort function). Two tag name strings are passed to the sorter for each pass.
|
||||
// default: "" -> Default is the order determined by Swagger UI.
|
||||
TagsSorter template.JS `json:"-"`
|
||||
|
||||
// Provides a mechanism to be notified when Swagger UI has finished rendering a newly provided definition.
|
||||
// default: "" -> Function=NOOP
|
||||
OnComplete template.JS `json:"-"`
|
||||
|
||||
// An object with the activate and theme properties.
|
||||
SyntaxHighlight *SyntaxHighlightConfig `json:"-"`
|
||||
|
||||
// Controls whether the "Try it out" section should be enabled by default.
|
||||
// default: false
|
||||
TryItOutEnabled bool `json:"tryItOutEnabled,omitempty"`
|
||||
|
||||
// Enables the request snippet section. When disabled, the legacy curl snippet will be used.
|
||||
// default: false
|
||||
RequestSnippetsEnabled bool `json:"requestSnippetsEnabled,omitempty"`
|
||||
|
||||
// OAuth redirect URL.
|
||||
// default: ""
|
||||
OAuth2RedirectUrl string `json:"oauth2RedirectUrl,omitempty"`
|
||||
|
||||
// MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 requests.
|
||||
// Accepts one argument requestInterceptor(request) and must return the modified request, or a Promise that resolves to the modified request.
|
||||
// default: ""
|
||||
RequestInterceptor template.JS `json:"-"`
|
||||
|
||||
// If set, MUST be an array of command line options available to the curl command. This can be set on the mutated request in the requestInterceptor function.
|
||||
// For example request.curlOptions = ["-g", "--limit-rate 20k"]
|
||||
// default: nil
|
||||
RequestCurlOptions []string `json:"request.curlOptions,omitempty"`
|
||||
|
||||
// MUST be a function. Function to intercept remote definition, "Try it out", and OAuth 2.0 responses.
|
||||
// Accepts one argument responseInterceptor(response) and must return the modified response, or a Promise that resolves to the modified response.
|
||||
// default: ""
|
||||
ResponseInterceptor template.JS `json:"-"`
|
||||
|
||||
// If set to true, uses the mutated request returned from a requestInterceptor to produce the curl command in the UI,
|
||||
// otherwise the request before the requestInterceptor was applied is used.
|
||||
// default: true
|
||||
ShowMutatedRequest bool `json:"showMutatedRequest"`
|
||||
|
||||
// List of HTTP methods that have the "Try it out" feature enabled. An empty array disables "Try it out" for all operations.
|
||||
// This does not filter the operations from the display.
|
||||
// Possible values are ["get", "put", "post", "delete", "options", "head", "patch", "trace"]
|
||||
// default: nil
|
||||
SupportedSubmitMethods []string `json:"supportedSubmitMethods,omitempty"`
|
||||
|
||||
// By default, Swagger UI attempts to validate specs against swagger.io's online validator. You can use this parameter to set a different validator URL.
|
||||
// For example for locally deployed validators (https://github.com/swagger-api/validator-badge).
|
||||
// Setting it to either none, 127.0.0.1 or localhost will disable validation.
|
||||
// default: ""
|
||||
ValidatorUrl string `json:"validatorUrl,omitempty"`
|
||||
|
||||
// If set to true, enables passing credentials, as defined in the Fetch standard, in CORS requests that are sent by the browser.
|
||||
// Note that Swagger UI cannot currently set cookies cross-domain (see https://github.com/swagger-api/swagger-js/issues/1163).
|
||||
// as a result, you will have to rely on browser-supplied cookies (which this setting enables sending) that Swagger UI cannot control.
|
||||
// default: false
|
||||
WithCredentials bool `json:"withCredentials,omitempty"`
|
||||
|
||||
// Function to set default values to each property in model. Accepts one argument modelPropertyMacro(property), property is immutable.
|
||||
// default: ""
|
||||
ModelPropertyMacro template.JS `json:"-"`
|
||||
|
||||
// Function to set default value to parameters. Accepts two arguments parameterMacro(operation, parameter).
|
||||
// Operation and parameter are objects passed for context, both remain immutable.
|
||||
// default: ""
|
||||
ParameterMacro template.JS `json:"-"`
|
||||
|
||||
// If set to true, it persists authorization data and it would not be lost on browser close/refresh.
|
||||
// default: false
|
||||
PersistAuthorization bool `json:"persistAuthorization,omitempty"`
|
||||
|
||||
// Configuration information for OAuth2, optional if using OAuth2
|
||||
OAuth *OAuthConfig `json:"-"`
|
||||
|
||||
// (authDefinitionKey, username, password) => action
|
||||
// Programmatically set values for a Basic authorization scheme.
|
||||
// default: ""
|
||||
PreauthorizeBasic template.JS `json:"-"`
|
||||
|
||||
// (authDefinitionKey, apiKeyValue) => action
|
||||
// Programmatically set values for an API key or Bearer authorization scheme.
|
||||
// In case of OpenAPI 3.0 Bearer scheme, apiKeyValue must contain just the token itself without the Bearer prefix.
|
||||
// default: ""
|
||||
PreauthorizeApiKey template.JS `json:"-"`
|
||||
|
||||
// Applies custom CSS styles.
|
||||
// default: ""
|
||||
CustomStyle template.CSS `json:"-"`
|
||||
|
||||
// Applies custom JavaScript scripts.
|
||||
// default ""
|
||||
CustomScript template.JS `json:"-"`
|
||||
}
|
||||
|
||||
// FilterConfig controls Swagger UI's operation-filter box. When Expression
// is non-empty it is used as the filter string; otherwise Enabled toggles
// the feature as a plain boolean.
type FilterConfig struct {
	// Enabled turns filtering on or off when no Expression is set.
	Enabled bool
	// Expression, when non-empty, enables filtering with this filter string.
	Expression string
}

// Value renders the config as the value Swagger UI expects in its `filter`
// setting: the expression string when present, otherwise the boolean flag.
func (fc FilterConfig) Value() interface{} {
	if fc.Expression == "" {
		return fc.Enabled
	}
	return fc.Expression
}
|
||||
|
||||
// SyntaxHighlightConfig configures Swagger UI's syntaxHighlight setting.
type SyntaxHighlightConfig struct {
	// Activate: whether syntax highlighting should be on.
	// default: true
	Activate bool `json:"activate"`
	// Theme: Highlight.js syntax coloring theme to use.
	// Possible values are ["agate", "arta", "monokai", "nord", "obsidian", "tomorrow-night"]
	// default: "agate"
	Theme string `json:"theme,omitempty"`
}

// Value renders the config as Swagger UI expects it: the literal false when
// highlighting is deactivated, otherwise the config object itself.
func (shc SyntaxHighlightConfig) Value() interface{} {
	if !shc.Activate {
		return false
	}
	return shc
}
|
||||
|
||||
// OAuthConfig holds the OAuth2 settings passed to Swagger UI's initOAuth
// call (see the `ui.initOAuth({{.OAuth}})` section of the index template).
type OAuthConfig struct {
	// ID of the client sent to the OAuth2 provider.
	// default: ""
	ClientId string `json:"clientId,omitempty"`

	// Never use this parameter in your production environment.
	// It exposes crucial security information. This feature is intended for dev/test environments only.
	// Secret of the client sent to the OAuth2 provider.
	// default: ""
	ClientSecret string `json:"clientSecret,omitempty"`

	// Application name, displayed in authorization popup.
	// default: ""
	AppName string `json:"appName,omitempty"`

	// Realm query parameter (for oauth1) added to authorizationUrl and tokenUrl.
	// default: ""
	Realm string `json:"realm,omitempty"`

	// String array of initially selected oauth scopes.
	// default: nil
	Scopes []string `json:"scopes,omitempty"`

	// Additional query parameters added to authorizationUrl and tokenUrl.
	// default: nil
	AdditionalQueryStringParams map[string]string `json:"additionalQueryStringParams,omitempty"`

	// Only activated for the accessCode flow.
	// During the authorization_code request to the tokenUrl, pass the Client Password using the HTTP Basic Authentication scheme
	// (Authorization header with Basic base64encode(client_id + client_secret)).
	// default: false
	UseBasicAuthenticationWithAccessCodeGrant bool `json:"useBasicAuthenticationWithAccessCodeGrant,omitempty"`

	// Only applies to authorizationCode flows.
	// Proof Key for Code Exchange brings enhanced security for OAuth public clients.
	// default: false
	UsePkceWithAuthorizationCodeGrant bool `json:"usePkceWithAuthorizationCodeGrant,omitempty"`
}
|
||||
|
||||
// ConfigDefault is the baseline Swagger UI configuration applied when the
// caller passes no Config, and the source of fallback values for zero-valued
// fields (see configDefault). It wires the standard bundle plugins and
// presets and enables syntax highlighting with the "agate" theme.
var ConfigDefault = Config{
	Title:  "Swagger UI",
	Layout: "StandaloneLayout",
	Plugins: []template.JS{
		template.JS("SwaggerUIBundle.plugins.DownloadUrl"),
	},
	Presets: []template.JS{
		template.JS("SwaggerUIBundle.presets.apis"),
		template.JS("SwaggerUIStandalonePreset"),
	},
	DeepLinking:              true,
	DefaultModelsExpandDepth: 1,
	DefaultModelExpandDepth:  1,
	DefaultModelRendering:    "example",
	DocExpansion:             "list",
	SyntaxHighlight: &SyntaxHighlightConfig{
		Activate: true,
		Theme:    "agate",
	},
	ShowMutatedRequest: true,
}
|
||||
|
||||
// Helper function to set default values
|
||||
func configDefault(config ...Config) Config {
|
||||
// Return default config if nothing provided
|
||||
if len(config) < 1 {
|
||||
return ConfigDefault
|
||||
}
|
||||
|
||||
// Override default config
|
||||
cfg := config[0]
|
||||
|
||||
if cfg.Title == "" {
|
||||
cfg.Title = ConfigDefault.Title
|
||||
}
|
||||
|
||||
if cfg.Layout == "" {
|
||||
cfg.Layout = ConfigDefault.Layout
|
||||
}
|
||||
|
||||
if cfg.DefaultModelRendering == "" {
|
||||
cfg.DefaultModelRendering = ConfigDefault.DefaultModelRendering
|
||||
}
|
||||
|
||||
if cfg.DocExpansion == "" {
|
||||
cfg.DocExpansion = ConfigDefault.DocExpansion
|
||||
}
|
||||
|
||||
if cfg.Plugins == nil {
|
||||
cfg.Plugins = ConfigDefault.Plugins
|
||||
}
|
||||
|
||||
if cfg.Presets == nil {
|
||||
cfg.Presets = ConfigDefault.Presets
|
||||
}
|
||||
|
||||
if cfg.SyntaxHighlight == nil {
|
||||
cfg.SyntaxHighlight = ConfigDefault.SyntaxHighlight
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
103
backend/providers/http/swagger/swagger.go
Normal file
103
backend/providers/http/swagger/swagger.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package swagger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gofiber/fiber/v3"
|
||||
"github.com/gofiber/fiber/v3/middleware/static"
|
||||
"github.com/gofiber/utils/v2"
|
||||
"github.com/rogeecn/swag"
|
||||
swaggerFiles "github.com/swaggo/files/v2"
|
||||
)
|
||||
|
||||
const (
	// defaultDocURL is the path, relative to the handler's mount prefix,
	// that serves the generated OpenAPI document as JSON.
	defaultDocURL = "doc.json"
	// defaultIndex is the Swagger UI HTML entry page.
	defaultIndex = "index.html"
)

// HandlerDefault is a ready-to-mount handler built from ConfigDefault.
// Note this runs New() at package init time.
var HandlerDefault = New()

// New returns a fiber handler serving the Swagger UI: the rendered index
// page, the swagger JSON document, and the bundled static assets. The
// handler is expected to be mounted on a wildcard route (it reads the "*"
// route parameter).
func New(config ...Config) fiber.Handler {
	cfg := configDefault(config...)

	// Parse the index template once at construction; a broken template is a
	// programmer error, so fail fast.
	index, err := template.New("swagger_index.html").Parse(indexTmpl)
	if err != nil {
		panic(fmt.Errorf("fiber: swagger middleware error -> %w", err))
	}

	var (
		prefix string
		once   sync.Once
	)

	return func(c fiber.Ctx) error {
		// Derive the mount prefix once, from the first request: the route
		// path with the wildcard stripped, prepended with any
		// X-Forwarded-Prefix supplied by a reverse proxy.
		once.Do(
			func() {
				prefix = strings.ReplaceAll(c.Route().Path, "*", "")

				forwardedPrefix := getForwardedPrefix(c)
				if forwardedPrefix != "" {
					prefix = forwardedPrefix + prefix
				}

				// Default the doc URL to <prefix>/doc.json if unset.
				if len(cfg.URL) == 0 {
					cfg.URL = path.Join(prefix, defaultDocURL)
				}
			},
		)

		p := c.Path(utils.CopyString(c.Params("*")))

		switch p {
		case defaultIndex:
			c.Type("html")
			return index.Execute(c, cfg)
		case defaultDocURL:
			var doc string
			if doc, err = swag.ReadDoc(cfg.InstanceName); err != nil {
				return err
			}
			return c.Type("json").SendString(doc)
		case "", "/":
			return c.Redirect().To(path.Join(prefix, defaultIndex))
		default:
			// return fs(c)
			// NOTE(review): a static handler is constructed on every asset
			// request; consider hoisting it out of the closure.
			return static.New("/swagger", static.Config{
				FS:     swaggerFiles.FS,
				Browse: true,
			})(c)
		}
	}
}
|
||||
|
||||
func getForwardedPrefix(c fiber.Ctx) string {
|
||||
header := c.GetReqHeaders()["X-Forwarded-Prefix"]
|
||||
|
||||
if len(header) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
prefix := ""
|
||||
|
||||
for _, rawPrefix := range header {
|
||||
endIndex := len(rawPrefix)
|
||||
for endIndex > 1 && rawPrefix[endIndex-1] == '/' {
|
||||
endIndex--
|
||||
}
|
||||
|
||||
if endIndex != len(rawPrefix) {
|
||||
prefix += rawPrefix[:endIndex]
|
||||
} else {
|
||||
prefix += rawPrefix
|
||||
}
|
||||
}
|
||||
|
||||
return prefix
|
||||
}
|
||||
107
backend/providers/http/swagger/template.go
Normal file
107
backend/providers/http/swagger/template.go
Normal file
@@ -0,0 +1,107 @@
|
||||
package swagger
|
||||
|
||||
// indexTmpl is the html/template source for the Swagger UI index page.
// It is parsed once in New(); the data passed to Execute is the resolved
// Config, whose fields with `json:"-"` tags (Filter, SyntaxHighlight,
// template.JS hooks, OAuth, styles/scripts) are injected here explicitly
// while the rest is serialized wholesale via `config = {{.}}`.
const indexTmpl string = `
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <title>{{.Title}}</title>
  <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,700|Source+Code+Pro:300,600|Titillium+Web:400,600,700" rel="stylesheet">
  <link rel="stylesheet" type="text/css" href="./swagger-ui.css" >
  <link rel="icon" type="image/png" href="./favicon-32x32.png" sizes="32x32" />
  <link rel="icon" type="image/png" href="./favicon-16x16.png" sizes="16x16" />
  {{- if .CustomStyle}}
  <style>
    body { margin: 0; }
    {{.CustomStyle}}
  </style>
  {{- end}}
  {{- if .CustomScript}}
  <script>
    {{.CustomScript}}
  </script>
  {{- end}}
</head>
<body>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" style="position:absolute;width:0;height:0">
  <defs>
    <symbol viewBox="0 0 20 20" id="unlocked">
      <path d="M15.8 8H14V5.6C14 2.703 12.665 1 10 1 7.334 1 6 2.703 6 5.6V6h2v-.801C8 3.754 8.797 3 10 3c1.203 0 2 .754 2 2.199V8H4c-.553 0-1 .646-1 1.199V17c0 .549.428 1.139.951 1.307l1.197.387C5.672 18.861 6.55 19 7.1 19h5.8c.549 0 1.428-.139 1.951-.307l1.196-.387c.524-.167.953-.757.953-1.306V9.199C17 8.646 16.352 8 15.8 8z"></path>
    </symbol>
    <symbol viewBox="0 0 20 20" id="locked">
      <path d="M15.8 8H14V5.6C14 2.703 12.665 1 10 1 7.334 1 6 2.703 6 5.6V8H4c-.553 0-1 .646-1 1.199V17c0 .549.428 1.139.951 1.307l1.197.387C5.672 18.861 6.55 19 7.1 19h5.8c.549 0 1.428-.139 1.951-.307l1.196-.387c.524-.167.953-.757.953-1.306V9.199C17 8.646 16.352 8 15.8 8zM12 8H8V5.199C8 3.754 8.797 3 10 3c1.203 0 2 .754 2 2.199V8z"/>
    </symbol>
    <symbol viewBox="0 0 20 20" id="close">
      <path d="M14.348 14.849c-.469.469-1.229.469-1.697 0L10 11.819l-2.651 3.029c-.469.469-1.229.469-1.697 0-.469-.469-.469-1.229 0-1.697l2.758-3.15-2.759-3.152c-.469-.469-.469-1.228 0-1.697.469-.469 1.228-.469 1.697 0L10 8.183l2.651-3.031c.469-.469 1.228-.469 1.697 0 .469.469.469 1.229 0 1.697l-2.758 3.152 2.758 3.15c.469.469.469 1.229 0 1.698z"/>
    </symbol>
    <symbol viewBox="0 0 20 20" id="large-arrow">
      <path d="M13.25 10L6.109 2.58c-.268-.27-.268-.707 0-.979.268-.27.701-.27.969 0l7.83 7.908c.268.271.268.709 0 .979l-7.83 7.908c-.268.271-.701.27-.969 0-.268-.269-.268-.707 0-.979L13.25 10z"/>
    </symbol>
    <symbol viewBox="0 0 20 20" id="large-arrow-down">
      <path d="M17.418 6.109c.272-.268.709-.268.979 0s.271.701 0 .969l-7.908 7.83c-.27.268-.707.268-.979 0l-7.908-7.83c-.27-.268-.27-.701 0-.969.271-.268.709-.268.979 0L10 13.25l7.418-7.141z"/>
    </symbol>
    <symbol viewBox="0 0 24 24" id="jump-to">
      <path d="M19 7v4H5.83l3.58-3.59L8 6l-6 6 6 6 1.41-1.41L5.83 13H21V7z"/>
    </symbol>
    <symbol viewBox="0 0 24 24" id="expand">
      <path d="M10 18h4v-2h-4v2zM3 6v2h18V6H3zm3 7h12v-2H6v2z"/>
    </symbol>
  </defs>
</svg>
<div id="swagger-ui"></div>
<script src="./swagger-ui-bundle.js"> </script>
<script src="./swagger-ui-standalone-preset.js"> </script>
<script>
window.onload = function() {
  config = {{.}};
  config.dom_id = '#swagger-ui';
  config.plugins = [
    {{- range $plugin := .Plugins }}
    {{$plugin}},
    {{- end}}
  ];
  config.presets = [
    {{- range $preset := .Presets }}
    {{$preset}},
    {{- end}}
  ];
  config.filter = {{.Filter.Value}}
  config.syntaxHighlight = {{.SyntaxHighlight.Value}}
  {{if .TagsSorter}}
  config.tagsSorter = {{.TagsSorter}}
  {{end}}
  {{if .OnComplete}}
  config.onComplete = {{.OnComplete}}
  {{end}}
  {{if .RequestInterceptor}}
  config.requestInterceptor = {{.RequestInterceptor}}
  {{end}}
  {{if .ResponseInterceptor}}
  config.responseInterceptor = {{.ResponseInterceptor}}
  {{end}}
  {{if .ModelPropertyMacro}}
  config.modelPropertyMacro = {{.ModelPropertyMacro}}
  {{end}}
  {{if .ParameterMacro}}
  config.parameterMacro = {{.ParameterMacro}}
  {{end}}

  const ui = SwaggerUIBundle(config);

  {{if .OAuth}}
  ui.initOAuth({{.OAuth}});
  {{end}}
  {{if .PreauthorizeBasic}}
  ui.preauthorizeBasic({{.PreauthorizeBasic}});
  {{end}}
  {{if .PreauthorizeApiKey}}
  ui.preauthorizeApiKey({{.PreauthorizeApiKey}});
  {{end}}

  window.ui = ui
}
</script>
</body>
</html>
`
|
||||
33
backend/providers/job/config.go
Normal file
33
backend/providers/job/config.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package job
|
||||
|
||||
import (
|
||||
"github.com/riverqueue/river"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the configuration key prefix for the job provider.
const DefaultPrefix = "Job"

// DefaultProvider wires Provide into the DI container under the standard
// configuration prefix.
func DefaultProvider() container.ProviderContainer {
	return container.ProviderContainer{
		Provider: Provide,
		Options: []opt.Option{
			opt.Prefix(DefaultPrefix),
		},
	}
}

// Config is currently empty: the job provider takes its database settings
// from the postgres provider (see Provide).
type Config struct{}

// Job priorities used in river insert options.
// NOTE(review): PriorityHigh and PriorityMiddle are both 3, which looks like
// a copy-paste mistake. Also verify against river's priority semantics which
// direction (lower vs. higher number) runs first before "fixing" the values.
const (
	PriorityDefault = river.PriorityDefault
	PriorityLow     = 2
	PriorityMiddle  = 3
	PriorityHigh    = 3
)

// Queue names registered with the river client (see Job.Client, which gives
// each queue 10 workers).
const (
	QueueHigh    = "high"
	QueueDefault = river.QueueDefault
	QueueLow     = "low"
)
|
||||
187
backend/providers/job/provider.go
Normal file
187
backend/providers/job/provider.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"quyun/providers/postgres"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/riverqueue/river"
|
||||
"github.com/riverqueue/river/riverdriver/riverpgxv5"
|
||||
"github.com/riverqueue/river/rivertype"
|
||||
"github.com/samber/lo"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/contracts"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
// Provide registers a *Job constructor in the DI container. The constructor
// builds a pgx pool from the postgres provider's DSN and wraps it in a river
// driver; the river client itself is created lazily on first use (see
// Job.Client). Both the pool and the Job are registered for shutdown via
// container.AddCloseAble.
func Provide(opts ...opt.Option) error {
	o := opt.New(opts...)
	var config Config
	if err := o.UnmarshalConfig(&config); err != nil {
		return err
	}
	return container.Container.Provide(func(ctx context.Context, dbConf *postgres.Config) (*Job, error) {
		workers := river.NewWorkers()

		dbPoolConfig, err := pgxpool.ParseConfig(dbConf.DSN())
		if err != nil {
			return nil, err
		}

		dbPool, err := pgxpool.NewWithConfig(ctx, dbPoolConfig)
		if err != nil {
			return nil, err
		}
		// Close the pool when the container shuts down.
		container.AddCloseAble(dbPool.Close)
		pool := riverpgxv5.New(dbPool)

		queue := &Job{Workers: workers, driver: pool, ctx: ctx, periodicJobs: make(map[string]rivertype.PeriodicJobHandle), jobs: make(map[string]*rivertype.JobInsertResult)}
		container.AddCloseAble(queue.Close)

		return queue, nil
	}, o.DiOptions()...)
}
|
||||
|
||||
// Job wraps a lazily-constructed river client together with its worker set
// and per-instance bookkeeping of inserted and periodic jobs.
//
// NOTE(review): storing a context in a struct is generally discouraged in
// Go; consider threading ctx through the methods instead.
type Job struct {
	ctx     context.Context // container context captured at construction; used by Close/Cancel/Add
	Workers *river.Workers
	driver  *riverpgxv5.Driver

	l      sync.Mutex           // guards client, periodicJobs and jobs
	client *river.Client[pgx.Tx] // created lazily by Client()

	periodicJobs map[string]rivertype.PeriodicJobHandle // periodic-job handles keyed by the arg's UniqueID()
	jobs         map[string]*rivertype.JobInsertResult  // one-shot insert results keyed by the job's UniqueID()
}
|
||||
|
||||
func (q *Job) Close() {
|
||||
if q.client == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err := q.client.StopAndCancel(q.ctx); err != nil {
|
||||
log.Errorf("Failed to stop and cancel client: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Client returns the river client, creating it on first call with the three
// queues declared in config.go (10 workers each). The mutex makes the lazy
// initialization safe for concurrent callers.
func (q *Job) Client() (*river.Client[pgx.Tx], error) {
	q.l.Lock()
	defer q.l.Unlock()

	if q.client == nil {
		var err error
		q.client, err = river.NewClient(q.driver, &river.Config{
			Workers: q.Workers,
			Queues: map[string]river.QueueConfig{
				QueueHigh:    {MaxWorkers: 10},
				QueueDefault: {MaxWorkers: 10},
				QueueLow:     {MaxWorkers: 10},
			},
		})
		if err != nil {
			return nil, err
		}
	}

	return q.client, nil
}
|
||||
|
||||
func (q *Job) Start(ctx context.Context) error {
|
||||
client, err := q.Client()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get client failed")
|
||||
}
|
||||
|
||||
if err := client.Start(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer client.StopAndCancel(ctx)
|
||||
|
||||
<-ctx.Done()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StopAndCancel shuts the river client down hard, cancelling the context of
// any jobs still being worked.
func (q *Job) StopAndCancel(ctx context.Context) error {
	client, err := q.Client()
	if err != nil {
		return errors.Wrap(err, "get client failed")
	}

	return client.StopAndCancel(ctx)
}
|
||||
|
||||
func (q *Job) AddPeriodicJobs(job contracts.CronJob) error {
|
||||
for _, job := range job.Args() {
|
||||
if err := q.AddPeriodicJob(job); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPeriodicJob registers a single periodic job with the river client and
// remembers its handle under the arg's UniqueID so Cancel can later remove
// it.
func (q *Job) AddPeriodicJob(job contracts.CronJobArg) error {
	client, err := q.Client()
	if err != nil {
		return err
	}
	q.l.Lock()
	defer q.l.Unlock()

	q.periodicJobs[job.Arg.UniqueID()] = client.PeriodicJobs().Add(river.NewPeriodicJob(
		job.PeriodicInterval,
		// Constructor invoked by river on each tick; insert options come
		// from the arg itself.
		func() (river.JobArgs, *river.InsertOpts) {
			return job.Arg, lo.ToPtr(job.Arg.InsertOpts())
		},
		&river.PeriodicJobOpts{
			RunOnStart: job.RunOnStart,
		},
	))

	return nil
}
|
||||
|
||||
// Cancel removes the periodic job or cancels the one-shot job registered
// under id (an Arg.UniqueID(), see AddPeriodicJob/Add). Periodic jobs take
// precedence when both maps contain the id. An unknown id is a silent no-op
// returning nil.
func (q *Job) Cancel(id string) error {
	client, err := q.Client()
	if err != nil {
		return err
	}

	q.l.Lock()
	defer q.l.Unlock()

	if h, ok := q.periodicJobs[id]; ok {
		client.PeriodicJobs().Remove(h)
		delete(q.periodicJobs, id)
		return nil
	}

	if r, ok := q.jobs[id]; ok {
		_, err = client.JobCancel(q.ctx, r.Job.ID)
		if err != nil {
			return err
		}
		delete(q.jobs, id)
		return nil
	}

	return nil
}
|
||||
|
||||
// Add inserts a one-shot job and records its insert result under the job's
// UniqueID so it can later be cancelled via Cancel. A second Add with the
// same UniqueID overwrites the recorded result.
func (q *Job) Add(job contracts.JobArgs) error {
	client, err := q.Client()
	if err != nil {
		return err
	}

	q.l.Lock()
	defer q.l.Unlock()

	q.jobs[job.UniqueID()], err = client.Insert(q.ctx, job, lo.ToPtr(job.InsertOpts()))
	return err
}
|
||||
35
backend/providers/jwt/config.go
Normal file
35
backend/providers/jwt/config.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the configuration key prefix for the JWT provider.
const DefaultPrefix = "JWT"

// DefaultProvider wires Provide into the DI container under the standard
// configuration prefix.
func DefaultProvider() container.ProviderContainer {
	return container.ProviderContainer{
		Provider: Provide,
		Options: []opt.Option{
			opt.Prefix(DefaultPrefix),
		},
	}
}

// Config holds the JWT issuance settings.
type Config struct {
	SigningKey  string // HMAC signing key
	ExpiresTime string // token lifetime in time.ParseDuration syntax (e.g. "168h")
	Issuer      string // issuer written into the registered claims
}

// ExpiresTimeDuration parses ExpiresTime into a time.Duration. A malformed
// value terminates the process via log.Fatal, so this is intended for use
// right after configuration load, not on request paths.
func (c *Config) ExpiresTimeDuration() time.Duration {
	d, err := time.ParseDuration(c.ExpiresTime)
	if err != nil {
		log.Fatal(err)
	}
	return d
}
|
||||
118
backend/providers/jwt/jwt.go
Normal file
118
backend/providers/jwt/jwt.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
jwt "github.com/golang-jwt/jwt/v4"
|
||||
"golang.org/x/sync/singleflight"
|
||||
)
|
||||
|
||||
const (
	// CtxKey is the request-context key under which parsed claims are stored.
	CtxKey = "claims"
	// HttpHeader is the request header carrying the bearer token.
	HttpHeader = "Authorization"
)

// BaseClaims carries the application-specific identity fields embedded in
// every issued token.
type BaseClaims struct {
	OpenID   string `json:"open_id,omitempty"`
	Tenant   string `json:"tenant,omitempty"`
	UserID   int64  `json:"user_id,omitempty"`
	TenantID int64  `json:"tenant_id,omitempty"`
}

// Claims is the full token payload: application identity plus the standard
// registered JWT claims.
type Claims struct {
	BaseClaims
	jwt.RegisteredClaims
}

// TokenPrefix is the scheme prefix stripped from Authorization values.
const TokenPrefix = "Bearer "

// JWT issues and parses HS256 tokens signed with the configured key.
type JWT struct {
	singleflight *singleflight.Group // collapses concurrent refreshes of the same token (see CreateTokenByOldToken)
	config       *Config
	SigningKey   []byte // []byte form of config.SigningKey, as required by the jwt library
}

// Sentinel errors returned by Parse.
// NOTE(review): idiomatic Go would name these ErrTokenExpired etc. with
// lowercase messages, but renaming the exported variables would break
// callers elsewhere in the project.
var (
	TokenExpired     = errors.New("Token is expired")
	TokenNotValidYet = errors.New("Token not active yet")
	TokenMalformed   = errors.New("That's not even a token")
	TokenInvalid     = errors.New("Couldn't handle this token:")
)
|
||||
|
||||
// Provide registers a *JWT constructor in the DI container, unmarshalling
// the provider's Config and pre-converting the signing key to []byte.
func Provide(opts ...opt.Option) error {
	o := opt.New(opts...)
	var config Config
	if err := o.UnmarshalConfig(&config); err != nil {
		return err
	}
	return container.Container.Provide(func() (*JWT, error) {
		return &JWT{
			singleflight: &singleflight.Group{},
			config:       &config,
			SigningKey:   []byte(config.SigningKey),
		}, nil
	}, o.DiOptions()...)
}
|
||||
|
||||
// CreateClaims builds a Claims value around the given base claims, applying
// the configured issuer and lifetime.
func (j *JWT) CreateClaims(baseClaims BaseClaims) *Claims {
	// NOTE(review): the ParseDuration error is ignored, so a malformed
	// ExpiresTime silently yields a zero lifetime (an already-expired
	// token), while Config.ExpiresTimeDuration log.Fatals on the same
	// input. Confirm which behaviour is intended.
	ep, _ := time.ParseDuration(j.config.ExpiresTime)
	claims := Claims{
		BaseClaims: baseClaims,
		RegisteredClaims: jwt.RegisteredClaims{
			NotBefore: jwt.NewNumericDate(time.Now().Add(-time.Second * 10)), // valid from 10s in the past (presumably to tolerate clock skew)
			ExpiresAt: jwt.NewNumericDate(time.Now().Add(ep)),                // expiry taken from configuration
			Issuer:    j.config.Issuer,                                       // token issuer
		},
	}
	return &claims
}
|
||||
|
||||
// 创建一个token
|
||||
func (j *JWT) CreateToken(claims *Claims) (string, error) {
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
|
||||
return token.SignedString(j.SigningKey)
|
||||
}
|
||||
|
||||
// CreateTokenByOldToken 旧token 换新token 使用归并回源避免并发问题
|
||||
func (j *JWT) CreateTokenByOldToken(oldToken string, claims *Claims) (string, error) {
|
||||
v, err, _ := j.singleflight.Do("JWT:"+oldToken, func() (interface{}, error) {
|
||||
return j.CreateToken(claims)
|
||||
})
|
||||
return v.(string), err
|
||||
}
|
||||
|
||||
// 解析 token
|
||||
func (j *JWT) Parse(tokenString string) (*Claims, error) {
|
||||
tokenString = strings.TrimPrefix(tokenString, TokenPrefix)
|
||||
token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (i interface{}, e error) {
|
||||
return j.SigningKey, nil
|
||||
})
|
||||
if err != nil {
|
||||
if ve, ok := err.(*jwt.ValidationError); ok {
|
||||
if ve.Errors&jwt.ValidationErrorMalformed != 0 {
|
||||
return nil, TokenMalformed
|
||||
} else if ve.Errors&jwt.ValidationErrorExpired != 0 {
|
||||
// Token is expired
|
||||
return nil, TokenExpired
|
||||
} else if ve.Errors&jwt.ValidationErrorNotValidYet != 0 {
|
||||
return nil, TokenNotValidYet
|
||||
} else {
|
||||
return nil, TokenInvalid
|
||||
}
|
||||
}
|
||||
}
|
||||
if token != nil {
|
||||
if claims, ok := token.Claims.(*Claims); ok && token.Valid {
|
||||
return claims, nil
|
||||
}
|
||||
return nil, TokenInvalid
|
||||
} else {
|
||||
return nil, TokenInvalid
|
||||
}
|
||||
}
|
||||
54
backend/providers/otel/config.go
Normal file
54
backend/providers/otel/config.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package otel
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"go.ipao.vip/atom"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
// DefaultPrefix is the configuration key prefix for the otel provider.
const DefaultPrefix = "OTEL"

// DefaultProvider wires Provide into the DI container under the standard
// prefix, in the initial provider group so telemetry is set up before the
// rest of the application.
func DefaultProvider() container.ProviderContainer {
	return container.ProviderContainer{
		Provider: Provide,
		Options: []opt.Option{
			opt.Prefix(DefaultPrefix),
			opt.Group(atom.GroupInitialName),
		},
	}
}

// Config describes the OpenTelemetry collector endpoints and the service
// identity attached to emitted telemetry.
type Config struct {
	ServiceName string
	Version     string
	Env         string

	EndpointGRPC string
	EndpointHTTP string
	Token        string
}

// format backfills ServiceName, Version and Env from the SERVICE_NAME,
// SERVICE_VERSION and DEPLOY_ENVIRONMENT environment variables, falling
// back to "unknown" when neither the config nor the environment supplies a
// value. Endpoint fields are left untouched.
func (c *Config) format() {
	if c.ServiceName == "" {
		c.ServiceName = os.Getenv("SERVICE_NAME")
		if c.ServiceName == "" {
			c.ServiceName = "unknown"
		}
	}

	if c.Version == "" {
		c.Version = os.Getenv("SERVICE_VERSION")
		if c.Version == "" {
			c.Version = "unknown"
		}
	}

	if c.Env == "" {
		c.Env = os.Getenv("DEPLOY_ENVIRONMENT")
		if c.Env == "" {
			c.Env = "unknown"
		}
	}
}
|
||||
30
backend/providers/otel/docker/.env
Normal file
30
backend/providers/otel/docker/.env
Normal file
@@ -0,0 +1,30 @@
|
||||
# Dependent images
|
||||
GRAFANA_IMAGE=docker.hub.ipao.vip/grafana/grafana:11.4.0
|
||||
JAEGERTRACING_IMAGE=docker.hub.ipao.vip/jaegertracing/all-in-one:1.64.0
|
||||
OPENSEARCH_IMAGE=docker.hub.ipao.vip/opensearchproject/opensearch:2.18.0
|
||||
COLLECTOR_CONTRIB_IMAGE=docker-ghcr.hub.ipao.vip/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.116.1
|
||||
PROMETHEUS_IMAGE=docker-quay.hub.ipao.vip/prometheus/prometheus:v3.0.1
|
||||
|
||||
# OpenTelemetry Collector
|
||||
HOST_FILESYSTEM=/
|
||||
DOCKER_SOCK=/var/run/docker.sock
|
||||
OTEL_COLLECTOR_HOST=otel-collector
|
||||
OTEL_COLLECTOR_PORT_GRPC=4317
|
||||
OTEL_COLLECTOR_PORT_HTTP=4318
|
||||
OTEL_COLLECTOR_CONFIG=./otel-collector/otelcol-config.yml
|
||||
OTEL_COLLECTOR_CONFIG_EXTRAS=./otel-collector/otelcol-config-extras.yml
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT=http://${OTEL_COLLECTOR_HOST}:${OTEL_COLLECTOR_PORT_GRPC}
|
||||
PUBLIC_OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:8080/otlp-http/v1/traces
|
||||
|
||||
# Grafana
|
||||
GRAFANA_SERVICE_PORT=3000
|
||||
GRAFANA_SERVICE_HOST=grafana
|
||||
|
||||
# Jaeger
|
||||
JAEGER_SERVICE_PORT=16686
|
||||
JAEGER_SERVICE_HOST=jaeger
|
||||
|
||||
# Prometheus
|
||||
PROMETHEUS_SERVICE_PORT=9090
|
||||
PROMETHEUS_SERVICE_HOST=prometheus
|
||||
PROMETHEUS_ADDR=${PROMETHEUS_SERVICE_HOST}:${PROMETHEUS_SERVICE_PORT}
|
||||
153
backend/providers/otel/docker/docker-compose.yaml
Normal file
153
backend/providers/otel/docker/docker-compose.yaml
Normal file
@@ -0,0 +1,153 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
x-default-logging: &logging
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "5m"
|
||||
max-file: "2"
|
||||
tag: "{{.Name}}"
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: opentelemetry-demo
|
||||
driver: bridge
|
||||
|
||||
services:
|
||||
# ********************
|
||||
# Telemetry Components
|
||||
# ********************
|
||||
# Jaeger
|
||||
jaeger:
|
||||
image: ${JAEGERTRACING_IMAGE}
|
||||
container_name: jaeger
|
||||
command:
|
||||
- "--memory.max-traces=5000"
|
||||
- "--query.base-path=/jaeger/ui"
|
||||
- "--prometheus.server-url=http://${PROMETHEUS_ADDR}"
|
||||
- "--prometheus.query.normalize-calls=true"
|
||||
- "--prometheus.query.normalize-duration=true"
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 400M
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${JAEGER_SERVICE_PORT}:${JAEGER_SERVICE_PORT}" # Jaeger UI
|
||||
# - "${OTEL_COLLECTOR_PORT_GRPC}"
|
||||
environment:
|
||||
- METRICS_STORAGE_TYPE=prometheus
|
||||
logging: *logging
|
||||
|
||||
# Grafana
|
||||
grafana:
|
||||
image: ${GRAFANA_IMAGE}
|
||||
container_name: grafana
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 100M
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- "GF_INSTALL_PLUGINS=grafana-opensearch-datasource"
|
||||
volumes:
|
||||
- ./grafana/grafana.ini:/etc/grafana/grafana.ini
|
||||
- ./grafana/provisioning/:/etc/grafana/provisioning/
|
||||
ports:
|
||||
- "${GRAFANA_SERVICE_PORT}:${GRAFANA_SERVICE_PORT}"
|
||||
logging: *logging
|
||||
|
||||
# OpenTelemetry Collector
|
||||
otel-collector:
|
||||
image: ${COLLECTOR_CONTRIB_IMAGE}
|
||||
container_name: otel-collector
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 200M
|
||||
restart: unless-stopped
|
||||
command:
|
||||
[
|
||||
"--config=/etc/otelcol-config.yml",
|
||||
"--config=/etc/otelcol-config-extras.yml",
|
||||
]
|
||||
user: 0:0
|
||||
volumes:
|
||||
- ${HOST_FILESYSTEM}:/hostfs:ro
|
||||
- ${DOCKER_SOCK}:/var/run/docker.sock:ro
|
||||
- ${OTEL_COLLECTOR_CONFIG}:/etc/otelcol-config.yml
|
||||
- ${OTEL_COLLECTOR_CONFIG_EXTRAS}:/etc/otelcol-config-extras.yml
|
||||
ports:
|
||||
- "${OTEL_COLLECTOR_PORT_GRPC}:${OTEL_COLLECTOR_PORT_GRPC}"
|
||||
- "${OTEL_COLLECTOR_PORT_HTTP}:${OTEL_COLLECTOR_PORT_HTTP}"
|
||||
depends_on:
|
||||
jaeger:
|
||||
condition: service_started
|
||||
opensearch:
|
||||
condition: service_healthy
|
||||
logging: *logging
|
||||
environment:
|
||||
- ENVOY_PORT
|
||||
- HOST_FILESYSTEM
|
||||
- OTEL_COLLECTOR_HOST
|
||||
- OTEL_COLLECTOR_PORT_GRPC
|
||||
- OTEL_COLLECTOR_PORT_HTTP
|
||||
|
||||
# Prometheus
|
||||
prometheus:
|
||||
image: ${PROMETHEUS_IMAGE}
|
||||
container_name: prometheus
|
||||
command:
|
||||
- --web.console.templates=/etc/prometheus/consoles
|
||||
- --web.console.libraries=/etc/prometheus/console_libraries
|
||||
- --storage.tsdb.retention.time=1h
|
||||
- --config.file=/etc/prometheus/prometheus-config.yaml
|
||||
- --storage.tsdb.path=/prometheus
|
||||
- --web.enable-lifecycle
|
||||
- --web.route-prefix=/
|
||||
- --web.enable-otlp-receiver
|
||||
- --enable-feature=exemplar-storage
|
||||
volumes:
|
||||
- ./prometheus/prometheus-config.yaml:/etc/prometheus/prometheus-config.yaml
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 300M
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${PROMETHEUS_SERVICE_PORT}:${PROMETHEUS_SERVICE_PORT}"
|
||||
logging: *logging
|
||||
|
||||
# OpenSearch
|
||||
opensearch:
|
||||
image: ${OPENSEARCH_IMAGE}
|
||||
container_name: opensearch
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 1G
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- cluster.name=demo-cluster
|
||||
- node.name=demo-node
|
||||
- bootstrap.memory_lock=true
|
||||
- discovery.type=single-node
|
||||
- OPENSEARCH_JAVA_OPTS=-Xms300m -Xmx300m
|
||||
- DISABLE_INSTALL_DEMO_CONFIG=true
|
||||
- DISABLE_SECURITY_PLUGIN=true
|
||||
ulimits:
|
||||
memlock:
|
||||
soft: -1
|
||||
hard: -1
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
ports:
|
||||
- "9200:9200"
|
||||
healthcheck:
|
||||
test: curl -s http://localhost:9200/_cluster/health | grep -E '"status":"(green|yellow)"'
|
||||
start_period: 10s
|
||||
interval: 5s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
logging: *logging
|
||||
1170
backend/providers/otel/docker/grafana/grafana.ini
Normal file
1170
backend/providers/otel/docker/grafana/grafana.ini
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,14 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
|
||||
apiVersion: 1
|
||||
providers:
|
||||
- name: 'OpenTelemetry Demo'
|
||||
orgId: 1
|
||||
folder: 'Demo'
|
||||
type: file
|
||||
disableDeletion: false
|
||||
editable: true
|
||||
options:
|
||||
path: /etc/grafana/provisioning/dashboards/demo
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,435 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 5,
|
||||
"links": [],
|
||||
"panels": [
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"panels": [],
|
||||
"title": "GetCart Exemplars",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 10
|
||||
},
|
||||
"id": 5,
|
||||
"interval": "2m",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.3.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "histogram_quantile(0.95, sum by(le) (rate(app_cart_get_cart_latency_bucket[$__rate_interval])))",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"legendFormat": "p95 GetCart",
|
||||
"range": true,
|
||||
"refId": "A",
|
||||
"useBackend": false
|
||||
}
|
||||
],
|
||||
"title": "95th Pct Cart GetCart Latency with Exemplars",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
}
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 1
|
||||
},
|
||||
"id": 2,
|
||||
"interval": "2m",
|
||||
"options": {
|
||||
"calculate": false,
|
||||
"cellGap": 1,
|
||||
"color": {
|
||||
"exponent": 0.5,
|
||||
"fill": "dark-orange",
|
||||
"mode": "scheme",
|
||||
"reverse": false,
|
||||
"scale": "exponential",
|
||||
"scheme": "Spectral",
|
||||
"steps": 64
|
||||
},
|
||||
"exemplars": {
|
||||
"color": "rgba(255,0,255,0.7)"
|
||||
},
|
||||
"filterValues": {
|
||||
"le": 1e-9
|
||||
},
|
||||
"legend": {
|
||||
"show": true
|
||||
},
|
||||
"rowsFrame": {
|
||||
"layout": "auto"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"showColorScale": false,
|
||||
"yHistogram": false
|
||||
},
|
||||
"yAxis": {
|
||||
"axisPlacement": "left",
|
||||
"reverse": false
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.3.0",
|
||||
"targets": [
|
||||
{
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "sum by(le) (rate(app_cart_get_cart_latency_bucket[$__rate_interval]))",
|
||||
"format": "heatmap",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"instant": true,
|
||||
"legendFormat": "{{le}}",
|
||||
"range": true,
|
||||
"refId": "A",
|
||||
"useBackend": false
|
||||
}
|
||||
],
|
||||
"title": "GetCart Latency Heatmap with Exemplars",
|
||||
"type": "heatmap"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": {
|
||||
"h": 1,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 20
|
||||
},
|
||||
"id": 3,
|
||||
"panels": [],
|
||||
"title": "AddItem Exemplars",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
}
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 21
|
||||
},
|
||||
"id": 6,
|
||||
"interval": "2m",
|
||||
"options": {
|
||||
"calculate": false,
|
||||
"cellGap": 1,
|
||||
"color": {
|
||||
"exponent": 0.5,
|
||||
"fill": "dark-orange",
|
||||
"mode": "scheme",
|
||||
"reverse": false,
|
||||
"scale": "exponential",
|
||||
"scheme": "Spectral",
|
||||
"steps": 64
|
||||
},
|
||||
"exemplars": {
|
||||
"color": "rgba(255,0,255,0.7)"
|
||||
},
|
||||
"filterValues": {
|
||||
"le": 1e-9
|
||||
},
|
||||
"legend": {
|
||||
"show": true
|
||||
},
|
||||
"rowsFrame": {
|
||||
"layout": "auto"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"showColorScale": false,
|
||||
"yHistogram": false
|
||||
},
|
||||
"yAxis": {
|
||||
"axisPlacement": "left",
|
||||
"reverse": false
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.3.0",
|
||||
"targets": [
|
||||
{
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "sum by(le) (rate(app_cart_add_item_latency_bucket[$__rate_interval]))",
|
||||
"format": "heatmap",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"instant": true,
|
||||
"legendFormat": "{{le}}",
|
||||
"range": true,
|
||||
"refId": "A",
|
||||
"useBackend": false
|
||||
}
|
||||
],
|
||||
"title": "AddItem Latency Heatmap with Exemplars",
|
||||
"type": "heatmap"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisBorderShow": false,
|
||||
"axisCenteredZero": false,
|
||||
"axisColorMode": "text",
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"barWidthFactor": 0.6,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"insertNulls": false,
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 10,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 30
|
||||
},
|
||||
"id": 1,
|
||||
"interval": "2m",
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom",
|
||||
"showLegend": true
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.3.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "webstore-metrics"
|
||||
},
|
||||
"disableTextWrap": false,
|
||||
"editorMode": "builder",
|
||||
"exemplar": true,
|
||||
"expr": "histogram_quantile(0.95, sum by(le) (rate(app_cart_add_item_latency_bucket[$__rate_interval])))",
|
||||
"fullMetaSearch": false,
|
||||
"includeNullMetadata": false,
|
||||
"legendFormat": "p95 AddItem",
|
||||
"range": true,
|
||||
"refId": "A",
|
||||
"useBackend": false
|
||||
}
|
||||
],
|
||||
"title": "95th Pct Cart AddItem Latency with Exemplars",
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"preload": false,
|
||||
"schemaVersion": 40,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-1h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "Cart Service Exemplars",
|
||||
"uid": "ce6sd46kfkglca",
|
||||
"version": 1,
|
||||
"weekStart": ""
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,21 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
uid: webstore-metrics
|
||||
type: prometheus
|
||||
url: http://prometheus:9090
|
||||
editable: true
|
||||
isDefault: true
|
||||
jsonData:
|
||||
exemplarTraceIdDestinations:
|
||||
- datasourceUid: webstore-traces
|
||||
name: trace_id
|
||||
|
||||
- url: http://localhost:8080/jaeger/ui/trace/$${__value.raw}
|
||||
name: trace_id
|
||||
urlDisplayLabel: View in Jaeger UI
|
||||
@@ -0,0 +1,13 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Jaeger
|
||||
uid: webstore-traces
|
||||
type: jaeger
|
||||
url: http://jaeger:16686/jaeger/ui
|
||||
editable: true
|
||||
isDefault: false
|
||||
@@ -0,0 +1,20 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: OpenSearch
|
||||
type: grafana-opensearch-datasource
|
||||
url: http://opensearch:9200/
|
||||
access: proxy
|
||||
editable: true
|
||||
isDefault: false
|
||||
jsonData:
|
||||
database: otel
|
||||
flavor: opensearch
|
||||
logLevelField: severity
|
||||
logMessageField: body
|
||||
pplEnabled: true
|
||||
timeField: observedTimestamp
|
||||
version: 2.18.0
|
||||
@@ -0,0 +1,18 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# extra settings to be merged into OpenTelemetry Collector configuration
|
||||
# do not delete this file
|
||||
|
||||
## Example configuration for sending data to your own OTLP HTTP backend
|
||||
## Note: the spanmetrics exporter must be included in the exporters array
|
||||
## if overriding the traces pipeline.
|
||||
##
|
||||
# exporters:
|
||||
# otlphttp/example:
|
||||
# endpoint: <your-endpoint-url>
|
||||
#
|
||||
# service:
|
||||
# pipelines:
|
||||
# traces:
|
||||
# exporters: [spanmetrics, otlphttp/example]
|
||||
128
backend/providers/otel/docker/otel-collector/otelcol-config.yml
Normal file
128
backend/providers/otel/docker/otel-collector/otelcol-config.yml
Normal file
@@ -0,0 +1,128 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: ${env:OTEL_COLLECTOR_HOST}:${env:OTEL_COLLECTOR_PORT_GRPC}
|
||||
http:
|
||||
endpoint: ${env:OTEL_COLLECTOR_HOST}:${env:OTEL_COLLECTOR_PORT_HTTP}
|
||||
cors:
|
||||
allowed_origins:
|
||||
- "http://*"
|
||||
- "https://*"
|
||||
docker_stats:
|
||||
endpoint: unix:///var/run/docker.sock
|
||||
# Host metrics
|
||||
hostmetrics:
|
||||
root_path: /hostfs
|
||||
scrapers:
|
||||
cpu:
|
||||
metrics:
|
||||
system.cpu.utilization:
|
||||
enabled: true
|
||||
disk:
|
||||
load:
|
||||
filesystem:
|
||||
exclude_mount_points:
|
||||
mount_points:
|
||||
- /dev/*
|
||||
- /proc/*
|
||||
- /sys/*
|
||||
- /run/k3s/containerd/*
|
||||
- /var/lib/docker/*
|
||||
- /var/lib/kubelet/*
|
||||
- /snap/*
|
||||
match_type: regexp
|
||||
exclude_fs_types:
|
||||
fs_types:
|
||||
- autofs
|
||||
- binfmt_misc
|
||||
- bpf
|
||||
- cgroup2
|
||||
- configfs
|
||||
- debugfs
|
||||
- devpts
|
||||
- devtmpfs
|
||||
- fusectl
|
||||
- hugetlbfs
|
||||
- iso9660
|
||||
- mqueue
|
||||
- nsfs
|
||||
- overlay
|
||||
- proc
|
||||
- procfs
|
||||
- pstore
|
||||
- rpc_pipefs
|
||||
- securityfs
|
||||
- selinuxfs
|
||||
- squashfs
|
||||
- sysfs
|
||||
- tracefs
|
||||
match_type: strict
|
||||
memory:
|
||||
metrics:
|
||||
system.memory.utilization:
|
||||
enabled: true
|
||||
network:
|
||||
paging:
|
||||
processes:
|
||||
process:
|
||||
mute_process_exe_error: true
|
||||
mute_process_io_error: true
|
||||
mute_process_user_error: true
|
||||
# Collector metrics
|
||||
prometheus:
|
||||
config:
|
||||
scrape_configs:
|
||||
- job_name: "otel-collector"
|
||||
scrape_interval: 10s
|
||||
static_configs:
|
||||
- targets: ["0.0.0.0:8888"]
|
||||
|
||||
exporters:
|
||||
debug:
|
||||
otlp:
|
||||
endpoint: "jaeger:4317"
|
||||
tls:
|
||||
insecure: true
|
||||
otlphttp/prometheus:
|
||||
endpoint: "http://prometheus:9090/api/v1/otlp"
|
||||
tls:
|
||||
insecure: true
|
||||
opensearch:
|
||||
logs_index: otel
|
||||
http:
|
||||
endpoint: "http://opensearch:9200"
|
||||
tls:
|
||||
insecure: true
|
||||
|
||||
processors:
|
||||
batch:
|
||||
transform:
|
||||
error_mode: ignore
|
||||
trace_statements:
|
||||
- context: span
|
||||
statements:
|
||||
# could be removed when https://github.com/vercel/next.js/pull/64852 is fixed upstream
|
||||
- replace_pattern(name, "\\?.*", "")
|
||||
- replace_match(name, "GET /api/products/*", "GET /api/products/{productId}")
|
||||
|
||||
connectors:
|
||||
spanmetrics:
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [transform, batch]
|
||||
exporters: [otlp, debug, spanmetrics]
|
||||
metrics:
|
||||
receivers: [hostmetrics, docker_stats, otlp, prometheus, spanmetrics]
|
||||
processors: [batch]
|
||||
exporters: [otlphttp/prometheus, debug]
|
||||
logs:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [opensearch, debug]
|
||||
@@ -0,0 +1,27 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
global:
|
||||
scrape_interval: 5s
|
||||
scrape_timeout: 3s
|
||||
evaluation_interval: 30s
|
||||
|
||||
otlp:
|
||||
promote_resource_attributes:
|
||||
- service.instance.id
|
||||
- service.name
|
||||
- service.namespace
|
||||
- cloud.availability_zone
|
||||
- cloud.region
|
||||
- container.name
|
||||
- deployment.environment.name
|
||||
|
||||
scrape_configs:
|
||||
- job_name: otel-collector
|
||||
static_configs:
|
||||
- targets:
|
||||
- 'otel-collector:8888'
|
||||
|
||||
storage:
|
||||
tsdb:
|
||||
out_of_order_time_window: 30m
|
||||
91
backend/providers/otel/funcs.go
Normal file
91
backend/providers/otel/funcs.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package otel
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
tracer trace.Tracer
|
||||
meter metric.Meter
|
||||
)
|
||||
|
||||
func Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
return tracer.Start(ctx, spanName, opts...)
|
||||
}
|
||||
|
||||
func Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
||||
return meter.Int64Counter(name, options...)
|
||||
}
|
||||
|
||||
// Int64UpDownCounter
|
||||
func Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
return meter.Int64UpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
// Int64Histogram
|
||||
func Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
||||
return meter.Int64Histogram(name, options...)
|
||||
}
|
||||
|
||||
// Int64Gauge
|
||||
func Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
|
||||
return meter.Int64Gauge(name, options...)
|
||||
}
|
||||
|
||||
// Int64ObservableCounter
|
||||
func Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
return meter.Int64ObservableCounter(name, options...)
|
||||
}
|
||||
|
||||
// Int64ObservableUpDownCounter
|
||||
func Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
return meter.Int64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
// Int64ObservableGauge
|
||||
func Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
return meter.Int64ObservableGauge(name, options...)
|
||||
}
|
||||
|
||||
// Float64Counter
|
||||
func Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
||||
return meter.Float64Counter(name, options...)
|
||||
}
|
||||
|
||||
// Float64UpDownCounter
|
||||
func Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
return meter.Float64UpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
// Float64Histogram
|
||||
func Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
return meter.Float64Histogram(name, options...)
|
||||
}
|
||||
|
||||
// Float64Gauge
|
||||
func Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
|
||||
return meter.Float64Gauge(name, options...)
|
||||
}
|
||||
|
||||
// Float64ObservableCounter
|
||||
func Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
return meter.Float64ObservableCounter(name, options...)
|
||||
}
|
||||
|
||||
// Float64ObservableUpDownCounter
|
||||
func Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
return meter.Float64ObservableUpDownCounter(name, options...)
|
||||
}
|
||||
|
||||
// Float64ObservableGauge
|
||||
func Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
return meter.Float64ObservableGauge(name, options...)
|
||||
}
|
||||
|
||||
// RegisterCallback
|
||||
func RegisterCallback(f metric.Callback, instruments ...metric.Observable) (metric.Registration, error) {
|
||||
return meter.RegisterCallback(f, instruments...)
|
||||
}
|
||||
247
backend/providers/otel/provider.go
Normal file
247
backend/providers/otel/provider.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package otel
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/contracts"
|
||||
"go.ipao.vip/atom/opt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/runtime"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
config.format()
|
||||
return container.Container.Provide(func(ctx context.Context) (contracts.Initial, error) {
|
||||
o := &builder{
|
||||
config: &config,
|
||||
}
|
||||
|
||||
if err := o.initResource(ctx); err != nil {
|
||||
return o, errors.Wrapf(err, "Failed to create OpenTelemetry resource")
|
||||
}
|
||||
|
||||
if err := o.initMeterProvider(ctx); err != nil {
|
||||
return o, errors.Wrapf(err, "Failed to create OpenTelemetry metric provider")
|
||||
}
|
||||
|
||||
if err := o.initTracerProvider(ctx); err != nil {
|
||||
return o, errors.Wrapf(err, "Failed to create OpenTelemetry tracer provider")
|
||||
}
|
||||
|
||||
tracer = otel.Tracer(config.ServiceName)
|
||||
meter = otel.Meter(config.ServiceName)
|
||||
|
||||
log.Info("otel provider init success")
|
||||
return o, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
|
||||
type builder struct {
|
||||
config *Config
|
||||
|
||||
resource *resource.Resource
|
||||
}
|
||||
|
||||
func (o *builder) initResource(ctx context.Context) (err error) {
|
||||
hostName, _ := os.Hostname()
|
||||
|
||||
o.resource, err = resource.New(
|
||||
ctx,
|
||||
resource.WithFromEnv(),
|
||||
resource.WithProcess(),
|
||||
resource.WithTelemetrySDK(),
|
||||
resource.WithHost(),
|
||||
resource.WithOS(),
|
||||
resource.WithContainer(),
|
||||
resource.WithAttributes(
|
||||
semconv.ServiceNameKey.String(o.config.ServiceName), // 应用名
|
||||
semconv.ServiceVersionKey.String(o.config.Version), // 应用版本
|
||||
semconv.DeploymentEnvironmentKey.String(o.config.Env), // 部署环境
|
||||
semconv.HostNameKey.String(hostName), // 主机名
|
||||
),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
func (o *builder) initMeterProvider(ctx context.Context) (err error) {
|
||||
exporterGrpcFunc := func(ctx context.Context) (sdkmetric.Exporter, error) {
|
||||
opts := []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithEndpoint(o.config.EndpointGRPC),
|
||||
otlpmetricgrpc.WithCompressor(gzip.Name),
|
||||
}
|
||||
|
||||
if o.config.Token != "" {
|
||||
headers := map[string]string{"Authentication": o.config.Token}
|
||||
opts = append(opts, otlpmetricgrpc.WithHeaders(headers))
|
||||
}
|
||||
|
||||
exporter, err := otlpmetricgrpc.New(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exporter, nil
|
||||
}
|
||||
|
||||
exporterHttpFunc := func(ctx context.Context) (sdkmetric.Exporter, error) {
|
||||
opts := []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithEndpoint(o.config.EndpointHTTP),
|
||||
otlpmetrichttp.WithCompression(1),
|
||||
}
|
||||
|
||||
if o.config.Token != "" {
|
||||
opts = append(opts, otlpmetrichttp.WithURLPath(o.config.Token))
|
||||
}
|
||||
|
||||
exporter, err := otlpmetrichttp.New(ctx, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exporter, nil
|
||||
}
|
||||
|
||||
var exporter sdkmetric.Exporter
|
||||
if o.config.EndpointHTTP != "" {
|
||||
exporter, err = exporterHttpFunc(ctx)
|
||||
} else {
|
||||
exporter, err = exporterGrpcFunc(ctx)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
meterProvider := sdkmetric.NewMeterProvider(
|
||||
sdkmetric.WithReader(
|
||||
sdkmetric.NewPeriodicReader(exporter),
|
||||
),
|
||||
sdkmetric.WithResource(o.resource),
|
||||
)
|
||||
otel.SetMeterProvider(meterProvider)
|
||||
|
||||
err = runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second * 5))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Failed to start runtime metrics")
|
||||
}
|
||||
|
||||
container.AddCloseAble(func() {
|
||||
if err := meterProvider.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (o *builder) initTracerProvider(ctx context.Context) error {
|
||||
exporterGrpcFunc := func(ctx context.Context) (*otlptrace.Exporter, error) {
|
||||
opts := []otlptracegrpc.Option{
|
||||
otlptracegrpc.WithCompressor(gzip.Name),
|
||||
otlptracegrpc.WithEndpoint(o.config.EndpointGRPC),
|
||||
otlptracegrpc.WithInsecure(), // 添加不安全连接选项
|
||||
}
|
||||
|
||||
if o.config.Token != "" {
|
||||
headers := map[string]string{
|
||||
"Authentication": o.config.Token,
|
||||
"authorization": o.config.Token, // 添加标准认证头
|
||||
}
|
||||
opts = append(opts, otlptracegrpc.WithHeaders(headers))
|
||||
}
|
||||
|
||||
log.Debugf("Creating GRPC trace exporter with endpoint: %s", o.config.EndpointGRPC)
|
||||
exporter, err := otlptrace.New(ctx, otlptracegrpc.NewClient(opts...))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create GRPC trace exporter")
|
||||
}
|
||||
|
||||
container.AddCloseAble(func() {
|
||||
cxt, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
if err := exporter.Shutdown(cxt); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
})
|
||||
|
||||
return exporter, nil
|
||||
}
|
||||
|
||||
exporterHttpFunc := func(ctx context.Context) (*otlptrace.Exporter, error) {
|
||||
opts := []otlptracehttp.Option{
|
||||
otlptracehttp.WithInsecure(),
|
||||
otlptracehttp.WithCompression(1),
|
||||
otlptracehttp.WithEndpoint(o.config.EndpointHTTP),
|
||||
}
|
||||
|
||||
if o.config.Token != "" {
|
||||
opts = append(opts,
|
||||
otlptracehttp.WithHeaders(map[string]string{
|
||||
"Authentication": o.config.Token,
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
log.Debugf("Creating HTTP trace exporter with endpoint: %s", o.config.EndpointHTTP)
|
||||
exporter, err := otlptrace.New(ctx, otlptracehttp.NewClient(opts...))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create HTTP trace exporter")
|
||||
}
|
||||
|
||||
return exporter, nil
|
||||
}
|
||||
|
||||
var exporter *otlptrace.Exporter
|
||||
var err error
|
||||
if o.config.EndpointHTTP != "" {
|
||||
exporter, err = exporterHttpFunc(ctx)
|
||||
log.Infof("otel http exporter: %s", o.config.EndpointHTTP)
|
||||
} else {
|
||||
exporter, err = exporterGrpcFunc(ctx)
|
||||
log.Infof("otel grpc exporter: %s", o.config.EndpointGRPC)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
traceProvider := sdktrace.NewTracerProvider(
|
||||
sdktrace.WithSampler(sdktrace.AlwaysSample()),
|
||||
sdktrace.WithResource(o.resource),
|
||||
sdktrace.WithBatcher(exporter),
|
||||
)
|
||||
container.AddCloseAble(func() {
|
||||
log.Error("shut down")
|
||||
if err := traceProvider.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
})
|
||||
|
||||
otel.SetTracerProvider(traceProvider)
|
||||
otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
|
||||
propagation.TraceContext{},
|
||||
propagation.Baggage{},
|
||||
))
|
||||
|
||||
return err
|
||||
}
|
||||
79
backend/providers/postgres/config.go
Normal file
79
backend/providers/postgres/config.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Database"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds the postgres connection settings, typically unmarshalled
// from the "Database"-prefixed section of the application configuration.
// Zero-valued fields are back-filled by checkDefault.
type Config struct {
	Username string // database user; defaults to "postgres"
	Password string
	Database string
	Schema   string // defaults to "public"
	Host     string
	Port     uint   // defaults to 5432
	SslMode  string // defaults to "disable"
	TimeZone string // defaults to "Asia/Shanghai"
	Prefix   string // table name prefix
	Singular bool   // when true, globally disable plural table names
	MaxIdleConns int // maximum idle connections in the pool; defaults to 10
	MaxOpenConns int // maximum open connections to the database; defaults to 100
}
|
||||
|
||||
func (m *Config) checkDefault() {
|
||||
if m.MaxIdleConns == 0 {
|
||||
m.MaxIdleConns = 10
|
||||
}
|
||||
|
||||
if m.MaxOpenConns == 0 {
|
||||
m.MaxOpenConns = 100
|
||||
}
|
||||
|
||||
if m.Username == "" {
|
||||
m.Username = "postgres"
|
||||
}
|
||||
|
||||
if m.SslMode == "" {
|
||||
m.SslMode = "disable"
|
||||
}
|
||||
|
||||
if m.TimeZone == "" {
|
||||
m.TimeZone = "Asia/Shanghai"
|
||||
}
|
||||
|
||||
if m.Port == 0 {
|
||||
m.Port = 5432
|
||||
}
|
||||
|
||||
if m.Schema == "" {
|
||||
m.Schema = "public"
|
||||
}
|
||||
}
|
||||
|
||||
// EmptyDsn builds a keyword/value connection string after applying defaults.
// NOTE(review): despite the name, this still includes dbname and is identical
// in content to DSN() apart from field order — confirm whether it was meant to
// omit the database (e.g. for bootstrap connections that CREATE DATABASE).
func (m *Config) EmptyDsn() string {
	dsnTpl := "host=%s user=%s password=%s port=%d dbname=%s sslmode=%s TimeZone=%s"
	m.checkDefault()

	return fmt.Sprintf(dsnTpl, m.Host, m.Username, m.Password, m.Port, m.Database, m.SslMode, m.TimeZone)
}
|
||||
|
||||
// DSN connection dsn
|
||||
func (m *Config) DSN() string {
|
||||
dsnTpl := "host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=%s"
|
||||
m.checkDefault()
|
||||
|
||||
return fmt.Sprintf(dsnTpl, m.Host, m.Username, m.Password, m.Database, m.Port, m.SslMode, m.TimeZone)
|
||||
}
|
||||
34
backend/providers/postgres/postgres.go
Normal file
34
backend/providers/postgres/postgres.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var conf Config
|
||||
if err := o.UnmarshalConfig(&conf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return container.Container.Provide(func() (*sql.DB, *Config, error) {
|
||||
log.Debugf("connect postgres with dsn: '%s'", conf.DSN())
|
||||
db, err := sql.Open("postgres", conf.DSN())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "connect database")
|
||||
}
|
||||
|
||||
if err := db.Ping(); err != nil {
|
||||
db.Close()
|
||||
return nil, nil, errors.Wrap(err, "ping database")
|
||||
}
|
||||
|
||||
return db, &conf, err
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
44
backend/providers/redis/config.go
Normal file
44
backend/providers/redis/config.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Redis"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds the redis connection settings, typically unmarshalled from
// the "Redis"-prefixed section of the application configuration.
type Config struct {
	Host     string // defaults to "localhost"
	Port     uint   // defaults to 6379
	Password string
	DB       uint // logical database index; 0 by default
}
|
||||
|
||||
func (c *Config) format() {
|
||||
if c.Host == "" {
|
||||
c.Host = "localhost"
|
||||
}
|
||||
|
||||
if c.Port == 0 {
|
||||
c.Port = 6379
|
||||
}
|
||||
|
||||
if c.DB == 0 {
|
||||
c.DB = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Addr renders the "host:port" address used to dial redis.
func (c *Config) Addr() string {
	return fmt.Sprintf("%s:%d", c.Host, c.Port)
}
|
||||
33
backend/providers/redis/provider.go
Normal file
33
backend/providers/redis/provider.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var config Config
|
||||
if err := o.UnmarshalConfig(&config); err != nil {
|
||||
return err
|
||||
}
|
||||
config.format()
|
||||
return container.Container.Provide(func() (redis.UniversalClient, error) {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: config.Addr(),
|
||||
Password: config.Password,
|
||||
DB: int(config.DB),
|
||||
})
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
if _, err := rdb.Ping(ctx).Result(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rdb, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
152
backend/providers/req/client.go
Normal file
152
backend/providers/req/client.go
Normal file
@@ -0,0 +1,152 @@
|
||||
package req
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"quyun/providers/req/cookiejar"
|
||||
|
||||
"github.com/imroc/req/v3"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
// Client wraps a req.Client together with its persistent cookie jar.
type Client struct {
	client *req.Client    // underlying HTTP client
	jar    *cookiejar.Jar // persistent jar; nil when CookieJarFile is unset
}
|
||||
|
||||
// Provide registers a *Client in the DI container, configured from the
// "HttpClient"-prefixed configuration section. Each option is applied to
// the underlying req.Client only when the corresponding config field is set.
func Provide(opts ...opt.Option) error {
	o := opt.New(opts...)
	var config Config
	if err := o.UnmarshalConfig(&config); err != nil {
		return err
	}
	return container.Container.Provide(func() (*Client, error) {
		c := &Client{}

		client := req.C()
		if config.DevMode {
			client.DevMode()
		}

		// Persistent cookie jar: make sure the parent directory exists,
		// then load (or create) the jar file.
		if config.CookieJarFile != "" {
			dir := filepath.Dir(config.CookieJarFile)
			if _, err := os.Stat(dir); os.IsNotExist(err) {
				err = os.MkdirAll(dir, 0o755)
				if err != nil {
					return nil, err
				}
			}
			jar, err := cookiejar.New(&cookiejar.Options{
				Filename: config.CookieJarFile,
			})
			if err != nil {
				return nil, err
			}

			c.jar = jar
			client.SetCookieJar(jar)
		}

		if config.RootCa != nil {
			client.SetRootCertsFromFile(config.RootCa...)
		}

		if config.InsecureSkipVerify {
			client.EnableInsecureSkipVerify()
		}

		if config.UserAgent != "" {
			client.SetUserAgent(config.UserAgent)
		}
		// Timeout is configured in seconds.
		if config.Timeout > 0 {
			client.SetTimeout(time.Duration(config.Timeout) * time.Second)
		}

		if config.CommonHeaders != nil {
			client.SetCommonHeaders(config.CommonHeaders)
		}

		// Basic auth only when both halves are present.
		if config.AuthBasic.Username != "" && config.AuthBasic.Password != "" {
			client.SetCommonBasicAuth(config.AuthBasic.Username, config.AuthBasic.Password)
		}

		if config.AuthBearerToken != "" {
			client.SetCommonBearerAuthToken(config.AuthBearerToken)
		}

		if config.ProxyURL != "" {
			client.SetProxyURL(config.ProxyURL)
		}

		// Redirect policies are "Name" or "Name:arg" strings; see
		// parsePolicies for the supported set.
		if config.RedirectPolicy != nil {
			client.SetRedirectPolicy(parsePolicies(config.RedirectPolicy)...)
		}

		c.client = client
		return c, nil
	}, o.DiOptions()...)
}
|
||||
|
||||
func parsePolicies(policies []string) []req.RedirectPolicy {
|
||||
ps := []req.RedirectPolicy{}
|
||||
for _, policy := range policies {
|
||||
policyItems := strings.Split(policy, ":")
|
||||
if len(policyItems) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
switch policyItems[0] {
|
||||
case "Max":
|
||||
max, err := strconv.Atoi(policyItems[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
ps = append(ps, req.MaxRedirectPolicy(max))
|
||||
case "No":
|
||||
ps = append(ps, req.NoRedirectPolicy())
|
||||
case "SameDomain":
|
||||
ps = append(ps, req.SameDomainRedirectPolicy())
|
||||
case "SameHost":
|
||||
ps = append(ps, req.SameHostRedirectPolicy())
|
||||
case "AllowedHost":
|
||||
ps = append(ps, req.AllowedHostRedirectPolicy(strings.Split(policyItems[1], ",")...))
|
||||
case "AllowedDomain":
|
||||
ps = append(ps, req.AllowedDomainRedirectPolicy(strings.Split(policyItems[1], ",")...))
|
||||
}
|
||||
}
|
||||
|
||||
return ps
|
||||
}
|
||||
|
||||
// R starts a new request on the underlying req client.
func (c *Client) R() *req.Request {
	return c.client.R()
}
|
||||
|
||||
func (c *Client) SaveCookJar() error {
|
||||
return c.jar.Save()
|
||||
}
|
||||
|
||||
// GetCookie looks up a single cookie value by name across all cookies
// currently stored in the jar. The second result reports presence.
func (c *Client) GetCookie(key string) (string, bool) {
	kv := c.AllCookiesKV()
	v, ok := kv[key]
	return v, ok
}
|
||||
|
||||
// AllCookies returns every unexpired cookie stored in the jar.
// NOTE(review): panics when no cookie jar file was configured (c.jar is nil).
func (c *Client) AllCookies() []*http.Cookie {
	return c.jar.AllCookies()
}
|
||||
|
||||
// AllCookiesKV returns the jar's cookies as a name-to-value map.
// NOTE(review): panics when no cookie jar file was configured (c.jar is nil).
func (c *Client) AllCookiesKV() map[string]string {
	return c.jar.KVData()
}
|
||||
|
||||
// SetCookie stores cookies for the given URL directly into the jar.
// NOTE(review): panics when no cookie jar file was configured (c.jar is nil).
func (c *Client) SetCookie(u *url.URL, cookies []*http.Cookie) {
	c.jar.SetCookies(u, cookies)
}
|
||||
34
backend/providers/req/config.go
Normal file
34
backend/providers/req/config.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package req
|
||||
|
||||
import (
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "HttpClient"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds HTTP client settings, unmarshalled from the
// "HttpClient"-prefixed configuration section.
type Config struct {
	DevMode            bool              // enable req's debug/dev mode
	CookieJarFile      string            // path of the persistent cookie jar; empty disables persistence
	RootCa             []string          // extra root CA certificate files
	UserAgent          string            // User-Agent header override
	InsecureSkipVerify bool              // skip TLS certificate verification
	CommonHeaders      map[string]string // headers added to every request
	Timeout            uint              // request timeout in seconds; 0 means no timeout
	AuthBasic          struct {          // basic auth applied when both fields are set
		Username string
		Password string
	}
	AuthBearerToken string   // bearer token applied to every request
	ProxyURL        string   // proxy URL for all requests
	RedirectPolicy  []string // entries like "Max:10", "No", "SameDomain", "SameHost", "AllowedHost:a,b", "AllowedDomain:a,b"
}
|
||||
704
backend/providers/req/cookiejar/jar.go
Normal file
704
backend/providers/req/cookiejar/jar.go
Normal file
@@ -0,0 +1,704 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cookiejar implements an in-memory RFC 6265-compliant http.CookieJar.
|
||||
//
|
||||
// This implementation is a fork of net/http/cookiejar which also
|
||||
// implements methods for dumping the cookies to persistent
|
||||
// storage and retrieving them.
|
||||
package cookiejar
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/publicsuffix"
|
||||
)
|
||||
|
||||
// PublicSuffixList provides the public suffix of a domain. For example:
|
||||
// - the public suffix of "example.com" is "com",
|
||||
// - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and
|
||||
// - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us".
|
||||
//
|
||||
// Implementations of PublicSuffixList must be safe for concurrent use by
|
||||
// multiple goroutines.
|
||||
//
|
||||
// An implementation that always returns "" is valid and may be useful for
|
||||
// testing but it is not secure: it means that the HTTP server for foo.com can
|
||||
// set a cookie for bar.com.
|
||||
//
|
||||
// A public suffix list implementation is in the package
|
||||
// golang.org/x/net/publicsuffix.
|
||||
type PublicSuffixList interface {
|
||||
// PublicSuffix returns the public suffix of domain.
|
||||
//
|
||||
// TODO: specify which of the caller and callee is responsible for IP
|
||||
// addresses, for leading and trailing dots, for case sensitivity, and
|
||||
// for IDN/Punycode.
|
||||
PublicSuffix(domain string) string
|
||||
|
||||
// String returns a description of the source of this public suffix
|
||||
// list. The description will typically contain something like a time
|
||||
// stamp or version number.
|
||||
String() string
|
||||
}
|
||||
|
||||
// Options are the options for creating a new Jar.
|
||||
type Options struct {
|
||||
// PublicSuffixList is the public suffix list that determines whether
|
||||
// an HTTP server can set a cookie for a domain.
|
||||
//
|
||||
// If this is nil, the public suffix list implementation in golang.org/x/net/publicsuffix
|
||||
// is used.
|
||||
PublicSuffixList PublicSuffixList
|
||||
|
||||
// Filename holds the file to use for storage of the cookies.
|
||||
// If it is empty, the value of DefaultCookieFile will be used.
|
||||
Filename string
|
||||
|
||||
// NoPersist specifies whether no persistence should be used
|
||||
// (useful for tests). If this is true, the value of Filename will be
|
||||
// ignored.
|
||||
NoPersist bool
|
||||
}
|
||||
|
||||
// Jar implements the http.CookieJar interface from the net/http package.
|
||||
type Jar struct {
|
||||
// filename holds the file that the cookies were loaded from.
|
||||
filename string
|
||||
|
||||
psList PublicSuffixList
|
||||
|
||||
// mu locks the remaining fields.
|
||||
mu sync.Mutex
|
||||
|
||||
// entries is a set of entries, keyed by their eTLD+1 and subkeyed by
|
||||
// their name/domain/path.
|
||||
entries map[string]map[string]entry
|
||||
}
|
||||
|
||||
var noOptions Options
|
||||
|
||||
// New returns a new cookie jar. A nil *Options is equivalent to a zero
|
||||
// Options.
|
||||
//
|
||||
// New will return an error if the cookies could not be loaded
|
||||
// from the file for any reason than if the file does not exist.
|
||||
func New(o *Options) (*Jar, error) {
|
||||
return newAtTime(o, time.Now())
|
||||
}
|
||||
|
||||
// newAtTime is like New but takes the current time as a parameter.
|
||||
func newAtTime(o *Options, now time.Time) (*Jar, error) {
|
||||
jar := &Jar{
|
||||
entries: make(map[string]map[string]entry),
|
||||
}
|
||||
if o == nil {
|
||||
o = &noOptions
|
||||
}
|
||||
if jar.psList = o.PublicSuffixList; jar.psList == nil {
|
||||
jar.psList = publicsuffix.List
|
||||
}
|
||||
if !o.NoPersist {
|
||||
if jar.filename = o.Filename; jar.filename == "" {
|
||||
jar.filename = DefaultCookieFile()
|
||||
}
|
||||
if err := jar.load(); err != nil {
|
||||
return nil, errors.Wrap(err, "cannot load cookies")
|
||||
}
|
||||
}
|
||||
jar.deleteExpired(now)
|
||||
return jar, nil
|
||||
}
|
||||
|
||||
// homeDir returns the user's home directory as specified by the
// environment: HOMEDRIVE+HOMEPATH on Windows, HOME everywhere else.
func homeDir() string {
	if runtime.GOOS != "windows" {
		return os.Getenv("HOME")
	}
	return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"))
}
|
||||
|
||||
// entry is the internal representation of a cookie.
|
||||
//
|
||||
// This struct type is not used outside of this package per se, but the exported
|
||||
// fields are those of RFC 6265.
|
||||
// Note that this structure is marshaled to JSON, so backward-compatibility
|
||||
// should be preserved.
|
||||
type entry struct {
|
||||
Name string
|
||||
Value string
|
||||
Domain string
|
||||
Path string
|
||||
Secure bool
|
||||
HttpOnly bool
|
||||
Persistent bool
|
||||
HostOnly bool
|
||||
Expires time.Time
|
||||
Creation time.Time
|
||||
LastAccess time.Time
|
||||
|
||||
// Updated records when the cookie was updated.
|
||||
// This is different from creation time because a cookie
|
||||
// can be changed without updating the creation time.
|
||||
Updated time.Time
|
||||
|
||||
// CanonicalHost stores the original canonical host name
|
||||
// that the cookie was associated with. We store this
|
||||
// so that even if the public suffix list changes (for example
|
||||
// when storing/loading cookies) we can still get the correct
|
||||
// jar keys.
|
||||
CanonicalHost string
|
||||
}
|
||||
|
||||
// id returns the domain;path;name triple of e as an id.
|
||||
func (e *entry) id() string {
|
||||
return id(e.Domain, e.Path, e.Name)
|
||||
}
|
||||
|
||||
// id returns the domain;path;name triple as an id.
func id(domain, path, name string) string {
	return domain + ";" + path + ";" + name
}
|
||||
|
||||
// shouldSend determines whether e's cookie qualifies to be included in a
|
||||
// request to host/path. It is the caller's responsibility to check if the
|
||||
// cookie is expired.
|
||||
func (e *entry) shouldSend(https bool, host, path string) bool {
|
||||
return e.domainMatch(host) && e.pathMatch(path) && (https || !e.Secure)
|
||||
}
|
||||
|
||||
// domainMatch implements "domain-match" of RFC 6265 section 5.1.3.
|
||||
func (e *entry) domainMatch(host string) bool {
|
||||
if e.Domain == host {
|
||||
return true
|
||||
}
|
||||
return !e.HostOnly && hasDotSuffix(host, e.Domain)
|
||||
}
|
||||
|
||||
// pathMatch implements "path-match" according to RFC 6265 section 5.1.4.
|
||||
func (e *entry) pathMatch(requestPath string) bool {
|
||||
if requestPath == e.Path {
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(requestPath, e.Path) {
|
||||
if e.Path[len(e.Path)-1] == '/' {
|
||||
return true // The "/any/" matches "/any/path" case.
|
||||
} else if requestPath[len(e.Path)] == '/' {
|
||||
return true // The "/any" matches "/any/path" case.
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// hasDotSuffix reports whether s ends in "."+suffix.
func hasDotSuffix(s, suffix string) bool {
	return strings.HasSuffix(s, "."+suffix)
}
|
||||
|
||||
type byCanonicalHost struct {
|
||||
byPathLength
|
||||
}
|
||||
|
||||
func (s byCanonicalHost) Less(i, j int) bool {
|
||||
e0, e1 := &s.byPathLength[i], &s.byPathLength[j]
|
||||
if e0.CanonicalHost != e1.CanonicalHost {
|
||||
return e0.CanonicalHost < e1.CanonicalHost
|
||||
}
|
||||
return s.byPathLength.Less(i, j)
|
||||
}
|
||||
|
||||
// byPathLength is a []entry sort.Interface that sorts according to RFC 6265
|
||||
// section 5.4 point 2: by longest path and then by earliest creation time.
|
||||
type byPathLength []entry
|
||||
|
||||
func (s byPathLength) Len() int { return len(s) }
|
||||
|
||||
func (s byPathLength) Less(i, j int) bool {
|
||||
e0, e1 := &s[i], &s[j]
|
||||
if len(e0.Path) != len(e1.Path) {
|
||||
return len(e0.Path) > len(e1.Path)
|
||||
}
|
||||
if !e0.Creation.Equal(e1.Creation) {
|
||||
return e0.Creation.Before(e1.Creation)
|
||||
}
|
||||
// The following are not strictly necessary
|
||||
// but are useful for providing deterministic
|
||||
// behaviour in tests.
|
||||
if e0.Name != e1.Name {
|
||||
return e0.Name < e1.Name
|
||||
}
|
||||
return e0.Value < e1.Value
|
||||
}
|
||||
|
||||
func (s byPathLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// Cookies implements the Cookies method of the http.CookieJar interface.
|
||||
//
|
||||
// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
|
||||
func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) {
|
||||
return j.cookies(u, time.Now())
|
||||
}
|
||||
|
||||
// cookies is like Cookies but takes the current time as a parameter.
|
||||
func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) {
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
return cookies
|
||||
}
|
||||
host, err := canonicalHost(u.Host)
|
||||
if err != nil {
|
||||
return cookies
|
||||
}
|
||||
key := jarKey(host, j.psList)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
submap := j.entries[key]
|
||||
if submap == nil {
|
||||
return cookies
|
||||
}
|
||||
|
||||
https := u.Scheme == "https"
|
||||
path := u.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
|
||||
var selected []entry
|
||||
for id, e := range submap {
|
||||
if !e.Expires.After(now) {
|
||||
// Save some space by deleting the value when the cookie
|
||||
// expires. We can't delete the cookie itself because then
|
||||
// we wouldn't know that the cookie had expired when
|
||||
// we merge with another cookie jar.
|
||||
if e.Value != "" {
|
||||
e.Value = ""
|
||||
submap[id] = e
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !e.shouldSend(https, host, path) {
|
||||
continue
|
||||
}
|
||||
e.LastAccess = now
|
||||
submap[id] = e
|
||||
selected = append(selected, e)
|
||||
}
|
||||
|
||||
sort.Sort(byPathLength(selected))
|
||||
for _, e := range selected {
|
||||
cookies = append(cookies, &http.Cookie{Name: e.Name, Value: e.Value})
|
||||
}
|
||||
|
||||
return cookies
|
||||
}
|
||||
|
||||
// AllCookies returns all cookies in the jar. The returned cookies will
|
||||
// have Domain, Expires, HttpOnly, Name, Secure, Path, and Value filled
|
||||
// out. Expired cookies will not be returned. This function does not
|
||||
// modify the cookie jar.
|
||||
func (j *Jar) AllCookies() (cookies []*http.Cookie) {
|
||||
return j.allCookies(time.Now())
|
||||
}
|
||||
|
||||
// allCookies is like AllCookies but takes the current time as a parameter.
|
||||
func (j *Jar) allCookies(now time.Time) []*http.Cookie {
|
||||
var selected []entry
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
for _, submap := range j.entries {
|
||||
for _, e := range submap {
|
||||
if !e.Expires.After(now) {
|
||||
// Do not return expired cookies.
|
||||
continue
|
||||
}
|
||||
selected = append(selected, e)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byCanonicalHost{byPathLength(selected)})
|
||||
cookies := make([]*http.Cookie, len(selected))
|
||||
for i, e := range selected {
|
||||
// Note: The returned cookies do not contain sufficient
|
||||
// information to recreate the database.
|
||||
cookies[i] = &http.Cookie{
|
||||
Name: e.Name,
|
||||
Value: e.Value,
|
||||
Path: e.Path,
|
||||
Domain: e.Domain,
|
||||
Expires: e.Expires,
|
||||
Secure: e.Secure,
|
||||
HttpOnly: e.HttpOnly,
|
||||
}
|
||||
}
|
||||
|
||||
return cookies
|
||||
}
|
||||
|
||||
// RemoveCookie removes the cookie matching the name, domain and path
// specified by c. The entry is kept but emptied and expired (rather than
// deleted) so the removal survives a later merge with another jar.
func (j *Jar) RemoveCookie(c *http.Cookie) {
	j.mu.Lock()
	defer j.mu.Unlock()
	id := id(c.Domain, c.Path, c.Name)
	key := jarKey(c.Domain, j.psList)
	if e, ok := j.entries[key][id]; ok {
		e.Value = ""
		e.Expires = time.Now().Add(-1 * time.Second)
		j.entries[key][id] = e
	}
}
|
||||
|
||||
// merge merges all the given entries into j. More recently changed
|
||||
// cookies take precedence over older ones.
|
||||
func (j *Jar) merge(entries []entry) {
|
||||
for _, e := range entries {
|
||||
if e.CanonicalHost == "" {
|
||||
continue
|
||||
}
|
||||
key := jarKey(e.CanonicalHost, j.psList)
|
||||
id := e.id()
|
||||
submap := j.entries[key]
|
||||
if submap == nil {
|
||||
j.entries[key] = map[string]entry{
|
||||
id: e,
|
||||
}
|
||||
continue
|
||||
}
|
||||
oldEntry, ok := submap[id]
|
||||
if !ok || e.Updated.After(oldEntry.Updated) {
|
||||
submap[id] = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var expiryRemovalDuration = 24 * time.Hour
|
||||
|
||||
// deleteExpired deletes all entries that have expired for long enough
// that we can actually expect there to be no external copies of it that
// might resurrect the dead cookie.
// NOTE(review): callers are expected to hold j.mu; the construction-time
// call in newAtTime runs before the jar is shared.
func (j *Jar) deleteExpired(now time.Time) {
	for tld, submap := range j.entries {
		for id, e := range submap {
			// Remove only once the entry is both expired and untouched for
			// at least expiryRemovalDuration (24h).
			if !e.Expires.After(now) && !e.Updated.Add(expiryRemovalDuration).After(now) {
				delete(submap, id)
			}
		}
		// Drop empty eTLD+1 buckets entirely.
		if len(submap) == 0 {
			delete(j.entries, tld)
		}
	}
}
|
||||
|
||||
// RemoveAllHost removes any cookies from the jar that were set for the given host.
|
||||
func (j *Jar) RemoveAllHost(host string) {
|
||||
host, err := canonicalHost(host)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
key := jarKey(host, j.psList)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
expired := time.Now().Add(-1 * time.Second)
|
||||
submap := j.entries[key]
|
||||
for id, e := range submap {
|
||||
if e.CanonicalHost == host {
|
||||
// Save some space by deleting the value when the cookie
|
||||
// expires. We can't delete the cookie itself because then
|
||||
// we wouldn't know that the cookie had expired when
|
||||
// we merge with another cookie jar.
|
||||
e.Value = ""
|
||||
e.Expires = expired
|
||||
submap[id] = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveAll removes all the cookies from the jar.
|
||||
func (j *Jar) RemoveAll() {
|
||||
expired := time.Now().Add(-1 * time.Second)
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
for _, submap := range j.entries {
|
||||
for id, e := range submap {
|
||||
// Save some space by deleting the value when the cookie
|
||||
// expires. We can't delete the cookie itself because then
|
||||
// we wouldn't know that the cookie had expired when
|
||||
// we merge with another cookie jar.
|
||||
e.Value = ""
|
||||
e.Expires = expired
|
||||
submap[id] = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SetCookies implements the SetCookies method of the http.CookieJar interface.
|
||||
//
|
||||
// It does nothing if the URL's scheme is not HTTP or HTTPS.
|
||||
func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) {
|
||||
j.setCookies(u, cookies, time.Now())
|
||||
}
|
||||
|
||||
// setCookies is like SetCookies but takes the current time as parameter.
|
||||
func (j *Jar) setCookies(u *url.URL, cookies []*http.Cookie, now time.Time) {
|
||||
if len(cookies) == 0 {
|
||||
return
|
||||
}
|
||||
if u.Scheme != "http" && u.Scheme != "https" {
|
||||
// TODO is this really correct? It might be nice to send
|
||||
// cookies to websocket connections, for example.
|
||||
return
|
||||
}
|
||||
host, err := canonicalHost(u.Host)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
key := jarKey(host, j.psList)
|
||||
defPath := defaultPath(u.Path)
|
||||
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
|
||||
submap := j.entries[key]
|
||||
for _, cookie := range cookies {
|
||||
e, err := j.newEntry(cookie, now, defPath, host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
e.CanonicalHost = host
|
||||
id := e.id()
|
||||
if submap == nil {
|
||||
submap = make(map[string]entry)
|
||||
j.entries[key] = submap
|
||||
}
|
||||
if old, ok := submap[id]; ok {
|
||||
e.Creation = old.Creation
|
||||
} else {
|
||||
e.Creation = now
|
||||
}
|
||||
e.Updated = now
|
||||
e.LastAccess = now
|
||||
submap[id] = e
|
||||
}
|
||||
}
|
||||
|
||||
// canonicalHost strips the port from host if present and returns the
// canonicalized (lowercased, no trailing dot, ASCII/punycode) host name.
func (m int) int { panic("placeholder") }
|
||||
|
||||
// hasPort reports whether host contains a port number. host may be a
// host name, an IPv4 address, or a (possibly bracketed) IPv6 address.
func hasPort(host string) bool {
	switch strings.Count(host, ":") {
	case 0:
		return false
	case 1:
		return true
	default:
		// More than one colon means an IPv6 literal, which only carries a
		// port when bracketed like "[::1]:80".
		return host[0] == '[' && strings.Contains(host, "]:")
	}
}
|
||||
|
||||
// jarKey returns the key to use for a jar: the registrable domain
// (eTLD+1, e.g. "example.com" for "www.example.com") when a public suffix
// list is available, or the last two labels without one. IP addresses key
// under themselves.
func jarKey(host string, psl PublicSuffixList) string {
	if isIP(host) {
		return host
	}

	// i will be the index just past the dot separating the eTLD from the
	// label immediately before it.
	var i int
	if psl == nil {
		i = strings.LastIndex(host, ".")
		if i == -1 {
			return host
		}
	} else {
		suffix := psl.PublicSuffix(host)
		if suffix == host {
			return host
		}
		i = len(host) - len(suffix)
		if i <= 0 || host[i-1] != '.' {
			// The provided public suffix list psl is broken.
			// Storing cookies under host is a safe stopgap.
			return host
		}
	}
	prevDot := strings.LastIndex(host[:i-1], ".")
	return host[prevDot+1:]
}
|
||||
|
||||
// isIP reports whether host parses as a literal IP address.
func isIP(host string) bool {
	ip := net.ParseIP(host)
	return ip != nil
}
|
||||
|
||||
// defaultPath returns the directory part of a URL's path according to
// RFC 6265 section 5.1.4.
func defaultPath(path string) string {
	if !strings.HasPrefix(path, "/") {
		// Empty or malformed request-path.
		return "/"
	}

	cut := strings.LastIndex(path, "/") // path starts with "/", so cut != -1
	if cut == 0 {
		// Form "/abc": default-path is "/".
		return "/"
	}
	// Form "/abc/xyz" or "/abc/xyz/": everything up to the last slash.
	return path[:cut]
}
|
||||
|
||||
// newEntry creates an entry from a http.Cookie c. now is the current
|
||||
// time and is compared to c.Expires to determine deletion of c. defPath
|
||||
// and host are the default-path and the canonical host name of the URL
|
||||
// c was received from.
|
||||
//
|
||||
// The returned entry should be removed if its expiry time is in the
|
||||
// past. In this case, e may be incomplete, but it will be valid to call
|
||||
// e.id (which depends on e's Name, Domain and Path).
|
||||
//
|
||||
// A malformed c.Domain will result in an error.
|
||||
func (j *Jar) newEntry(c *http.Cookie, now time.Time, defPath, host string) (e entry, err error) {
|
||||
e.Name = c.Name
|
||||
if c.Path == "" || c.Path[0] != '/' {
|
||||
e.Path = defPath
|
||||
} else {
|
||||
e.Path = c.Path
|
||||
}
|
||||
|
||||
e.Domain, e.HostOnly, err = j.domainAndType(host, c.Domain)
|
||||
if err != nil {
|
||||
return e, err
|
||||
}
|
||||
// MaxAge takes precedence over Expires.
|
||||
if c.MaxAge != 0 {
|
||||
e.Persistent = true
|
||||
e.Expires = now.Add(time.Duration(c.MaxAge) * time.Second)
|
||||
if c.MaxAge < 0 {
|
||||
return e, nil
|
||||
}
|
||||
} else if c.Expires.IsZero() {
|
||||
e.Expires = endOfTime
|
||||
} else {
|
||||
e.Persistent = true
|
||||
e.Expires = c.Expires
|
||||
if !c.Expires.After(now) {
|
||||
return e, nil
|
||||
}
|
||||
}
|
||||
|
||||
e.Value = c.Value
|
||||
e.Secure = c.Secure
|
||||
e.HttpOnly = c.HttpOnly
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
var (
	// errIllegalDomain is returned when the Domain attribute does not
	// domain-match the host the cookie was received from.
	errIllegalDomain = errors.New("cookiejar: illegal cookie domain attribute")
	// errMalformedDomain is returned for syntactically invalid Domain
	// attributes such as ".", "..some.thing", or a trailing dot.
	errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute")
	// errNoHostname is returned when a domain cookie is set for an
	// IP-only host.
	errNoHostname = errors.New("cookiejar: no host name available (IP only)")
)

// endOfTime is the time when session (non-persistent) cookies expire.
// This instant is representable in most date/time formats (not just
// Go's time.Time) and should be far enough in the future.
var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC)
|
||||
|
||||
// domainAndType determines the cookie's domain and hostOnly attribute
// from the canonical host name host and the cookie's Domain attribute.
// It returns (domain, hostOnly, error); a malformed or non-matching
// domain attribute yields an error.
func (j *Jar) domainAndType(host, domain string) (string, bool, error) {
	if domain == "" {
		// No domain attribute in the SetCookie header indicates a
		// host cookie.
		return host, true, nil
	}

	if isIP(host) {
		// According to RFC 6265 domain-matching includes not being
		// an IP address.
		// TODO: This might be relaxed as in common browsers.
		return "", false, errNoHostname
	}

	// From here on: If the cookie is valid, it is a domain cookie (with
	// the one exception of a public suffix below).
	// See RFC 6265 section 5.2.3.
	if domain[0] == '.' {
		// Strip a single leading dot, as browsers do.
		domain = domain[1:]
	}

	if len(domain) == 0 || domain[0] == '.' {
		// Received either "Domain=." or "Domain=..some.thing",
		// both are illegal.
		return "", false, errMalformedDomain
	}
	domain = strings.ToLower(domain)

	if domain[len(domain)-1] == '.' {
		// We received stuff like "Domain=www.example.com.".
		// Browsers do handle such stuff (actually differently) but
		// RFC 6265 seems to be clear here (e.g. section 4.1.2.3) in
		// requiring a reject. 4.1.2.3 is not normative, but
		// "Domain Matching" (5.1.3) and "Canonicalized Host Names"
		// (5.1.2) are.
		return "", false, errMalformedDomain
	}

	// See RFC 6265 section 5.3 #5: reject domain cookies set on a
	// public suffix (e.g. "co.uk").
	if j.psList != nil {
		if ps := j.psList.PublicSuffix(domain); ps != "" && !hasDotSuffix(domain, ps) {
			if host == domain {
				// This is the one exception in which a cookie
				// with a domain attribute is a host cookie.
				return host, true, nil
			}
			return "", false, errIllegalDomain
		}
	}

	// The domain must domain-match host: www.mycompany.com cannot
	// set cookies for .ourcompetitors.com.
	if host != domain && !hasDotSuffix(host, domain) {
		return "", false, errIllegalDomain
	}

	return domain, false, nil
}
|
||||
|
||||
// DefaultCookieFile returns the default cookie file to use
// for persisting cookie data.
// The following names will be used in descending order of preference:
//   - the value of the $GOCOOKIES environment variable.
//   - $HOME/.go-cookies
func DefaultCookieFile() string {
	if f := os.Getenv("GOCOOKIES"); f != "" {
		return f
	}
	return filepath.Join(homeDir(), ".go-cookies")
}
|
||||
159
backend/providers/req/cookiejar/punycode.go
Normal file
159
backend/providers/req/cookiejar/punycode.go
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cookiejar
|
||||
|
||||
// This file implements the Punycode algorithm from RFC 3492.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// These parameter values are specified in section 5 of RFC 3492.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
	base        int32 = 36
	damp        int32 = 700
	initialBias int32 = 72
	initialN    int32 = 128
	skew        int32 = 38
	tmax        int32 = 26
	tmin        int32 = 1
)
|
||||
|
||||
// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
	output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
	copy(output, prefix)
	delta, n, bias := int32(0), initialN, initialBias
	b, remaining := int32(0), int32(0)
	// First pass: copy basic (ASCII) code points through unchanged and
	// count how many non-ASCII runes remain to be encoded.
	for _, r := range s {
		if r < 0x80 {
			b++
			output = append(output, byte(r))
		} else {
			remaining++
		}
	}
	h := b
	if b > 0 {
		// Delimiter between the basic code points and the encoded tail.
		output = append(output, '-')
	}
	for remaining != 0 {
		// m is the smallest code point >= n still present in s.
		m := int32(0x7fffffff)
		for _, r := range s {
			if m > r && r >= n {
				m = r
			}
		}
		delta += (m - n) * (h + 1)
		if delta < 0 {
			// int32 overflow (section 6.4): reject the label.
			return "", fmt.Errorf("cookiejar: invalid label %q", s)
		}
		n = m
		for _, r := range s {
			if r < n {
				delta++
				if delta < 0 {
					return "", fmt.Errorf("cookiejar: invalid label %q", s)
				}
				continue
			}
			if r > n {
				continue
			}
			// r == n: emit delta as a variable-length integer
			// (generalized variable-length integers, section 6.3).
			q := delta
			for k := base; ; k += base {
				t := k - bias
				if t < tmin {
					t = tmin
				} else if t > tmax {
					t = tmax
				}
				if q < t {
					break
				}
				output = append(output, encodeDigit(t+(q-t)%(base-t)))
				q = (q - t) / (base - t)
			}
			output = append(output, encodeDigit(q))
			bias = adapt(delta, h+1, h == b)
			delta = 0
			h++
			remaining--
		}
		delta++
		n++
	}
	return string(output), nil
}
|
||||
|
||||
// encodeDigit maps a numeric value in [0, 36) to its punycode digit:
// 0-25 become 'a'-'z' and 26-35 become '0'-'9'. Any other value is a
// programmer error and panics.
func encodeDigit(digit int32) byte {
	if digit >= 0 && digit < 26 {
		return 'a' + byte(digit)
	}
	if digit >= 26 && digit < 36 {
		return '0' + byte(digit-26)
	}
	panic("cookiejar: internal error in punycode encoding")
}
|
||||
|
||||
// adapt is the bias adaptation function specified in section 6.1.
|
||||
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||
if firstTime {
|
||||
delta /= damp
|
||||
} else {
|
||||
delta /= 2
|
||||
}
|
||||
delta += delta / numPoints
|
||||
k := int32(0)
|
||||
for delta > ((base-tmin)*tmax)/2 {
|
||||
delta /= base - tmin
|
||||
k += base
|
||||
}
|
||||
return k + (base-tmin+1)*delta/(delta+skew)
|
||||
}
|
||||
|
||||
// Strictly speaking, the remaining code below deals with IDNA (RFC 5890 and
// friends) and not Punycode (RFC 3492) per se.

// acePrefix is the ASCII Compatible Encoding prefix ("xn--") that marks
// a punycode-encoded label.
const acePrefix = "xn--"
|
||||
|
||||
// toASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// toASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// toASCII("golang") is "golang".
|
||||
func toASCII(s string) (string, error) {
|
||||
if ascii(s) {
|
||||
return s, nil
|
||||
}
|
||||
labels := strings.Split(s, ".")
|
||||
for i, label := range labels {
|
||||
if !ascii(label) {
|
||||
a, err := encode(acePrefix, label)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
labels[i] = a
|
||||
}
|
||||
}
|
||||
return strings.Join(labels, "."), nil
|
||||
}
|
||||
|
||||
// ascii reports whether s consists entirely of ASCII bytes, i.e.
// contains no byte >= utf8.RuneSelf.
func ascii(s string) bool {
	for _, b := range []byte(s) {
		if b >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|
||||
188
backend/providers/req/cookiejar/serialize.go
Normal file
188
backend/providers/req/cookiejar/serialize.go
Normal file
@@ -0,0 +1,188 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cookiejar
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/retry.v1"
|
||||
|
||||
filelock "github.com/juju/go4/lock"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Save saves the cookies to the persistent cookie file.
|
||||
// Before the file is written, it reads any cookies that
|
||||
// have been stored from it and merges them into j.
|
||||
func (j *Jar) Save() error {
|
||||
if j.filename == "" {
|
||||
return nil
|
||||
}
|
||||
return j.save(time.Now())
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler by encoding all persistent cookies
|
||||
// currently in the jar.
|
||||
func (j *Jar) MarshalJSON() ([]byte, error) {
|
||||
j.mu.Lock()
|
||||
defer j.mu.Unlock()
|
||||
// Marshaling entries can never fail.
|
||||
data, _ := json.Marshal(j.allPersistentEntries())
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// save is like Save but takes the current time as a parameter.
// It acquires the inter-process file lock, merges any cookies other
// processes have written since we loaded, drops expired entries, and
// rewrites the file in place.
func (j *Jar) save(now time.Time) error {
	locked, err := lockFile(lockFileName(j.filename))
	if err != nil {
		return err
	}
	defer locked.Close()
	f, err := os.OpenFile(j.filename, os.O_RDWR|os.O_CREATE, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	// TODO optimization: if the file hasn't changed since we
	// loaded it, don't bother with the merge step.

	j.mu.Lock()
	defer j.mu.Unlock()
	// Merge failure is deliberately non-fatal: we would rather
	// overwrite a corrupt file than fail to save.
	if err := j.mergeFrom(f); err != nil {
		// The cookie file is probably corrupt.
		log.Printf("cannot read cookie file to merge it; ignoring it: %v", err)
	}
	j.deleteExpired(now)
	// Rewrite the file from the start with the merged, unexpired entries.
	if err := f.Truncate(0); err != nil {
		return errors.Wrap(err, "cannot truncate file")
	}
	if _, err := f.Seek(0, 0); err != nil {
		return err
	}
	return j.writeTo(f)
}
|
||||
|
||||
// load loads the cookies from j.filename. If the file does not exist,
// no error will be returned and no cookies will be loaded.
func (j *Jar) load() error {
	if _, err := os.Stat(filepath.Dir(j.filename)); os.IsNotExist(err) {
		// The directory that we'll store the cookie jar
		// in doesn't exist, so don't bother trying
		// to acquire the lock.
		return nil
	}
	locked, err := lockFile(lockFileName(j.filename))
	if err != nil {
		return err
	}
	defer locked.Close()
	f, err := os.Open(j.filename)
	if err != nil {
		if os.IsNotExist(err) {
			// A missing cookie file is not an error.
			return nil
		}
		return err
	}
	defer f.Close()
	if err := j.mergeFrom(f); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// mergeFrom reads all the cookies from r and stores them in the Jar.
|
||||
func (j *Jar) mergeFrom(r io.Reader) error {
|
||||
decoder := json.NewDecoder(r)
|
||||
// Cope with old cookiejar format by just discarding
|
||||
// cookies, but still return an error if it's invalid JSON.
|
||||
var data json.RawMessage
|
||||
if err := decoder.Decode(&data); err != nil {
|
||||
if err == io.EOF {
|
||||
// Empty file.
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
var entries []entry
|
||||
if err := json.Unmarshal(data, &entries); err != nil {
|
||||
log.Printf("warning: discarding cookies in invalid format (error: %v)", err)
|
||||
return nil
|
||||
}
|
||||
j.merge(entries)
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTo writes all the cookies in the jar to w
|
||||
// as a JSON array.
|
||||
func (j *Jar) writeTo(w io.Writer) error {
|
||||
encoder := json.NewEncoder(w)
|
||||
entries := j.allPersistentEntries()
|
||||
if err := encoder.Encode(entries); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// allPersistentEntries returns all the entries in the jar, sorted by primarly by canonical host
|
||||
// name and secondarily by path length.
|
||||
func (j *Jar) allPersistentEntries() []entry {
|
||||
var entries []entry
|
||||
for _, submap := range j.entries {
|
||||
for _, e := range submap {
|
||||
if e.Persistent {
|
||||
entries = append(entries, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Sort(byCanonicalHost{entries})
|
||||
return entries
|
||||
}
|
||||
|
||||
func (j *Jar) KVData() map[string]string {
|
||||
pairs := make(map[string]string)
|
||||
|
||||
entries := j.allPersistentEntries()
|
||||
if len(entries) == 0 {
|
||||
return pairs
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
pairs[strings.ToLower(entry.Name)] = entry.Value
|
||||
}
|
||||
|
||||
return pairs
|
||||
}
|
||||
|
||||
// lockFileName returns the name of the lock file associated with
// the given path.
func lockFileName(path string) string {
	const lockSuffix = ".lock"
	return path + lockSuffix
}
|
||||
|
||||
// attempt is the retry strategy used when acquiring the cookie file
// lock: exponential backoff starting at 100µs (factor 1.5, per-delay
// cap 100ms) for at most 3 seconds in total.
var attempt = retry.LimitTime(3*time.Second, retry.Exponential{
	Initial:  100 * time.Microsecond,
	Factor:   1.5,
	MaxDelay: 100 * time.Millisecond,
})
|
||||
|
||||
// lockFile acquires an exclusive file lock on path, retrying per the
// attempt backoff schedule. It returns an io.Closer that releases the
// lock, or an error when the retry budget is exhausted.
func lockFile(path string) (io.Closer, error) {
	for a := retry.Start(attempt, nil); a.Next(); {
		locker, err := filelock.Lock(path)
		if err == nil {
			return locker, nil
		}
		if !a.More() {
			return nil, errors.Wrap(err, "file locked for too long; giving up")
		}
	}
	// a.Next() yields at least one iteration and the !a.More() branch
	// returns before the iterator is exhausted, so this is unreachable.
	panic("unreachable")
}
|
||||
51
backend/providers/tracing/config.go
Normal file
51
backend/providers/tracing/config.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
const DefaultPrefix = "Tracing"
|
||||
|
||||
func DefaultProvider() container.ProviderContainer {
|
||||
return container.ProviderContainer{
|
||||
Provider: Provide,
|
||||
Options: []opt.Option{
|
||||
opt.Prefix(DefaultPrefix),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// jaegerLogrus is a custom logger implementation that adapts a
// logrus.Logger to the Error/Infof logging interface expected by the
// Jaeger client.
type jaegerLogrus struct {
	logger *logrus.Logger
}
|
||||
|
||||
// Error logs msg at error level via the wrapped logrus logger.
func (l *jaegerLogrus) Error(msg string) {
	l.logger.Error(msg)
}
|
||||
|
||||
// Infof logs a formatted message at info level via the wrapped
// logrus logger.
func (l *jaegerLogrus) Infof(msg string, args ...interface{}) {
	l.logger.Infof(msg, args...)
}
|
||||
|
||||
// Config holds the Jaeger tracing configuration.
//
// NOTE(review): the underscore field names (Reporter_*) presumably
// encode nested config keys for the opt unmarshaler — confirm before
// renaming them to idiomatic Go names, as that would change the
// expected configuration keys.
type Config struct {
	Name string // service name; defaults to "default" (see format)
	Reporter_LocalAgentHostPort string // default "127.0.0.1:6831" (see format)
	Reporter_CollectorEndpoint string // default "http://127.0.0.1:14268/api/traces" (see format)
}
|
||||
|
||||
func (c *Config) format() {
|
||||
if c.Reporter_LocalAgentHostPort == "" {
|
||||
c.Reporter_LocalAgentHostPort = "127.0.0.1:6831"
|
||||
}
|
||||
|
||||
if c.Reporter_CollectorEndpoint == "" {
|
||||
c.Reporter_CollectorEndpoint = "http://127.0.0.1:14268/api/traces"
|
||||
}
|
||||
|
||||
if c.Name == "" {
|
||||
c.Name = "default"
|
||||
}
|
||||
}
|
||||
57
backend/providers/tracing/provider.go
Normal file
57
backend/providers/tracing/provider.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package tracing
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
config "github.com/uber/jaeger-client-go/config"
|
||||
"go.ipao.vip/atom/container"
|
||||
"go.ipao.vip/atom/opt"
|
||||
)
|
||||
|
||||
func Provide(opts ...opt.Option) error {
|
||||
o := opt.New(opts...)
|
||||
var conf Config
|
||||
if err := o.UnmarshalConfig(&conf); err != nil {
|
||||
return err
|
||||
}
|
||||
conf.format()
|
||||
return container.Container.Provide(func() (opentracing.Tracer, io.Closer, error) {
|
||||
log := logrus.New()
|
||||
log.SetFormatter(&logrus.TextFormatter{
|
||||
FullTimestamp: true,
|
||||
TimestampFormat: "2006-01-02 15:04:05",
|
||||
})
|
||||
|
||||
cfg := &config.Configuration{
|
||||
ServiceName: conf.Name,
|
||||
Sampler: &config.SamplerConfig{
|
||||
Type: "const",
|
||||
Param: 1,
|
||||
},
|
||||
Reporter: &config.ReporterConfig{
|
||||
LogSpans: true,
|
||||
LocalAgentHostPort: conf.Reporter_LocalAgentHostPort,
|
||||
CollectorEndpoint: conf.Reporter_CollectorEndpoint,
|
||||
BufferFlushInterval: 100 * time.Millisecond,
|
||||
QueueSize: 1000,
|
||||
},
|
||||
}
|
||||
|
||||
// 使用自定义的 logger
|
||||
jLogger := &jaegerLogrus{logger: log}
|
||||
tracer, closer, err := cfg.NewTracer(
|
||||
config.Logger(jLogger),
|
||||
config.ZipkinSharedRPCSpan(true),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "无法初始化 Jaeger: %v", err)
|
||||
}
|
||||
opentracing.SetGlobalTracer(tracer)
|
||||
|
||||
return tracer, closer, nil
|
||||
}, o.DiOptions()...)
|
||||
}
|
||||
Reference in New Issue
Block a user