diff --git a/api/api.go b/api/api.go index 48fcf25a39..9915d47980 100644 --- a/api/api.go +++ b/api/api.go @@ -333,6 +333,7 @@ func (a *ApplicationHandler) BuildControlPlaneRoutes() *chi.Mux { e.With(handler.RequireEnabledProject()).Delete("/", handler.DeleteEndpoint) e.With(handler.RequireEnabledProject()).Put("/expire_secret", handler.ExpireSecret) e.With(handler.RequireEnabledProject()).Put("/pause", handler.PauseEndpoint) + e.With(handler.RequireEnabledProject()).Post("/activate", handler.ActivateEndpoint) }) }) diff --git a/api/handlers/configuration.go b/api/handlers/configuration.go index ad0d51ee25..c7e6dddcb8 100644 --- a/api/handlers/configuration.go +++ b/api/handlers/configuration.go @@ -17,24 +17,24 @@ import ( ) func (h *Handler) GetConfiguration(w http.ResponseWriter, r *http.Request) { - config, err := postgres.NewConfigRepo(h.A.DB).LoadConfiguration(r.Context()) + configuration, err := postgres.NewConfigRepo(h.A.DB).LoadConfiguration(r.Context()) if err != nil && !errors.Is(err, datastore.ErrConfigNotFound) { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return } - configResponse := []*models.ConfigurationResponse{} - if config != nil { - if config.StoragePolicy.Type == datastore.S3 { + var configResponse []*models.ConfigurationResponse + if configuration != nil { + if configuration.StoragePolicy.Type == datastore.S3 { policy := &datastore.S3Storage{} - policy.Bucket = config.StoragePolicy.S3.Bucket - policy.Endpoint = config.StoragePolicy.S3.Endpoint - policy.Region = config.StoragePolicy.S3.Region - config.StoragePolicy.S3 = policy + policy.Bucket = configuration.StoragePolicy.S3.Bucket + policy.Endpoint = configuration.StoragePolicy.S3.Endpoint + policy.Region = configuration.StoragePolicy.S3.Region + configuration.StoragePolicy.S3 = policy } c := &models.ConfigurationResponse{ - Configuration: config, + Configuration: configuration, ApiVersion: convoy.GetVersion(), } @@ -61,14 +61,14 @@ func (h *Handler) CreateConfiguration(w 
http.ResponseWriter, r *http.Request) { NewConfig: &newConfig, } - config, err := cc.Run(r.Context()) + configuration, err := cc.Run(r.Context()) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return } c := &models.ConfigurationResponse{ - Configuration: config, + Configuration: configuration, ApiVersion: convoy.GetVersion(), } @@ -92,14 +92,14 @@ func (h *Handler) UpdateConfiguration(w http.ResponseWriter, r *http.Request) { Config: &newConfig, } - config, err := uc.Run(r.Context()) + configuration, err := uc.Run(r.Context()) if err != nil { _ = render.Render(w, r, util.NewServiceErrResponse(err)) return } c := &models.ConfigurationResponse{ - Configuration: config, + Configuration: configuration, ApiVersion: convoy.GetVersion(), } diff --git a/api/handlers/endpoint.go b/api/handlers/endpoint.go index 28103ff580..5c3c7f058b 100644 --- a/api/handlers/endpoint.go +++ b/api/handlers/endpoint.go @@ -3,7 +3,11 @@ package handlers import ( "context" "encoding/json" + "fmt" + "github.com/frain-dev/convoy/pkg/circuit_breaker" + "github.com/frain-dev/convoy/pkg/msgpack" "net/http" + "time" "github.com/frain-dev/convoy/api/models" "github.com/frain-dev/convoy/database/postgres" @@ -207,9 +211,37 @@ func (h *Handler) GetEndpoints(w http.ResponseWriter, r *http.Request) { return } + // fetch keys from redis and mutate endpoints slice + keys := make([]string, len(endpoints)) + for i := 0; i < len(endpoints); i++ { + keys[i] = fmt.Sprintf("breaker:%s", endpoints[i].UID) + } + + cbs, err := h.A.Redis.MGet(r.Context(), keys...).Result() + if err != nil { + _ = render.Render(w, r, util.NewServiceErrResponse(err)) + return + } + + for i := 0; i < len(cbs); i++ { + if cbs[i] != nil { + str, ok := cbs[i].(string) + if ok { + var c circuit_breaker.CircuitBreaker + asBytes := []byte(str) + innerErr := msgpack.DecodeMsgPack(asBytes, &c) + if innerErr != nil { + continue + } + endpoints[i].FailureRate = c.FailureRate + } + } + } + resp := 
models.NewListResponse(endpoints, func(endpoint datastore.Endpoint) models.EndpointResponse { return models.EndpointResponse{Endpoint: &endpoint} }) + serverResponse := util.NewServerResponse( "Endpoints fetched successfully", models.PagedResponse{Content: &resp, Pagination: &paginationData}, http.StatusOK) @@ -410,7 +442,7 @@ func (h *Handler) ExpireSecret(w http.ResponseWriter, r *http.Request) { // PauseEndpoint // // @Summary Pause endpoint -// @Description This endpoint toggles an endpoint status between the active and paused states +// @Description Toggles an endpoint's status between active and paused states // @Id PauseEndpoint // @Tags Endpoints // @Accept json // @Produce json @@ -458,6 +490,76 @@ func (h *Handler) PauseEndpoint(w http.ResponseWriter, r *http.Request) { util.WriteResponse(w, r, resBytes, http.StatusAccepted) } +// ActivateEndpoint +// +// @Summary Activate endpoint +// @Description Activates an inactive endpoint +// @Id ActivateEndpoint +// @Tags Endpoints +// @Accept json +// @Produce json +// @Param projectID path string true "Project ID" +// @Param endpointID path string true "Endpoint ID" +// @Success 202 {object} util.ServerResponse{data=models.EndpointResponse} +// @Failure 400,401,404 {object} util.ServerResponse{data=Stub} +// @Security ApiKeyAuth +// @Router /v1/projects/{projectID}/endpoints/{endpointID}/activate [post] +func (h *Handler) ActivateEndpoint(w http.ResponseWriter, r *http.Request) { + project, err := h.retrieveProject(r) + if err != nil { + _ = render.Render(w, r, util.NewErrorResponse(err.Error(), http.StatusBadRequest)) + return + } + + aes := services.ActivateEndpointService{ + EndpointRepo: postgres.NewEndpointRepo(h.A.DB, h.A.Cache), + ProjectID: project.UID, + EndpointId: chi.URLParam(r, "endpointID"), + } + + endpoint, err := aes.Run(r.Context()) + if err != nil { + _ = render.Render(w, r, util.NewServiceErrResponse(err)) + return + } + + cbs, err := h.A.Redis.Get(r.Context(), fmt.Sprintf("breaker:%s", endpoint.UID)).Result() + 
if err != nil { + h.A.Logger.WithError(err).Error("failed to find circuit breaker") + } + + if len(cbs) > 0 { + c, innerErr := circuit_breaker.NewCircuitBreakerFromStore([]byte(cbs), h.A.Logger.(*log.Logger)) + if innerErr != nil { + h.A.Logger.WithError(innerErr).Error("failed to decode circuit breaker") + } else { + c.Reset(time.Now()) + b, msgPackErr := msgpack.EncodeMsgPack(c) + if msgPackErr != nil { + h.A.Logger.WithError(msgPackErr).Error("failed to encode circuit breaker") + } + h.A.Redis.Set(r.Context(), fmt.Sprintf("breaker:%s", endpoint.UID), b, time.Minute*5) + } + } + + resp := &models.EndpointResponse{Endpoint: endpoint} + serverResponse := util.NewServerResponse("endpoint status successfully activated", resp, http.StatusAccepted) + + rb, err := json.Marshal(serverResponse) + if err != nil { + _ = render.Render(w, r, util.NewServiceErrResponse(err)) + return + } + + resBytes, err := h.RM.VersionResponse(r, rb, "UpdateEndpoint") + if err != nil { + _ = render.Render(w, r, util.NewServiceErrResponse(err)) + return + } + + util.WriteResponse(w, r, resBytes, http.StatusAccepted) +} + func (h *Handler) retrieveEndpoint(ctx context.Context, endpointID, projectID string) (*datastore.Endpoint, error) { endpointRepo := postgres.NewEndpointRepo(h.A.DB, h.A.Cache) return endpointRepo.FindEndpointByID(ctx, endpointID, projectID) diff --git a/api/models/models.go b/api/models/models.go index fa225e0af1..b9604530af 100644 --- a/api/models/models.go +++ b/api/models/models.go @@ -154,7 +154,7 @@ type PortalLinkResponse struct { DeletedAt null.Time `json:"deleted_at,omitempty"` } -// NewListResponse is generic function for looping over +// NewListResponse is a generic function for looping over // a slice of type M and returning a slice of type T func NewListResponse[T, M any](items []M, fn func(item M) T) []T { results := make([]T, 0) diff --git a/api/server_suite_test.go b/api/server_suite_test.go index 669fbba395..ddb2ee9ac6 100644 --- a/api/server_suite_test.go 
+++ b/api/server_suite_test.go @@ -128,10 +128,13 @@ func buildServer() *ApplicationHandler { noopCache := ncache.NewNoopCache() r, _ := rlimiter.NewRedisLimiter(cfg.Redis.BuildDsn()) + rd, _ := rdb.NewClient(cfg.Redis.BuildDsn()) + ah, _ := NewApplicationHandler( &types.APIOptions{ DB: db, Queue: newQueue, + Redis: rd.Client(), Logger: logger, Cache: noopCache, Rate: r, diff --git a/api/testdb/seed.go b/api/testdb/seed.go index 728744f6b3..4a7d25d4cd 100644 --- a/api/testdb/seed.go +++ b/api/testdb/seed.go @@ -557,21 +557,23 @@ func SeedUser(db database.Database, email, password string) (*datastore.User, er } func SeedConfiguration(db database.Database) (*datastore.Configuration, error) { - config := &datastore.Configuration{ - UID: ulid.Make().String(), - IsAnalyticsEnabled: true, - IsSignupEnabled: true, - StoragePolicy: &datastore.DefaultStoragePolicy, + c := &datastore.Configuration{ + UID: ulid.Make().String(), + IsAnalyticsEnabled: true, + IsSignupEnabled: true, + StoragePolicy: &datastore.DefaultStoragePolicy, + RetentionPolicy: &datastore.DefaultRetentionPolicy, + CircuitBreakerConfig: &datastore.DefaultCircuitBreakerConfiguration, } // Seed Data configRepo := postgres.NewConfigRepo(db) - err := configRepo.CreateConfiguration(context.TODO(), config) + err := configRepo.CreateConfiguration(context.TODO(), c) if err != nil { return nil, err } - return config, nil + return c, nil } func SeedDevice(db database.Database, g *datastore.Project, endpointID string) error { diff --git a/api/types/types.go b/api/types/types.go index 6b706f2f14..defd48861d 100644 --- a/api/types/types.go +++ b/api/types/types.go @@ -9,6 +9,7 @@ import ( "github.com/frain-dev/convoy/internal/pkg/limiter" "github.com/frain-dev/convoy/pkg/log" "github.com/frain-dev/convoy/queue" + "github.com/redis/go-redis/v9" ) type ContextKey string @@ -16,6 +17,7 @@ type ContextKey string type APIOptions struct { FFlag *fflag.FFlag DB database.Database + Redis redis.UniversalClient Queue queue.Queuer 
Logger log.StdLogger Cache cache.Cache diff --git a/cmd/agent/agent.go b/cmd/agent/agent.go index 0628cb7f78..08274b9841 100644 --- a/cmd/agent/agent.go +++ b/cmd/agent/agent.go @@ -2,7 +2,6 @@ package agent import ( "context" - "fmt" "os" "os/signal" "time" @@ -26,8 +25,6 @@ import ( func AddAgentCommand(a *cli.App) *cobra.Command { var agentPort uint32 - var ingestPort uint32 - var workerPort uint32 var logLevel string var consumerPoolSize int var interval int @@ -110,11 +107,9 @@ func AddAgentCommand(a *cli.App) *cobra.Command { cmd.Flags().StringVar(&smtpUrl, "smtp-url", "", "SMTP provider URL") cmd.Flags().Uint32Var(&smtpPort, "smtp-port", 0, "SMTP Port") - cmd.Flags().Uint32Var(&agentPort, "agent-port", 0, "Agent port") - cmd.Flags().Uint32Var(&workerPort, "worker-port", 0, "Worker port") - cmd.Flags().Uint32Var(&ingestPort, "ingest-port", 0, "Ingest port") + cmd.Flags().Uint32Var(&agentPort, "port", 0, "Agent port") - cmd.Flags().StringVar(&logLevel, "log-level", "", "scheduler log level") + cmd.Flags().StringVar(&logLevel, "log-level", "", "Log level") cmd.Flags().IntVar(&consumerPoolSize, "consumers", -1, "Size of the consumers pool.") cmd.Flags().IntVar(&interval, "interval", 10, "the time interval, measured in seconds to update the in-memory store from the database") cmd.Flags().StringVar(&executionMode, "mode", "", "Execution Mode (one of events, retry and default)") @@ -122,30 +117,27 @@ func AddAgentCommand(a *cli.App) *cobra.Command { return cmd } -func startServerComponent(ctx context.Context, a *cli.App) error { +func startServerComponent(_ context.Context, a *cli.App) error { + lo := a.Logger.(*log.Logger) + lo.SetPrefix("agent") + cfg, err := config.Get() if err != nil { - a.Logger.WithError(err).Fatal("Failed to load configuration") + lo.WithError(err).Fatal("Failed to load configuration") } start := time.Now() - a.Logger.Info("Starting Convoy data plane ...") + lo.Info("Starting Convoy data plane") apiKeyRepo := postgres.NewAPIKeyRepo(a.DB, 
a.Cache) userRepo := postgres.NewUserRepo(a.DB, a.Cache) portalLinkRepo := postgres.NewPortalLinkRepo(a.DB, a.Cache) err = realm_chain.Init(&cfg.Auth, apiKeyRepo, userRepo, portalLinkRepo, a.Cache) if err != nil { - a.Logger.WithError(err).Fatal("failed to initialize realm chain") + lo.WithError(err).Fatal("failed to initialize realm chain") } - flag, err := fflag.NewFFlag(&cfg) - if err != nil { - a.Logger.WithError(err).Fatal("failed to create fflag controller") - } - - lo := a.Logger.(*log.Logger) - lo.SetPrefix("api server") + flag := fflag.NewFFlag(&cfg) lvl, err := log.ParseLevel(cfg.Logger.Level) if err != nil { @@ -164,6 +156,7 @@ func startServerComponent(ctx context.Context, a *cli.App) error { Logger: lo, Cache: a.Cache, Rate: a.Rate, + Redis: a.Redis, Licenser: a.Licenser, }) if err != nil { @@ -172,7 +165,7 @@ func startServerComponent(ctx context.Context, a *cli.App) error { srv.SetHandler(evHandler.BuildDataPlaneRoutes()) - fmt.Printf("Started convoy server in %s\n", time.Since(start)) + lo.Infof("Started convoy server in %s", time.Since(start)) httpConfig := cfg.Server.HTTP if httpConfig.SSL { @@ -181,7 +174,7 @@ func startServerComponent(ctx context.Context, a *cli.App) error { return nil } - fmt.Printf("Starting Convoy Agent on port %v\n", cfg.Server.HTTP.AgentPort) + lo.Infof("Starting Convoy Agent on port %v", cfg.Server.HTTP.AgentPort) go func() { srv.Listen() @@ -194,7 +187,7 @@ func buildAgentCliConfiguration(cmd *cobra.Command) (*config.Configuration, erro c := &config.Configuration{} // PORT - port, err := cmd.Flags().GetUint32("agent-port") + port, err := cmd.Flags().GetUint32("port") if err != nil { return nil, err } @@ -203,25 +196,6 @@ func buildAgentCliConfiguration(cmd *cobra.Command) (*config.Configuration, erro c.Server.HTTP.AgentPort = port } - ingestPort, err := cmd.Flags().GetUint32("ingest-port") - if err != nil { - return nil, err - } - - if ingestPort != 0 { - c.Server.HTTP.IngestPort = ingestPort - } - - // CONVOY_WORKER_PORT 
- workerPort, err := cmd.Flags().GetUint32("worker-port") - if err != nil { - return nil, err - } - - if workerPort != 0 { - c.Server.HTTP.WorkerPort = workerPort - } - logLevel, err := cmd.Flags().GetString("log-level") if err != nil { return nil, err diff --git a/cmd/ff/feature_flags.go b/cmd/ff/feature_flags.go index c72e7eea04..88e578bf50 100644 --- a/cmd/ff/feature_flags.go +++ b/cmd/ff/feature_flags.go @@ -20,10 +20,8 @@ func AddFeatureFlagsCommand() *cobra.Command { if err != nil { log.WithError(err).Fatal("Error fetching the config.") } - f, err := fflag2.NewFFlag(&cfg) - if err != nil { - return err - } + + f := fflag2.NewFFlag(&cfg) return f.ListFeatures() }, PersistentPostRun: func(cmd *cobra.Command, args []string) {}, diff --git a/cmd/hooks/hooks.go b/cmd/hooks/hooks.go index 838097a20b..a838ae4761 100644 --- a/cmd/hooks/hooks.go +++ b/cmd/hooks/hooks.go @@ -122,6 +122,11 @@ func PreRun(app *cli.App, db *postgres.Postgres) func(cmd *cobra.Command, args [ lo := log.NewLogger(os.Stdout) + rd, err := rdb.NewClient(cfg.Redis.BuildDsn()) + if err != nil { + return err + } + ca, err = cache.NewCache(cfg.Redis) if err != nil { return err @@ -155,6 +160,7 @@ func PreRun(app *cli.App, db *postgres.Postgres) func(cmd *cobra.Command, args [ } } + app.Redis = rd.Client() app.DB = postgresDB app.Queue = q app.Logger = lo @@ -327,21 +333,32 @@ func ensureInstanceConfig(ctx context.Context, a *cli.App, cfg config.Configurat IsRetentionPolicyEnabled: cfg.RetentionPolicy.IsRetentionPolicyEnabled, } + circuitBreakerConfig := &datastore.CircuitBreakerConfig{ + SampleRate: cfg.CircuitBreaker.SampleRate, + ErrorTimeout: cfg.CircuitBreaker.ErrorTimeout, + FailureThreshold: cfg.CircuitBreaker.FailureThreshold, + SuccessThreshold: cfg.CircuitBreaker.SuccessThreshold, + ObservabilityWindow: cfg.CircuitBreaker.ObservabilityWindow, + MinimumRequestCount: cfg.CircuitBreaker.MinimumRequestCount, + ConsecutiveFailureThreshold: cfg.CircuitBreaker.ConsecutiveFailureThreshold, + } + 
configuration, err := configRepo.LoadConfiguration(ctx) if err != nil { if errors.Is(err, datastore.ErrConfigNotFound) { a.Logger.Info("Creating Instance Config") - cfg := &datastore.Configuration{ - UID: ulid.Make().String(), - StoragePolicy: storagePolicy, - IsAnalyticsEnabled: cfg.Analytics.IsEnabled, - IsSignupEnabled: cfg.Auth.IsSignupEnabled, - RetentionPolicy: retentionPolicy, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), + c := &datastore.Configuration{ + UID: ulid.Make().String(), + StoragePolicy: storagePolicy, + IsAnalyticsEnabled: cfg.Analytics.IsEnabled, + IsSignupEnabled: cfg.Auth.IsSignupEnabled, + RetentionPolicy: retentionPolicy, + CircuitBreakerConfig: circuitBreakerConfig, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), } - return cfg, configRepo.CreateConfiguration(ctx, cfg) + return c, configRepo.CreateConfiguration(ctx, c) } return configuration, err @@ -350,6 +367,7 @@ func ensureInstanceConfig(ctx context.Context, a *cli.App, cfg config.Configurat configuration.StoragePolicy = storagePolicy configuration.IsSignupEnabled = cfg.Auth.IsSignupEnabled configuration.IsAnalyticsEnabled = cfg.Analytics.IsEnabled + configuration.CircuitBreakerConfig = circuitBreakerConfig configuration.RetentionPolicy = retentionPolicy configuration.UpdatedAt = time.Now() @@ -567,32 +585,34 @@ func buildCliConfiguration(cmd *cobra.Command) (*config.Configuration, error) { } - flag, err := fflag2.NewFFlag(c) - if err != nil { - return nil, err - } + flag := fflag2.NewFFlag(c) c.Metrics = config.MetricsConfiguration{ IsEnabled: false, } + if flag.CanAccessFeature(fflag2.Prometheus) { metricsBackend, err := cmd.Flags().GetString("metrics-backend") if err != nil { return nil, err } + if !config.IsStringEmpty(metricsBackend) { c.Metrics = config.MetricsConfiguration{ IsEnabled: false, Backend: config.MetricsBackend(metricsBackend), } + switch c.Metrics.Backend { case config.PrometheusMetricsProvider: sampleTime, err := 
cmd.Flags().GetUint64("metrics-prometheus-sample-time") if err != nil { return nil, err } + if sampleTime < 1 { return nil, errors.New("metrics-prometheus-sample-time must be non-zero") } + c.Metrics = config.MetricsConfiguration{ IsEnabled: true, Backend: config.MetricsBackend(metricsBackend), @@ -602,14 +622,17 @@ func buildCliConfiguration(cmd *cobra.Command) (*config.Configuration, error) { } } } else { - log.Warn("No metrics-backend specified") + log.Warn("metrics backend not specified") } + } else { + log.Info(fflag2.ErrPrometheusMetricsNotEnabled) } maxRetrySeconds, err := cmd.Flags().GetUint64("max-retry-seconds") if err != nil { return nil, err } + c.MaxRetrySeconds = maxRetrySeconds return c, nil diff --git a/cmd/ingest/ingest.go b/cmd/ingest/ingest.go index 334f7bdbd1..27b89e6619 100644 --- a/cmd/ingest/ingest.go +++ b/cmd/ingest/ingest.go @@ -65,7 +65,7 @@ func AddIngestCommand(a *cli.App) *cobra.Command { } cmd.Flags().Uint32Var(&ingestPort, "ingest-port", 0, "Ingest port") - cmd.Flags().StringVar(&logLevel, "log-level", "", "ingest log level") + cmd.Flags().StringVar(&logLevel, "log-level", "", "Log level") cmd.Flags().IntVar(&interval, "interval", 10, "the time interval, measured in seconds, at which the database should be polled for new pub sub sources") return cmd @@ -119,7 +119,7 @@ func StartIngest(ctx context.Context, a *cli.App, cfg config.Configuration, inte go ingest.Run() - fmt.Println("Starting Convoy Ingester") + log.Println("Starting Convoy Ingester") return nil } diff --git a/cmd/main.go b/cmd/main.go index f76ac4963d..22d530c523 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -120,8 +120,8 @@ func main() { c.Flags().StringVar(&metricsBackend, "metrics-backend", "prometheus", "Metrics backend e.g. prometheus. 
('prometheus' feature flag required") c.Flags().Uint64Var(&prometheusMetricsSampleTime, "metrics-prometheus-sample-time", 5, "Prometheus metrics sample time") - c.Flags().StringVar(&retentionPolicy, "retention-policy", "", "SMTP Port") - c.Flags().BoolVar(&retentionPolicyEnabled, "retention-policy-enabled", false, "SMTP Port") + c.Flags().StringVar(&retentionPolicy, "retention-policy", "", "Retention Policy Duration") + c.Flags().BoolVar(&retentionPolicyEnabled, "retention-policy-enabled", false, "Retention Policy Enabled") c.Flags().Uint64Var(&maxRetrySeconds, "max-retry-seconds", 7200, "Max retry seconds exponential backoff") diff --git a/cmd/server/server.go b/cmd/server/server.go index 904951bfac..edfe0c7a51 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -107,10 +107,7 @@ func startConvoyServer(a *cli.App) error { a.Logger.WithError(err).Fatal("failed to initialize realm chain") } - flag, err := fflag.NewFFlag(&cfg) - if err != nil { - a.Logger.WithError(err).Fatal("failed to create fflag controller") - } + flag := fflag.NewFFlag(&cfg) if cfg.Server.HTTP.Port <= 0 { return errors.New("please provide the HTTP port in the convoy.json file") @@ -133,6 +130,7 @@ func startConvoyServer(a *cli.App) error { DB: a.DB, Queue: a.Queue, Logger: lo, + Redis: a.Redis, Cache: a.Cache, Rate: a.Rate, Licenser: a.Licenser, @@ -157,7 +155,7 @@ func startConvoyServer(a *cli.App) error { s.RegisterTask("0 0 * * *", convoy.ScheduleQueue, convoy.RetentionPolicies) s.RegisterTask("0 * * * *", convoy.ScheduleQueue, convoy.TokenizeSearch) - metrics.RegisterQueueMetrics(a.Queue, a.DB) + metrics.RegisterQueueMetrics(a.Queue, a.DB, nil) // Start scheduler s.Start() diff --git a/cmd/stream/stream.go b/cmd/stream/stream.go index 72d37baf48..9a5275aebf 100644 --- a/cmd/stream/stream.go +++ b/cmd/stream/stream.go @@ -136,12 +136,12 @@ func AddStreamCommand(a *cli.App) *cobra.Command { } cmd.Flags().Uint32Var(&socketPort, "socket-port", 5008, "Socket port") - 
cmd.Flags().StringVar(&logLevel, "log-level", "error", "stream log level") + cmd.Flags().StringVar(&logLevel, "log-level", "", "Log level") return cmd } -func buildCliFlagConfiguration(cmd *cobra.Command) (*config.Configuration, error) { +func buildCliFlagConfiguration(_ *cobra.Command) (*config.Configuration, error) { c := &config.Configuration{} return c, nil diff --git a/cmd/worker/worker.go b/cmd/worker/worker.go index 546e1f2d09..547c28b24b 100644 --- a/cmd/worker/worker.go +++ b/cmd/worker/worker.go @@ -3,13 +3,15 @@ package worker import ( "context" "fmt" + "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/internal/pkg/fflag" "net/http" + "strings" "github.com/frain-dev/convoy" "github.com/frain-dev/convoy/config" "github.com/frain-dev/convoy/database/postgres" "github.com/frain-dev/convoy/internal/pkg/cli" - fflag2 "github.com/frain-dev/convoy/internal/pkg/fflag" "github.com/frain-dev/convoy/internal/pkg/limiter" "github.com/frain-dev/convoy/internal/pkg/loader" "github.com/frain-dev/convoy/internal/pkg/memorystore" @@ -19,6 +21,8 @@ import ( "github.com/frain-dev/convoy/internal/pkg/smtp" "github.com/frain-dev/convoy/internal/telemetry" "github.com/frain-dev/convoy/net" + cb "github.com/frain-dev/convoy/pkg/circuit_breaker" + "github.com/frain-dev/convoy/pkg/clock" "github.com/frain-dev/convoy/pkg/log" "github.com/frain-dev/convoy/queue" redisQueue "github.com/frain-dev/convoy/queue/redis" @@ -131,7 +135,7 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte sc, err := smtp.NewClient(&cfg.SMTP) if err != nil { - a.Logger.WithError(err).Error("Failed to create smtp client") + lo.WithError(err).Error("Failed to create smtp client") return err } @@ -225,11 +229,11 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte configuration, err := configRepo.LoadConfiguration(context.Background()) if err != nil { - a.Logger.WithError(err).Fatal("Failed to instance configuration") + 
lo.WithError(err).Fatal("Failed to instance configuration") return err } - subscriptionsLoader := loader.NewSubscriptionLoader(subRepo, projectRepo, a.Logger, 0) + subscriptionsLoader := loader.NewSubscriptionLoader(subRepo, projectRepo, lo, 0) subscriptionsTable := memorystore.NewTable(memorystore.OptionSyncer(subscriptionsLoader)) err = memorystore.DefaultStore.Register("subscriptions", subscriptionsTable) @@ -245,17 +249,59 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte go memorystore.DefaultStore.Sync(ctx, interval) - newTelemetry := telemetry.NewTelemetry(a.Logger.(*log.Logger), configuration, + newTelemetry := telemetry.NewTelemetry(lo, configuration, telemetry.OptionTracker(counter), telemetry.OptionBackend(pb), telemetry.OptionBackend(mb)) dispatcher, err := net.NewDispatcher(cfg.Server.HTTP.HttpProxy, a.Licenser, false) if err != nil { - a.Logger.WithError(err).Fatal("Failed to create new net dispatcher") + lo.WithError(err).Fatal("Failed to create new net dispatcher") return err } + featureFlag := fflag.NewFFlag(&cfg) + var circuitBreakerManager *cb.CircuitBreakerManager + + if featureFlag.CanAccessFeature(fflag.CircuitBreaker) { + circuitBreakerManager, err = cb.NewCircuitBreakerManager( + cb.ConfigOption(configuration.ToCircuitBreakerConfig()), + cb.StoreOption(cb.NewRedisStore(rd.Client(), clock.NewRealClock())), + cb.ClockOption(clock.NewRealClock()), + cb.LoggerOption(lo), + cb.NotificationFunctionOption(func(n cb.NotificationType, c cb.CircuitBreakerConfig, b cb.CircuitBreaker) error { + endpointId := strings.Split(b.Key, ":")[1] + project, funcErr := projectRepo.FetchProjectByID(ctx, b.TenantId) + if funcErr != nil { + return funcErr + } + + endpoint, funcErr := endpointRepo.FindEndpointByID(ctx, endpointId, b.TenantId) + if funcErr != nil { + return funcErr + } + + switch n { + case cb.TypeDisableResource: + breakerErr := endpointRepo.UpdateEndpointStatus(ctx, project.UID, endpoint.UID, 
datastore.InactiveEndpointStatus) + if breakerErr != nil { + return breakerErr + } + default: + return fmt.Errorf("unsupported circuit breaker notification type: %s", n) + } + return nil + }), + ) + if err != nil { + lo.WithError(err).Fatal("Failed to create circuit breaker manager") + } + + go circuitBreakerManager.Start(ctx, attemptRepo.GetFailureAndSuccessCounts) + } else { + lo.Warn(fflag.ErrCircuitBreakerNotEnabled) + } + channels := make(map[string]task.EventChannel) defaultCh, broadcastCh, dynamicCh := task.NewDefaultEventChannel(), task.NewBroadcastEventChannel(subscriptionsTable), task.NewDynamicEventChannel() channels["default"] = defaultCh @@ -271,6 +317,8 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte rateLimiter, dispatcher, attemptRepo, + circuitBreakerManager, + featureFlag, ), newTelemetry) consumer.RegisterHandlers(convoy.CreateEventProcessor, task.ProcessEventCreation( @@ -286,11 +334,14 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte consumer.RegisterHandlers(convoy.RetryEventProcessor, task.ProcessRetryEventDelivery( endpointRepo, eventDeliveryRepo, + a.Licenser, projectRepo, a.Queue, rateLimiter, dispatcher, attemptRepo, + circuitBreakerManager, + featureFlag, ), newTelemetry) consumer.RegisterHandlers(convoy.CreateBroadcastEventProcessor, task.ProcessBroadcastEventCreation( @@ -340,11 +391,7 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte consumer.RegisterHandlers(convoy.DailyAnalytics, task.PushDailyTelemetry(lo, a.DB, a.Cache, rd), nil) consumer.RegisterHandlers(convoy.EmailProcessor, task.ProcessEmails(sc), nil) - fflag, err := fflag2.NewFFlag(&cfg) - if err != nil { - return nil - } - if fflag.CanAccessFeature(fflag2.FullTextSearch) && a.Licenser.AdvancedWebhookFiltering() { + if featureFlag.CanAccessFeature(fflag.FullTextSearch) && a.Licenser.AdvancedWebhookFiltering() { consumer.RegisterHandlers(convoy.TokenizeSearch, 
task.GeneralTokenizerHandler(projectRepo, eventRepo, jobRepo, rd), nil) consumer.RegisterHandlers(convoy.TokenizeSearchForProject, task.TokenizerHandler(eventRepo, jobRepo), nil) } @@ -353,11 +400,11 @@ func StartWorker(ctx context.Context, a *cli.App, cfg config.Configuration, inte consumer.RegisterHandlers(convoy.MetaEventProcessor, task.ProcessMetaEvent(projectRepo, metaEventRepo, dispatcher), nil) consumer.RegisterHandlers(convoy.DeleteArchivedTasksProcessor, task.DeleteArchivedTasks(a.Queue, rd), nil) - metrics.RegisterQueueMetrics(a.Queue, a.DB) + metrics.RegisterQueueMetrics(a.Queue, a.DB, circuitBreakerManager) // start worker consumer.Start() - fmt.Println("Starting Convoy Consumer Pool") + lo.Println("Starting Convoy Consumer Pool") return ctx.Err() } diff --git a/config/config.go b/config/config.go index c1f46dc02b..613fa4f561 100644 --- a/config/config.go +++ b/config/config.go @@ -74,6 +74,15 @@ var DefaultConfiguration = Configuration{ Policy: "720h", IsRetentionPolicyEnabled: false, }, + CircuitBreaker: CircuitBreakerConfiguration{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 70, + SuccessThreshold: 5, + ObservabilityWindow: 5, + MinimumRequestCount: 10, + ConsecutiveFailureThreshold: 10, + }, Auth: AuthConfiguration{ IsSignupEnabled: true, Native: NativeRealmOptions{ @@ -262,6 +271,16 @@ type RetentionPolicyConfiguration struct { IsRetentionPolicyEnabled bool `json:"enabled" envconfig:"CONVOY_RETENTION_POLICY_ENABLED"` } +type CircuitBreakerConfiguration struct { + SampleRate uint64 `json:"sample_rate" envconfig:"CONVOY_CIRCUIT_BREAKER_SAMPLE_RATE"` + ErrorTimeout uint64 `json:"error_timeout" envconfig:"CONVOY_CIRCUIT_BREAKER_ERROR_TIMEOUT"` + FailureThreshold uint64 `json:"failure_threshold" envconfig:"CONVOY_CIRCUIT_BREAKER_FAILURE_THRESHOLD"` + SuccessThreshold uint64 `json:"success_threshold" envconfig:"CONVOY_CIRCUIT_BREAKER_SUCCESS_THRESHOLD"` + MinimumRequestCount uint64 `json:"minimum_request_count" 
envconfig:"CONVOY_MINIMUM_REQUEST_COUNT"` + ObservabilityWindow uint64 `json:"observability_window" envconfig:"CONVOY_CIRCUIT_BREAKER_OBSERVABILITY_WINDOW"` + ConsecutiveFailureThreshold uint64 `json:"consecutive_failure_threshold" envconfig:"CONVOY_CIRCUIT_BREAKER_CONSECUTIVE_FAILURE_THRESHOLD"` +} + type AnalyticsConfiguration struct { IsEnabled bool `json:"enabled" envconfig:"CONVOY_ANALYTICS_ENABLED"` } @@ -358,6 +377,7 @@ type Configuration struct { CustomDomainSuffix string `json:"custom_domain_suffix" envconfig:"CONVOY_CUSTOM_DOMAIN_SUFFIX"` EnableFeatureFlag []string `json:"enable_feature_flag" envconfig:"CONVOY_ENABLE_FEATURE_FLAG"` RetentionPolicy RetentionPolicyConfiguration `json:"retention_policy"` + CircuitBreaker CircuitBreakerConfiguration `json:"circuit_breaker"` Analytics AnalyticsConfiguration `json:"analytics"` StoragePolicy StoragePolicyConfiguration `json:"storage_policy"` ConsumerPoolSize int `json:"consumer_pool_size" envconfig:"CONVOY_CONSUMER_POOL_SIZE"` diff --git a/config/config_test.go b/config/config_test.go index 3227f8b347..7d594cf955 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -125,6 +125,15 @@ func TestLoadConfig(t *testing.T) { Policy: "720h", IsRetentionPolicyEnabled: true, }, + CircuitBreaker: CircuitBreakerConfiguration{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 70, + SuccessThreshold: 5, + ObservabilityWindow: 5, + MinimumRequestCount: 10, + ConsecutiveFailureThreshold: 10, + }, Server: ServerConfiguration{ HTTP: HTTPServerConfiguration{ Port: 80, @@ -197,6 +206,15 @@ func TestLoadConfig(t *testing.T) { Port: 5432, SetConnMaxLifetime: 3600, }, + CircuitBreaker: CircuitBreakerConfiguration{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 70, + SuccessThreshold: 5, + ObservabilityWindow: 5, + MinimumRequestCount: 10, + ConsecutiveFailureThreshold: 10, + }, Redis: RedisConfiguration{ Scheme: "redis", Host: "localhost", @@ -264,6 +282,15 @@ func TestLoadConfig(t *testing.T) { Host: 
"localhost:5005", RetentionPolicy: RetentionPolicyConfiguration{Policy: "720h"}, ConsumerPoolSize: 100, + CircuitBreaker: CircuitBreakerConfiguration{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 70, + SuccessThreshold: 5, + ObservabilityWindow: 5, + MinimumRequestCount: 10, + ConsecutiveFailureThreshold: 10, + }, Database: DatabaseConfiguration{ Type: PostgresDatabaseProvider, Scheme: "postgres", diff --git a/database/postgres/configuration.go b/database/postgres/configuration.go index 30fcbf0e01..d369b8c1f8 100644 --- a/database/postgres/configuration.go +++ b/database/postgres/configuration.go @@ -19,9 +19,13 @@ const ( storage_policy_type, on_prem_path, s3_prefix, s3_bucket, s3_access_key, s3_secret_key, s3_region, s3_session_token, s3_endpoint, - retention_policy_policy, retention_policy_enabled + retention_policy_policy, retention_policy_enabled, + cb_sample_rate,cb_error_timeout, + cb_failure_threshold, cb_success_threshold, + cb_observability_window, + cb_consecutive_failure_threshold, cb_minimum_request_count ) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14); + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21); ` fetchConfiguration = ` @@ -40,6 +44,13 @@ const ( s3_session_token AS "storage_policy.s3.session_token", s3_endpoint AS "storage_policy.s3.endpoint", s3_prefix AS "storage_policy.s3.prefix", + cb_sample_rate AS "circuit_breaker.sample_rate", + cb_error_timeout AS "circuit_breaker.error_timeout", + cb_failure_threshold AS "circuit_breaker.failure_threshold", + cb_success_threshold AS "circuit_breaker.success_threshold", + cb_observability_window AS "circuit_breaker.observability_window", + cb_minimum_request_count as "circuit_breaker.minimum_request_count", + cb_consecutive_failure_threshold AS "circuit_breaker.consecutive_failure_threshold", created_at, updated_at, deleted_at @@ -64,6 +75,13 @@ const ( s3_prefix = $12, retention_policy_policy = $13, 
retention_policy_enabled = $14, + cb_sample_rate = $15, + cb_error_timeout = $16, + cb_failure_threshold = $17, + cb_success_threshold = $18, + cb_observability_window = $19, + cb_consecutive_failure_threshold = $20, + cb_minimum_request_count = $21, updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL; ` @@ -95,6 +113,7 @@ func (c *configRepo) CreateConfiguration(ctx context.Context, config *datastore. } rc := config.GetRetentionPolicyConfig() + cb := config.GetCircuitBreakerConfig() r, err := c.db.ExecContext(ctx, createConfiguration, config.UID, @@ -111,6 +130,13 @@ func (c *configRepo) CreateConfiguration(ctx context.Context, config *datastore. config.StoragePolicy.S3.Endpoint, rc.Policy, rc.IsRetentionPolicyEnabled, + cb.SampleRate, + cb.ErrorTimeout, + cb.FailureThreshold, + cb.SuccessThreshold, + cb.ObservabilityWindow, + cb.ConsecutiveFailureThreshold, + cb.MinimumRequestCount, ) if err != nil { return err @@ -159,6 +185,7 @@ func (c *configRepo) UpdateConfiguration(ctx context.Context, cfg *datastore.Con } rc := cfg.GetRetentionPolicyConfig() + cb := cfg.GetCircuitBreakerConfig() result, err := c.db.ExecContext(ctx, updateConfiguration, cfg.UID, @@ -175,6 +202,13 @@ func (c *configRepo) UpdateConfiguration(ctx context.Context, cfg *datastore.Con cfg.StoragePolicy.S3.Prefix, rc.Policy, rc.IsRetentionPolicyEnabled, + cb.SampleRate, + cb.ErrorTimeout, + cb.FailureThreshold, + cb.SuccessThreshold, + cb.ObservabilityWindow, + cb.ConsecutiveFailureThreshold, + cb.MinimumRequestCount, ) if err != nil { return err diff --git a/database/postgres/configuration_test.go b/database/postgres/configuration_test.go index cfe2ecf51c..1fc9411a1d 100644 --- a/database/postgres/configuration_test.go +++ b/database/postgres/configuration_test.go @@ -93,10 +93,6 @@ func generateConfig() *datastore.Configuration { UID: ulid.Make().String(), IsAnalyticsEnabled: true, IsSignupEnabled: false, - RetentionPolicy: &datastore.RetentionPolicyConfiguration{ - Policy: "720h", - 
IsRetentionPolicyEnabled: true, - }, StoragePolicy: &datastore.StoragePolicyConfiguration{ Type: datastore.OnPrem, S3: &datastore.S3Storage{ @@ -112,5 +108,17 @@ func generateConfig() *datastore.Configuration { Path: null.NewString("path", true), }, }, + RetentionPolicy: &datastore.RetentionPolicyConfiguration{ + Policy: "720h", + IsRetentionPolicyEnabled: true, + }, + CircuitBreakerConfig: &datastore.CircuitBreakerConfig{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 10, + SuccessThreshold: 5, + ObservabilityWindow: 5, + ConsecutiveFailureThreshold: 10, + }, } } diff --git a/database/postgres/delivery_attempts.go b/database/postgres/delivery_attempts.go index 41d39d5723..f525c0f5e0 100644 --- a/database/postgres/delivery_attempts.go +++ b/database/postgres/delivery_attempts.go @@ -4,8 +4,10 @@ import ( "context" "database/sql" "errors" + "fmt" "github.com/frain-dev/convoy/database" "github.com/frain-dev/convoy/datastore" + "github.com/frain-dev/convoy/pkg/circuit_breaker" "github.com/jmoiron/sqlx" "io" "time" @@ -130,6 +132,66 @@ func (d *deliveryAttemptRepo) DeleteProjectDeliveriesAttempts(ctx context.Contex return nil } +func (d *deliveryAttemptRepo) GetFailureAndSuccessCounts(ctx context.Context, lookBackDuration uint64, resetTimes map[string]time.Time) (map[string]circuit_breaker.PollResult, error) { + resultsMap := map[string]circuit_breaker.PollResult{} + + query := ` + SELECT + endpoint_id AS key, + project_id AS tenant_id, + COUNT(CASE WHEN status = false THEN 1 END) AS failures, + COUNT(CASE WHEN status = true THEN 1 END) AS successes + FROM convoy.delivery_attempts + WHERE created_at >= NOW() - MAKE_INTERVAL(mins := $1) + group by endpoint_id, project_id; + ` + + rows, err := d.db.QueryxContext(ctx, query, lookBackDuration) + if err != nil { + return nil, err + } + defer rows.Close() + + for rows.Next() { + var rowValue circuit_breaker.PollResult + if rowScanErr := rows.StructScan(&rowValue); rowScanErr != nil { + return nil, rowScanErr + } + 
resultsMap[rowValue.Key] = rowValue + } + + // this is an n+1 query? yikes + query2 := ` + SELECT + endpoint_id AS key, + project_id AS tenant_id, + COUNT(CASE WHEN status = false THEN 1 END) AS failures, + COUNT(CASE WHEN status = true THEN 1 END) AS successes + FROM convoy.delivery_attempts + WHERE endpoint_id = '%s' AND created_at >= TIMESTAMP '%s' AT TIME ZONE 'UTC' + group by endpoint_id, project_id; + ` + + customFormat := "2006-01-02 15:04:05" + for k, t := range resetTimes { + // remove the old key so it doesn't pollute the results + delete(resultsMap, k) + qq := fmt.Sprintf(query2, k, t.Format(customFormat)) + + var rowValue circuit_breaker.PollResult + err = d.db.QueryRowxContext(ctx, qq).StructScan(&rowValue) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + continue + } + } + + resultsMap[k] = rowValue + } + + return resultsMap, nil +} + func (d *deliveryAttemptRepo) ExportRecords(ctx context.Context, projectID string, createdAt time.Time, w io.Writer) (int64, error) { return exportRecords(ctx, d.db, "convoy.delivery_attempts", projectID, createdAt, w) } diff --git a/datastore/models.go b/datastore/models.go index f5877559c8..468a210bb8 100644 --- a/datastore/models.go +++ b/datastore/models.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + cb "github.com/frain-dev/convoy/pkg/circuit_breaker" "math" "net/http" "strings" @@ -317,6 +318,15 @@ var ( IsRetentionPolicyEnabled: false, Policy: "720h", } + + DefaultCircuitBreakerConfiguration = CircuitBreakerConfig{ + SampleRate: 30, + ErrorTimeout: 30, + FailureThreshold: 70, + SuccessThreshold: 5, + ObservabilityWindow: 5, + ConsecutiveFailureThreshold: 10, + } ) func GetDefaultSignatureConfig() *SignatureConfiguration { @@ -406,8 +416,9 @@ type Endpoint struct { Events int64 `json:"events,omitempty" db:"event_count"` Authentication *EndpointAuthentication `json:"authentication" db:"authentication"` - RateLimit int `json:"rate_limit" db:"rate_limit"` - RateLimitDuration uint64 
`json:"rate_limit_duration" db:"rate_limit_duration"` + RateLimit int `json:"rate_limit" db:"rate_limit"` + RateLimitDuration uint64 `json:"rate_limit_duration" db:"rate_limit_duration"` + FailureRate float64 `json:"failure_rate" db:"-"` CreatedAt time.Time `json:"created_at,omitempty" db:"created_at,omitempty" swaggertype:"string"` UpdatedAt time.Time `json:"updated_at,omitempty" db:"updated_at,omitempty" swaggertype:"string"` @@ -1324,17 +1335,38 @@ type Organisation struct { } type Configuration struct { - UID string `json:"uid" db:"id"` - IsAnalyticsEnabled bool `json:"is_analytics_enabled" db:"is_analytics_enabled"` - IsSignupEnabled bool `json:"is_signup_enabled" db:"is_signup_enabled"` - StoragePolicy *StoragePolicyConfiguration `json:"storage_policy" db:"storage_policy"` - RetentionPolicy *RetentionPolicyConfiguration `json:"retention_policy" db:"retention_policy"` + UID string `json:"uid" db:"id"` + IsAnalyticsEnabled bool `json:"is_analytics_enabled" db:"is_analytics_enabled"` + IsSignupEnabled bool `json:"is_signup_enabled" db:"is_signup_enabled"` + + StoragePolicy *StoragePolicyConfiguration `json:"storage_policy" db:"storage_policy"` + RetentionPolicy *RetentionPolicyConfiguration `json:"retention_policy" db:"retention_policy"` + CircuitBreakerConfig *CircuitBreakerConfig `json:"circuit_breaker" db:"circuit_breaker"` CreatedAt time.Time `json:"created_at,omitempty" db:"created_at,omitempty" swaggertype:"string"` UpdatedAt time.Time `json:"updated_at,omitempty" db:"updated_at,omitempty" swaggertype:"string"` DeletedAt null.Time `json:"deleted_at,omitempty" db:"deleted_at" swaggertype:"string"` } +func (c *Configuration) GetCircuitBreakerConfig() CircuitBreakerConfig { + if c.CircuitBreakerConfig != nil { + return *c.CircuitBreakerConfig + } + return CircuitBreakerConfig{} +} + +func (c *Configuration) ToCircuitBreakerConfig() *cb.CircuitBreakerConfig { + return &cb.CircuitBreakerConfig{ + SampleRate: c.CircuitBreakerConfig.SampleRate, + BreakerTimeout: 
c.CircuitBreakerConfig.ErrorTimeout, + FailureThreshold: c.CircuitBreakerConfig.FailureThreshold, + SuccessThreshold: c.CircuitBreakerConfig.SuccessThreshold, + ObservabilityWindow: c.CircuitBreakerConfig.ObservabilityWindow, + MinimumRequestCount: c.CircuitBreakerConfig.MinimumRequestCount, + ConsecutiveFailureThreshold: c.CircuitBreakerConfig.ConsecutiveFailureThreshold, + } +} + func (c *Configuration) GetRetentionPolicyConfig() RetentionPolicyConfiguration { if c.RetentionPolicy != nil { return *c.RetentionPolicy @@ -1362,6 +1394,16 @@ type OnPremStorage struct { Path null.String `json:"path" db:"path"` } +type CircuitBreakerConfig struct { + SampleRate uint64 `json:"sample_rate" db:"sample_rate"` + ErrorTimeout uint64 `json:"error_timeout" db:"error_timeout"` + FailureThreshold uint64 `json:"failure_threshold" db:"failure_threshold"` + SuccessThreshold uint64 `json:"success_threshold" db:"success_threshold"` + ObservabilityWindow uint64 `json:"observability_window" db:"observability_window"` + MinimumRequestCount uint64 `json:"minimum_request_count" db:"minimum_request_count"` + ConsecutiveFailureThreshold uint64 `json:"consecutive_failure_threshold" db:"consecutive_failure_threshold"` +} + type OrganisationMember struct { UID string `json:"uid" db:"id"` OrganisationID string `json:"organisation_id" db:"organisation_id"` diff --git a/datastore/repository.go b/datastore/repository.go index 022b006239..cf0315ca65 100644 --- a/datastore/repository.go +++ b/datastore/repository.go @@ -2,6 +2,7 @@ package datastore import ( "context" + "github.com/frain-dev/convoy/pkg/circuit_breaker" "io" "time" @@ -207,4 +208,5 @@ type DeliveryAttemptsRepository interface { FindDeliveryAttemptById(context.Context, string, string) (*DeliveryAttempt, error) FindDeliveryAttempts(context.Context, string) ([]DeliveryAttempt, error) DeleteProjectDeliveriesAttempts(ctx context.Context, projectID string, filter *DeliveryAttemptsFilter, hardDelete bool) error + 
GetFailureAndSuccessCounts(ctx context.Context, lookBackDuration uint64, resetTimes map[string]time.Time) (resultsMap map[string]circuit_breaker.PollResult, err error) } diff --git a/internal/email/templates/endpoint.update.html b/internal/email/templates/endpoint.update.html index c935a2e0bc..b61816eac5 100644 --- a/internal/email/templates/endpoint.update.html +++ b/internal/email/templates/endpoint.update.html @@ -1,141 +1,74 @@ -
- - - -
-
-
-
-
-
-
-
-
- Hi there,- {{if eq .endpoint_status "active"}} -- Please note your endpoint has been enabled. See details: - -
- Important: Congrats! Your endpoint has been restored successfully. Please head over to your dashboard to retry - all failed and discarded events. - - {{else}} -- Please note your endpoint has been disabled. See details: -
- Important: You're receiving this email because your endpoint has consecutively failed to receive events, and - needs to be checked. To re-activate your endpoint please head over to your dashboard and retry any failed event. - - {{end}} - -- For any enquiry or complaint, you can reply to this email. - - |
-