diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..5e7fe84 --- /dev/null +++ b/.env.example @@ -0,0 +1,17 @@ +# Service Configuration +SERVICE_HOST=localhost:8080 +SERVICE_API_PORT=8080 +SERVICE_ENVIRONMENT=local + +# ElasticMQ/SQS Configuration +SQS_ENDPOINT=http://elasticmq:9324 +SQS_QUEUE_URL=http://elasticmq:9324/000000000000/events +SQS_REGION=eu-central-1 + +# ClickHouse Configuration +CLICKHOUSE_HOST=clickhouse +CLICKHOUSE_PORT=9000 +CLICKHOUSE_DATABASE=events +CLICKHOUSE_USER=default +CLICKHOUSE_PASSWORD=default +CLICKHOUSE_USE_TLS=false diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml new file mode 100644 index 0000000..135ed25 --- /dev/null +++ b/.github/workflows/cd.yml @@ -0,0 +1,63 @@ +name: CD + +on: + push: + branches: + - 'main' + workflow_dispatch: + +permissions: + id-token: write + contents: read + +jobs: + build-and-deploy: + runs-on: ubuntu-24.04 + strategy: + matrix: + service: + - name: api + dockerfile: deployments/docker/api.Dockerfile + image: event-analytics-api + ecs-service: api + + - name: consumer + dockerfile: deployments/docker/consumer.Dockerfile + image: event-analytics-consumer + ecs-service: consumer + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v5 + with: + role-to-assume: ${{ secrets.AWS_ROLE_ARN }} + aws-region: ${{ vars.AWS_REGION }} + + - name: Configure ECR credentials + id: login-ecr + uses: aws-actions/amazon-ecr-login@v2 + + - name: Build and push ${{ matrix.service.name }} + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ${{ matrix.service.dockerfile }} + push: true + tags: | + ${{ steps.login-ecr.outputs.registry }}/${{ matrix.service.image }}:${{ github.sha }} + ${{ steps.login-ecr.outputs.registry }}/${{ matrix.service.image }}:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Update ECS service ${{ matrix.service.name }} + run: | + aws ecs update-service \ + --cluster event-analytics \ + --service ${{ matrix.service.ecs-service }} \ + --force-new-deployment \ + --region ${{ vars.AWS_REGION }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..61549ab --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,89 @@ +name: CI + +on: + push: + branches: + - '**' + pull_request: + branches: + - '**' + workflow_dispatch: + +env: + GO_VERSION: '1.25.5' + +jobs: + test: + name: Test + runs-on: ubuntu-24.04 + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Download dependencies + run: go mod download + + - name: Run tests + run: go test -v -race ./... + + build-api: + name: Build API + runs-on: ubuntu-24.04 + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build API Docker image + uses: docker/build-push-action@v6 + with: + context: . + file: ./deployments/docker/api.Dockerfile + push: false + tags: event-analytics-service-api:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + build-consumer: + name: Build Consumer + runs-on: ubuntu-24.04 + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build Consumer Docker image + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./deployments/docker/consumer.Dockerfile + push: false + tags: event-analytics-service-consumer:latest + cache-from: type=gha + cache-to: type=gha,mode=max + + lint: + name: Lint + runs-on: ubuntu-24.04 + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v9 + with: + version: latest \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d7e1ab4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# JetBrains IDEs +/.idea + +# Environment Variables +*.env + +# Binaries +/bin \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..26a6ebc --- /dev/null +++ b/Makefile @@ -0,0 +1,31 @@ +.PHONY: fmt build build-api build-consumer test test-no-race lint swagger env + +fmt: + @echo "Formatting code..." + @go fmt ./... + +build: build-api build-consumer + +build-api: + @echo "Building API..." + @go build -o bin/api ./cmd/api + +build-consumer: + @echo "Building consumer..." + @go build -o bin/consumer ./cmd/consumer + +test: + @echo "Running tests..." + @CGO_ENABLED=1 go test -v -race ./... + +lint: + @echo "Running linter..." + @golangci-lint run + +swagger: + @echo "Generating swagger documentation..." + @swag init -g cmd/api/main.go -o docs + +env: + @echo "Copying .env.example to .env..." + @cp .env.example .env \ No newline at end of file diff --git a/README.md b/README.md index 842a75b..d15eaa9 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,154 @@ # event-analytics-service -An event ingestion service. +An event ingestion and analytics service. 
+ +[![CI](https://github.com/BarkinBalci/event-analytics-service/actions/workflows/ci.yml/badge.svg)](https://github.com/BarkinBalci/event-analytics-service/actions/workflows/ci.yml) +[![CD](https://github.com/BarkinBalci/event-analytics-service/actions/workflows/cd.yml/badge.svg)](https://github.com/BarkinBalci/event-analytics-service/actions/workflows/cd.yml) +![Go](https://img.shields.io/badge/Go-1.25.5-00ADD8?logo=go) +![Terraform](https://img.shields.io/badge/Terraform-1.14.3-7B42BC?logo=terraform) + +## Architecture +![An architecture diagram](docs/architecture_diagram.png) + +The service uses a two-component architecture that separates ingestion from processing. The API Service accepts HTTP events, publishes them to SQS, and immediately returns a 202 Accepted response. Meanwhile, the Consumer Service independently polls SQS and writes events to ClickHouse for analytics. + +This design keeps the API highly available since it never waits for database writes, while SQS buffers events between the two services. Both components can scale independently based on their workloads, ensuring the system handles traffic spikes without losing events. 
+ +## Prerequisites + +- Go 1.25+ +- Docker +- Docker Compose + +## Quick Start + +```bash +# Create an .env file from the .env.example +make env +``` +```bash +# Start the application +docker-compose up -d +``` + +```bash +# Check health +curl http://localhost:8080/health +``` + +```bash +# Post an event +curl -X POST http://localhost:8080/events \ + -H "Content-Type: application/json" \ + -d '{ + "event_name": "product_view", + "channel": "web", + "campaign_id": "cmp_987", + "user_id": "user_123", + "timestamp": '$(date +%s)', + "tags": ["test"], + "metadata": {"test": true} + }' +``` + +```bash +# Query metrics grouped by channel +curl "http://localhost:8080/metrics?event_name=product_view&from=1766717657&to=1766747957&group_by=channel" +``` + +```bash +# Query metrics grouped by hour +curl "http://localhost:8080/metrics?event_name=product_view&from=1766717657&to=1766747957&group_by=hour" +``` + +```bash +# Query metrics grouped by day +curl "http://localhost:8080/metrics?event_name=product_view&from=1766717657&to=1766747957&group_by=day" +``` + +## API Documentation + +Swagger is available in http://localhost:8080/docs/index.html and [docs/swagger.yaml](docs/swagger.yaml) + +## Benchmark + +Load test performed with [hey](https://github.com/rakyll/hey) using 250 concurrent connections over 10 minutes: + +```bash +hey -z 600s -c 250 -m POST \ + -H "Content-Type: application/json" \ + -d '{"event_name":"product_view","channel":"web","campaign_id":"cmp_987","user_id":"user_123","timestamp":'$(date +%s)',"tags":["test"],"metadata":{"test":true}}' \ + +``` + +**Test Configuration:** +- ECS with autoscaling enabled but limited to 10 tasks +- Fargate task specs (I did not have enough time to do right sizing 😅): + - API: 0.5 vCPU, 1 GB RAM + - Consumer: 1 vCPU, 2 GB RAM + +### Results + +``` +Summary: + Total: 600.1190 secs + Slowest: 1.2400 secs + Fastest: 0.0406 secs + Average: 0.1500 secs + Requests/sec: 5039.5303 + + Total data: 214726507 bytes + Size/request: 214 
bytes + +Response time histogram: + 0.041 [1] | + 0.161 [995221] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ + 0.281 [2900] | + 0.400 [913] | + 0.520 [408] | + 0.640 [280] | + 0.760 [190] | + 0.880 [63] | + 1.000 [20] | + 1.120 [2] | + 1.240 [2] | + +Latency distribution: + 10% in 0.0442 secs + 25% in 0.0452 secs + 50% in 0.0469 secs + 75% in 0.0492 secs + 90% in 0.0519 secs + 95% in 0.0561 secs + 99% in 0.1079 secs + +Details (average, fastest, slowest): + DNS+dialup: 0.0000 secs, 0.0406 secs, 1.2400 secs + DNS-lookup: 0.0000 secs, 0.0000 secs, 0.1176 secs + req write: 0.0000 secs, 0.0000 secs, 0.0022 secs + resp wait: 0.1497 secs, 0.0406 secs, 1.2399 secs + resp read: 0.0001 secs, 0.0000 secs, 0.0081 secs + +Status code distribution: + [202] 1000000 responses +``` + +## Design Decisions + +### Why ClickHouse? + +I went with ClickHouse because it's built specifically for this kind of workload with lots of writes, time-based queries, and aggregations, and the ReplacingMergeTree engine can handle deduplication without much developer effort. + +### Why AWS SQS? + +I considered Kafka for lower latency at first, but its operational overhead and more so cost floor had pushed me towards SQS. + +### Why Fargate? + +I chose Fargate because I didn't want to spend time and resources managing and configuring EC2 instances. Deployment is as simple as pushing a Docker image and updating the ECS task definition. + +## Known Limitations + +- API has no authentication, rate limiting, or explicit request size validation. +- Health check endpoint does not verify ClickHouse or SQS connectivity, which means the load balancer might route traffic to instances with broken dependencies. +- Fargate autoscaling and task sizes were chosen arbitrarily and likely need right sizing based on production profiling. +- Consumer retry logic relies on SQS visibility timeout expiration rather than immediate retry with exponential backoff. 
\ No newline at end of file diff --git a/cmd/api/main.go b/cmd/api/main.go new file mode 100644 index 0000000..6dc7506 --- /dev/null +++ b/cmd/api/main.go @@ -0,0 +1,84 @@ +package main + +import ( + "context" + "fmt" + "net/http" + + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/docs" + "github.com/BarkinBalci/event-analytics-service/internal/config" + "github.com/BarkinBalci/event-analytics-service/internal/handler" + "github.com/BarkinBalci/event-analytics-service/internal/logger" + "github.com/BarkinBalci/event-analytics-service/internal/queue/sqs" + "github.com/BarkinBalci/event-analytics-service/internal/repository/clickhouse" + "github.com/BarkinBalci/event-analytics-service/internal/service" +) + +// @title Event Analytics Service API +// @version 1.0 +// @description API for publishing and managing analytics events +// @host localhost:8080 +// @BasePath / +// @schemes http https +func main() { + cfg, err := config.Load() + if err != nil { + panic(fmt.Sprintf("Failed to load config: %v", err)) + } + + // Initialize logger + log, err := logger.New(cfg.Service.Environment) + if err != nil { + panic(fmt.Sprintf("Failed to initialize logger: %v", err)) + } + defer func(log *zap.Logger) { + err := log.Sync() + if err != nil { + log.Error("Failed to sync logger", zap.Error(err)) + } + }(log) + + log.Info("Starting API service", + zap.String("environment", cfg.Service.Environment), + zap.String("port", cfg.Service.APIPort)) + + // Configure Swagger host dynamically + docs.SwaggerInfo.Host = cfg.Service.Host + + ctx := context.Background() + + // Initialize SQS client + sqsClient, err := sqs.NewClient(ctx, cfg.SQS, log) + if err != nil { + log.Fatal("Failed to create SQS client", zap.Error(err)) + } + + // Initialize ClickHouse client + clickhouseClient, err := clickhouse.NewClient(ctx, &cfg.ClickHouse, log) + if err != nil { + log.Fatal("Failed to create ClickHouse client", zap.Error(err)) + } + defer func(clickhouseClient 
*clickhouse.Client) { + if err := clickhouseClient.Close(); err != nil { + log.Error("Failed to close ClickHouse client", zap.Error(err)) + } + }(clickhouseClient) + + // Initialize repository + repo := clickhouse.NewRepository(clickhouseClient, log) + + // Initialize event service + eventService := service.NewEventService(sqsClient, repo, log) + + // Initialize handler + h := handler.NewHandler(eventService, log) + + addr := fmt.Sprintf(":%s", cfg.Service.APIPort) + log.Info("API server starting", zap.String("address", addr)) + + if err := http.ListenAndServe(addr, h); err != nil { + log.Fatal("Failed to start API server", zap.Error(err)) + } +} diff --git a/cmd/consumer/main.go b/cmd/consumer/main.go new file mode 100644 index 0000000..52a8862 --- /dev/null +++ b/cmd/consumer/main.go @@ -0,0 +1,109 @@ +package main + +import ( + "context" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/config" + "github.com/BarkinBalci/event-analytics-service/internal/consumer" + "github.com/BarkinBalci/event-analytics-service/internal/logger" + "github.com/BarkinBalci/event-analytics-service/internal/queue/sqs" + "github.com/BarkinBalci/event-analytics-service/internal/repository/clickhouse" +) + +func main() { + // Load configuration + cfg, err := config.Load() + if err != nil { + panic(fmt.Sprintf("Failed to load config: %v", err)) + } + + // Initialize logger + log, err := logger.New(cfg.Service.Environment) + if err != nil { + panic(fmt.Sprintf("Failed to initialize logger: %v", err)) + } + defer func(log *zap.Logger) { + err := log.Sync() + if err != nil { + log.Error("Failed to sync logger", zap.Error(err)) + } + }(log) + + log.Info("Starting consumer service", + zap.String("environment", cfg.Service.Environment)) + + ctx := context.Background() + + // Initialize ClickHouse client + chClient, err := clickhouse.NewClient(ctx, &cfg.ClickHouse, log) + if err != nil { + log.Fatal("Failed 
to create ClickHouse client", zap.Error(err)) + } + defer func() { + if err := chClient.Close(); err != nil { + log.Error("Failed to close ClickHouse client", zap.Error(err)) + } + }() + + // Initialize repository + repo := clickhouse.NewRepository(chClient, log) + + // Initialize schema (create tables if not exist) + if err := repo.InitSchema(ctx); err != nil { + log.Fatal("Failed to initialize schema", zap.Error(err)) + } + log.Info("Database schema initialized") + + // Initialize SQS client + sqsClient, err := sqs.NewClient(ctx, cfg.SQS, log) + if err != nil { + log.Fatal("Failed to create SQS client", zap.Error(err)) + } + + // Initialize consumer + c := consumer.NewConsumer(cfg, sqsClient, repo, log) + + // Start health check endpoint + go func() { + http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + if err := repo.Ping(r.Context()); err != nil { + log.Warn("Health check failed", zap.Error(err)) + w.WriteHeader(http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + }) + + addr := ":" + cfg.Consumer.HealthCheckPort + log.Info("Health check server starting", zap.String("address", addr)) + if err := http.ListenAndServe(addr, nil); err != nil { + log.Error("Health check server error", zap.Error(err)) + } + }() + + // Start consumer + consumerCtx, cancel := context.WithCancel(ctx) + defer cancel() + + log.Info("Consumer starting") + + go func() { + if err := c.Start(consumerCtx); err != nil { + log.Fatal("Consumer error", zap.Error(err)) + } + }() + + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) + <-sigChan + + log.Info("Shutting down consumer gracefully") + cancel() +} diff --git a/deployments/docker/api.Dockerfile b/deployments/docker/api.Dockerfile new file mode 100644 index 0000000..f1b2e00 --- /dev/null +++ b/deployments/docker/api.Dockerfile @@ -0,0 +1,14 @@ +FROM golang:1.25.5 AS builder + +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . 
+RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main ./cmd/api + +FROM alpine:3.22 +RUN apk --no-cache add ca-certificates +WORKDIR /root/ +COPY --from=builder /app/main . +EXPOSE 8080 +CMD ["./main"] diff --git a/deployments/docker/consumer.Dockerfile b/deployments/docker/consumer.Dockerfile new file mode 100644 index 0000000..962b4a9 --- /dev/null +++ b/deployments/docker/consumer.Dockerfile @@ -0,0 +1,13 @@ +FROM golang:1.25.5 AS builder + +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o main ./cmd/consumer + +FROM alpine:3.22 +RUN apk --no-cache add ca-certificates +WORKDIR /root/ +COPY --from=builder /app/main . +CMD ["./main"] diff --git a/deployments/terraform/.gitignore b/deployments/terraform/.gitignore new file mode 100644 index 0000000..7c14c23 --- /dev/null +++ b/deployments/terraform/.gitignore @@ -0,0 +1,3 @@ +.terraform/ +*.tfvars +!terraform.tfvars.example diff --git a/deployments/terraform/.terraform.lock.hcl b/deployments/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..df1003b --- /dev/null +++ b/deployments/terraform/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/aws" { + version = "6.27.0" + constraints = ">= 4.66.1, >= 6.0.0, >= 6.25.0, ~> 6.27" + hashes = [ + "h1:bixp2PSsP5ZGBczGCxcbSDn6lF5QFlUXlNroq9cdab4=", + "zh:177a24b806c72e8484b5cabc93b2b38e3d770ae6f745a998b54d6619fd0e8129", + "zh:4ac4a85c14fb868a3306b542e6a56c10bd6c6d5a67bc0c9b8f6a9060cf5f3be7", + "zh:552652185bc85c8ba1da1d65dea47c454728a5c6839c458b6dcd3ce71c19ccfc", + "zh:60284b8172d09aee91eae0856f09855eaf040ce3a58d6933602ae17c53f8ed04", + "zh:6be38d156756ca61fb8e7c752cc5d769cd709686700ac4b230f40a6e95b5dbc9", + "zh:7a409138fae4ef42e3a637e37cb9efedf96459e28a3c764fc4e855e8db9a7485", + "zh:8070cf5224ed1ed3a3e9a59f7c30ff88bf071c7567165275d477c1738a56c064", + "zh:894439ef340a9a79f69cd759e27ad11c7826adeca27be1b1ca82b3c9702fa300", + "zh:89d035eebf08a97c89374ff06040955ddc09f275ecca609d0c9d58d149bef5cf", + "zh:985b1145d724fc1f38369099e4a5087141885740fd6c0b1dbc492171e73c2e49", + "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", + "zh:a80b47ae8d1475201c86bd94a5dcb9dd4da5e8b73102a90820b68b66b76d50fd", + "zh:d3395be1556210f82199b9166a6b2e677cee9c4b67e96e63f6c3a98325ad7ab0", + "zh:db0b869d09657f6f1e4110b56093c5fcdf9dbdd97c020db1e577b239c0adcbce", + "zh:ffc72e680370ae7c21f9bd3082c6317730df805c6797427839a6b6b7e9a26a01", + ] +} diff --git a/deployments/terraform/main.tf b/deployments/terraform/main.tf new file mode 100644 index 0000000..c11950e --- /dev/null +++ b/deployments/terraform/main.tf @@ -0,0 +1,580 @@ +terraform { + required_version = ">= 1.14.3" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 6.27" + } + } + + cloud { + organization = "barkinbalci" + + workspaces { + name = "event-analytics-service" + } + } +} + +provider "aws" { + region = var.aws_region +} + +locals { + name_prefix = "event-analytics" + services = toset(["api", "consumer"]) + + common_tags = { + Project = "event-analytics-service" + ManagedBy = "Terraform" + } + + # Reusable policies and configurations + 
ecr_lifecycle_policy = { + rules = [{ + rulePriority = 1 + description = "Keep last 10 images" + selection = { + tagStatus = "any" + countType = "imageCountMoreThan" + countNumber = 10 + } + action = { type = "expire" } + }] + } + + ecs_assume_role_policy = { + Version = "2012-10-17" + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { Service = "ecs-tasks.amazonaws.com" } + }] + } + + allow_all_egress = { + description = "Allow all outbound traffic" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + ipv6_cidr_blocks = [] + prefix_list_ids = [] + security_groups = [] + self = false + } + + clickhouse_secrets = [ + { name = "CLICKHOUSE_HOST", valueFrom = "${aws_secretsmanager_secret.clickhouse.arn}:host::" }, + { name = "CLICKHOUSE_PORT", valueFrom = "${aws_secretsmanager_secret.clickhouse.arn}:port::" }, + { name = "CLICKHOUSE_DATABASE", valueFrom = "${aws_secretsmanager_secret.clickhouse.arn}:database::" }, + { name = "CLICKHOUSE_USER", valueFrom = "${aws_secretsmanager_secret.clickhouse.arn}:user::" }, + { name = "CLICKHOUSE_PASSWORD", valueFrom = "${aws_secretsmanager_secret.clickhouse.arn}:password::" } + ] + + awslogs_options = { + "awslogs-region" = var.aws_region + } + + container_health_check = { + interval = 30 + timeout = 5 + retries = 3 + startPeriod = 60 + } + + deployment_config = { + maximum_percent = 200 + minimum_healthy_percent = 100 + } + + scaling_cooldowns = { + scale_in_cooldown = 300 + scale_out_cooldown = 60 + } +} + +# VPC +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 6.5" + + name = "${local.name_prefix}-vpc" + cidr = var.vpc_cidr + + azs = var.availability_zones + private_subnets = var.private_subnet_cidrs + public_subnets = var.public_subnet_cidrs + + enable_nat_gateway = true + single_nat_gateway = var.single_nat_gateway + enable_dns_hostnames = true + enable_dns_support = true + + tags = local.common_tags +} + +# ECR Repositories +resource 
"aws_ecr_repository" "service" { + for_each = local.services + name = "${local.name_prefix}-${each.key}" + image_tag_mutability = "MUTABLE" + + image_scanning_configuration { + scan_on_push = true + } + + tags = local.common_tags +} + +resource "aws_ecr_lifecycle_policy" "service" { + for_each = local.services + repository = aws_ecr_repository.service[each.key].name + policy = jsonencode(local.ecr_lifecycle_policy) +} + +# SQS Queue +resource "aws_sqs_queue" "events" { + name = "${local.name_prefix}-events" + visibility_timeout_seconds = 60 + message_retention_seconds = 1209600 + receive_wait_time_seconds = 20 + + redrive_policy = jsonencode({ + deadLetterTargetArn = aws_sqs_queue.events_dlq.arn + maxReceiveCount = 3 + }) + + tags = local.common_tags +} + +resource "aws_sqs_queue" "events_dlq" { + name = "${local.name_prefix}-events-dlq" + message_retention_seconds = 1209600 + + tags = local.common_tags +} + +# Secrets Manager +resource "aws_secretsmanager_secret" "clickhouse" { + name = "${local.name_prefix}/clickhouse" + description = "ClickHouse credentials" + + tags = local.common_tags +} + +resource "aws_secretsmanager_secret_version" "clickhouse" { + secret_id = aws_secretsmanager_secret.clickhouse.id + secret_string = jsonencode({ + host = var.CLICKHOUSE_HOST + port = var.CLICKHOUSE_PORT + database = var.CLICKHOUSE_DATABASE + user = var.CLICKHOUSE_USER + password = var.CLICKHOUSE_PASSWORD + }) +} + +# IAM Roles +resource "aws_iam_role" "ecs_task_execution" { + name = "${local.name_prefix}-ecs-task-execution" + assume_role_policy = jsonencode(local.ecs_assume_role_policy) + tags = local.common_tags +} + +resource "aws_iam_role_policy_attachment" "ecs_task_execution_policy" { + role = aws_iam_role.ecs_task_execution.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy" +} + +resource "aws_iam_role_policy" "ecs_secrets_access" { + name = "${local.name_prefix}-ecs-secrets" + role = aws_iam_role.ecs_task_execution.id + + 
policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Action = ["secretsmanager:GetSecretValue", "kms:Decrypt"] + Resource = [aws_secretsmanager_secret.clickhouse.arn] + }] + }) +} + +resource "aws_iam_role" "api_task" { + name = "${local.name_prefix}-api-task" + assume_role_policy = jsonencode(local.ecs_assume_role_policy) + tags = local.common_tags +} + +resource "aws_iam_role_policy" "api_sqs" { + name = "${local.name_prefix}-api-sqs" + role = aws_iam_role.api_task.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Action = ["sqs:SendMessage", "sqs:GetQueueUrl", "sqs:GetQueueAttributes"] + Resource = [aws_sqs_queue.events.arn] + }] + }) +} + +resource "aws_iam_role" "consumer_task" { + name = "${local.name_prefix}-consumer-task" + assume_role_policy = jsonencode(local.ecs_assume_role_policy) + tags = local.common_tags +} + +resource "aws_iam_role_policy" "consumer_sqs" { + name = "${local.name_prefix}-consumer-sqs" + role = aws_iam_role.consumer_task.id + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [{ + Effect = "Allow" + Action = [ + "sqs:ReceiveMessage", + "sqs:DeleteMessage", + "sqs:DeleteMessageBatch", + "sqs:GetQueueUrl", + "sqs:GetQueueAttributes", + "sqs:ChangeMessageVisibility" + ] + Resource = [aws_sqs_queue.events.arn] + }] + }) +} + +# ALB +module "alb" { + source = "terraform-aws-modules/alb/aws" + version = "~> 10.4" + + name = "${local.name_prefix}-alb" + load_balancer_type = "application" + vpc_id = module.vpc.vpc_id + subnets = module.vpc.public_subnets + security_groups = [aws_security_group.alb.id] + + enable_deletion_protection = false + + listeners = { + http = { + port = 80 + protocol = "HTTP" + forward = { target_group_key = "api" } + } + } + + target_groups = { + api = { + name = "${local.name_prefix}-api" + backend_protocol = "HTTP" + backend_port = 8080 + target_type = "ip" + deregistration_delay = 30 + + health_check = { + enabled = true + 
interval = 30 + path = "/health" + port = "traffic-port" + protocol = "HTTP" + timeout = 5 + healthy_threshold = 2 + unhealthy_threshold = 3 + matcher = "200" + } + + create_attachment = false + } + } + + tags = local.common_tags +} + +resource "aws_security_group" "alb" { + name = "${local.name_prefix}-alb" + description = "ALB security group" + vpc_id = module.vpc.vpc_id + + ingress { + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress = [local.allow_all_egress] + tags = local.common_tags +} + +# ECS Cluster +module "ecs" { + source = "terraform-aws-modules/ecs/aws" + version = "~> 6.11" + + cluster_name = local.name_prefix + + default_capacity_provider_strategy = { + FARGATE = { + weight = 50 + } + FARGATE_SPOT = { + weight = 50 + } + } + + tags = local.common_tags +} + +# CloudWatch Log Groups +resource "aws_cloudwatch_log_group" "service" { + for_each = local.services + name = "/ecs/${local.name_prefix}/${each.key}" + retention_in_days = var.log_retention_days + tags = local.common_tags +} + +# Security Groups +resource "aws_security_group" "api" { + name = "${local.name_prefix}-api" + description = "API service" + vpc_id = module.vpc.vpc_id + + ingress { + from_port = 8080 + to_port = 8080 + protocol = "tcp" + security_groups = [aws_security_group.alb.id] + } + + egress = [local.allow_all_egress] + tags = local.common_tags +} + +resource "aws_security_group" "consumer" { + name = "${local.name_prefix}-consumer" + description = "Consumer service" + vpc_id = module.vpc.vpc_id + egress = [local.allow_all_egress] + tags = local.common_tags +} + +# ECS Task Definitions +resource "aws_ecs_task_definition" "api" { + family = "${local.name_prefix}-api" + network_mode = "awsvpc" + requires_compatibilities = ["FARGATE"] + cpu = var.api_cpu + memory = var.api_memory + execution_role_arn = aws_iam_role.ecs_task_execution.arn + task_role_arn = aws_iam_role.api_task.arn + + container_definitions = jsonencode([{ + name = "api" + 
image = "${aws_ecr_repository.service["api"].repository_url}:${var.image_tag}" + essential = true + + portMappings = [{ containerPort = 8080, protocol = "tcp" }] + + environment = [ + { name = "SERVICE_ENVIRONMENT", value = "production" }, + { name = "SERVICE_API_PORT", value = "8080" }, + { name = "SERVICE_HOST", value = var.service_host }, + { name = "SQS_QUEUE_URL", value = aws_sqs_queue.events.url }, + { name = "SQS_REGION", value = var.aws_region } + ] + + secrets = local.clickhouse_secrets + + logConfiguration = { + logDriver = "awslogs" + options = merge(local.awslogs_options, { + "awslogs-group" = aws_cloudwatch_log_group.service["api"].name + "awslogs-stream-prefix" = "api" + }) + } + + healthCheck = merge(local.container_health_check, { + command = ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8080/health || exit 1"] + }) + }]) + + tags = local.common_tags +} + +resource "aws_ecs_task_definition" "consumer" { + family = "${local.name_prefix}-consumer" + network_mode = "awsvpc" + requires_compatibilities = ["FARGATE"] + cpu = var.consumer_cpu + memory = var.consumer_memory + execution_role_arn = aws_iam_role.ecs_task_execution.arn + task_role_arn = aws_iam_role.consumer_task.arn + + container_definitions = jsonencode([{ + name = "consumer" + image = "${aws_ecr_repository.service["consumer"].repository_url}:${var.image_tag}" + essential = true + + environment = [ + { name = "SERVICE_ENVIRONMENT", value = "production" }, + { name = "SQS_QUEUE_URL", value = aws_sqs_queue.events.url }, + { name = "SQS_REGION", value = var.aws_region }, + { name = "CONSUMER_BATCH_SIZE_MIN", value = "100" }, + { name = "CONSUMER_BATCH_SIZE_MAX", value = "2000" }, + { name = "CONSUMER_BATCH_TIMEOUT_SEC", value = "10" }, + { name = "CONSUMER_HEALTH_CHECK_PORT", value = "8081" }, + { name = "CLICKHOUSE_MAX_OPEN_CONNS", value = "10" }, + { name = "CLICKHOUSE_MAX_IDLE_CONNS", value = "5" }, + { name = "CLICKHOUSE_CONN_MAX_LIFETIME_SEC", value = "3600" } + ] + + 
secrets = local.clickhouse_secrets + + logConfiguration = { + logDriver = "awslogs" + options = merge(local.awslogs_options, { + "awslogs-group" = aws_cloudwatch_log_group.service["consumer"].name + "awslogs-stream-prefix" = "consumer" + }) + } + + healthCheck = merge(local.container_health_check, { + command = ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8081/health || exit 1"] + }) + }]) + + tags = local.common_tags +} + +# ECS Services +resource "aws_ecs_service" "api" { + name = "api" + cluster = module.ecs.cluster_id + task_definition = aws_ecs_task_definition.api.arn + desired_count = var.api_desired_count + + capacity_provider_strategy { + capacity_provider = "FARGATE" + weight = 50 + base = 1 + } + + capacity_provider_strategy { + capacity_provider = "FARGATE_SPOT" + weight = 50 + } + + network_configuration { + subnets = module.vpc.private_subnets + security_groups = [aws_security_group.api.id] + assign_public_ip = false + } + + load_balancer { + target_group_arn = module.alb.target_groups["api"].arn + container_name = "api" + container_port = 8080 + } + + deployment_maximum_percent = local.deployment_config.maximum_percent + deployment_minimum_healthy_percent = local.deployment_config.minimum_healthy_percent + + tags = local.common_tags + + depends_on = [module.alb] +} + +resource "aws_ecs_service" "consumer" { + name = "consumer" + cluster = module.ecs.cluster_id + task_definition = aws_ecs_task_definition.consumer.arn + desired_count = var.consumer_desired_count + + capacity_provider_strategy { + capacity_provider = "FARGATE_SPOT" + weight = 100 + } + + network_configuration { + subnets = module.vpc.private_subnets + security_groups = [aws_security_group.consumer.id] + assign_public_ip = false + } + + deployment_maximum_percent = local.deployment_config.maximum_percent + deployment_minimum_healthy_percent = local.deployment_config.minimum_healthy_percent + + tags = local.common_tags +} + +# Auto Scaling +resource 
"aws_appautoscaling_target" "api" { + max_capacity = var.api_max_capacity + min_capacity = var.api_min_capacity + resource_id = "service/${module.ecs.cluster_name}/${aws_ecs_service.api.name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" +} + +resource "aws_appautoscaling_policy" "api_cpu" { + name = "${local.name_prefix}-api-cpu" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.api.resource_id + scalable_dimension = aws_appautoscaling_target.api.scalable_dimension + service_namespace = aws_appautoscaling_target.api.service_namespace + + target_tracking_scaling_policy_configuration { + target_value = 70.0 + predefined_metric_specification { + predefined_metric_type = "ECSServiceAverageCPUUtilization" + } + scale_in_cooldown = local.scaling_cooldowns.scale_in_cooldown + scale_out_cooldown = local.scaling_cooldowns.scale_out_cooldown + } +} + +resource "aws_appautoscaling_target" "consumer" { + max_capacity = var.consumer_max_capacity + min_capacity = var.consumer_min_capacity + resource_id = "service/${module.ecs.cluster_name}/${aws_ecs_service.consumer.name}" + scalable_dimension = "ecs:service:DesiredCount" + service_namespace = "ecs" +} + +resource "aws_appautoscaling_policy" "consumer_sqs" { + name = "${local.name_prefix}-consumer-sqs" + policy_type = "TargetTrackingScaling" + resource_id = aws_appautoscaling_target.consumer.resource_id + scalable_dimension = aws_appautoscaling_target.consumer.scalable_dimension + service_namespace = aws_appautoscaling_target.consumer.service_namespace + + target_tracking_scaling_policy_configuration { + target_value = var.consumer_target_queue_depth + + customized_metric_specification { + metrics { + id = "m1" + metric_stat { + metric { + namespace = "AWS/SQS" + metric_name = "ApproximateNumberOfMessagesVisible" + dimensions { + name = "QueueName" + value = aws_sqs_queue.events.name + } + } + stat = "Average" + } + } + } + + scale_in_cooldown = 
local.scaling_cooldowns.scale_in_cooldown + scale_out_cooldown = local.scaling_cooldowns.scale_out_cooldown + } +} diff --git a/deployments/terraform/outputs.tf b/deployments/terraform/outputs.tf new file mode 100644 index 0000000..2cf816f --- /dev/null +++ b/deployments/terraform/outputs.tf @@ -0,0 +1,24 @@ +output "alb_dns_name" { + description = "ALB DNS name" + value = module.alb.dns_name +} + +output "ecr_api_repository_url" { + description = "API ECR repository URL" + value = aws_ecr_repository.service["api"].repository_url +} + +output "ecr_consumer_repository_url" { + description = "Consumer ECR repository URL" + value = aws_ecr_repository.service["consumer"].repository_url +} + +output "sqs_queue_url" { + description = "SQS queue URL" + value = aws_sqs_queue.events.url +} + +output "ecs_cluster_name" { + description = "ECS cluster name" + value = module.ecs.cluster_name +} diff --git a/deployments/terraform/terraform.tfvars.example b/deployments/terraform/terraform.tfvars.example new file mode 100644 index 0000000..8a644de --- /dev/null +++ b/deployments/terraform/terraform.tfvars.example @@ -0,0 +1,31 @@ +aws_region = "eu-central-1" + +vpc_cidr = "10.0.0.0/16" +availability_zones = ["eu-central-1a", "eu-central-1b"] +private_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"] +public_subnet_cidrs = ["10.0.101.0/24", "10.0.102.0/24"] +single_nat_gateway = true + +CLICKHOUSE_HOST = "your-instance.clickhouse.cloud" +CLICKHOUSE_PORT = 9440 +CLICKHOUSE_DATABASE = "events" +CLICKHOUSE_USER = "default" +CLICKHOUSE_PASSWORD = "CHANGE_ME" + +service_host = "eas.barkin.dev" +image_tag = "latest" + +api_cpu = "512" +api_memory = "1024" +api_desired_count = 2 +api_min_capacity = 1 +api_max_capacity = 10 + +consumer_cpu = "1024" +consumer_memory = "2048" +consumer_desired_count = 1 +consumer_min_capacity = 1 +consumer_max_capacity = 5 +consumer_target_queue_depth = 1000 + +log_retention_days = 30 diff --git a/deployments/terraform/variables.tf 
b/deployments/terraform/variables.tf new file mode 100644 index 0000000..b446392 --- /dev/null +++ b/deployments/terraform/variables.tf @@ -0,0 +1,148 @@ +variable "aws_region" { + description = "AWS region" + type = string + default = "eu-central-1" +} + +variable "vpc_cidr" { + description = "VPC CIDR" + type = string + default = "10.0.0.0/16" +} + +variable "availability_zones" { + description = "Availability zones" + type = list(string) + default = ["eu-central-1a", "eu-central-1b"] +} + +variable "private_subnet_cidrs" { + description = "Private subnet CIDRs" + type = list(string) + default = ["10.0.1.0/24", "10.0.2.0/24"] +} + +variable "public_subnet_cidrs" { + description = "Public subnet CIDRs" + type = list(string) + default = ["10.0.101.0/24", "10.0.102.0/24"] +} + +variable "single_nat_gateway" { + description = "Use single NAT gateway (cost savings)" + type = bool + default = true +} + +variable "CLICKHOUSE_HOST" { + description = "ClickHouse host" + type = string +} + +variable "CLICKHOUSE_PORT" { + description = "ClickHouse port" + type = number + default = 9440 +} + +variable "CLICKHOUSE_DATABASE" { + description = "ClickHouse database" + type = string + default = "events" +} + +variable "CLICKHOUSE_USER" { + description = "ClickHouse user" + type = string + default = "default" +} + +variable "CLICKHOUSE_PASSWORD" { + description = "ClickHouse password" + type = string + sensitive = true +} + +variable "service_host" { + description = "Service hostname for API" + type = string + default = "localhost:8080" +} + +variable "image_tag" { + description = "Docker image tag" + type = string + default = "latest" +} + +variable "api_cpu" { + description = "API task CPU" + type = string + default = "256" +} + +variable "api_memory" { + description = "API task memory" + type = string + default = "512" +} + +variable "api_desired_count" { + description = "API desired count" + type = number + default = 2 +} + +variable "api_min_capacity" { + description = "API 
min capacity" + type = number + default = 1 +} + +variable "api_max_capacity" { + description = "API max capacity" + type = number + default = 10 +} + +variable "consumer_cpu" { + description = "Consumer task CPU" + type = string + default = "256" +} + +variable "consumer_memory" { + description = "Consumer task memory" + type = string + default = "512" +} + +variable "consumer_desired_count" { + description = "Consumer desired count" + type = number + default = 1 +} + +variable "consumer_min_capacity" { + description = "Consumer min capacity" + type = number + default = 1 +} + +variable "consumer_max_capacity" { + description = "Consumer max capacity" + type = number + default = 5 +} + +variable "consumer_target_queue_depth" { + description = "Target SQS queue depth per consumer" + type = number + default = 1000 +} + +variable "log_retention_days" { + description = "CloudWatch log retention" + type = number + default = 30 +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..47da791 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,87 @@ +services: + api: + build: + context: . + dockerfile: deployments/docker/api.Dockerfile + container_name: event-analytics-api + ports: + - "8080:8080" + env_file: + - .env + depends_on: + clickhouse: + condition: service_healthy + elasticmq: + condition: service_healthy + networks: + - event-analytics-network + restart: unless-stopped + + consumer: + build: + context: . 
+ dockerfile: deployments/docker/consumer.Dockerfile + container_name: event-analytics-consumer + env_file: + - .env + depends_on: + clickhouse: + condition: service_healthy + elasticmq: + condition: service_healthy + networks: + - event-analytics-network + restart: unless-stopped + + clickhouse: + image: clickhouse/clickhouse-server:latest + container_name: event-analytics-clickhouse + ports: + - "8123:8123" # HTTP interface + - "9000:9000" # Native client + environment: + - CLICKHOUSE_DB=events + env_file: + - .env + volumes: + - clickhouse-data:/var/lib/clickhouse + - clickhouse-logs:/var/log/clickhouse-server + ulimits: + nofile: + soft: 262144 + hard: 262144 + networks: + - event-analytics-network + restart: unless-stopped + healthcheck: + test: ["CMD", "clickhouse-client", "--query", "SELECT 1"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + + elasticmq: + image: softwaremill/elasticmq:latest + container_name: event-analytics-elasticmq + ports: + - "9324:9324" # API Server + - "9325:9325" # Web UI + volumes: + - ./elasticmq.conf:/opt/elasticmq.conf:ro + networks: + - event-analytics-network + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9325/"] + interval: 5s + timeout: 3s + retries: 5 + start_period: 10s + +networks: + event-analytics-network: + driver: bridge + +volumes: + clickhouse-data: + clickhouse-logs: diff --git a/docs/architecture_diagram.png b/docs/architecture_diagram.png new file mode 100644 index 0000000..25739f9 Binary files /dev/null and b/docs/architecture_diagram.png differ diff --git a/docs/docs.go b/docs/docs.go new file mode 100644 index 0000000..78b3834 --- /dev/null +++ b/docs/docs.go @@ -0,0 +1,395 @@ +// Package docs Code generated by swaggo/swag. 
DO NOT EDIT +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/events": { + "post": { + "description": "Publish a single analytics event to the queue", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Publish a single event", + "parameters": [ + { + "description": "Event data", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dto.PublishEventRequest" + } + } + ], + "responses": { + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/dto.PublishEventResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + }, + "/events/bulk": { + "post": { + "description": "Publish multiple analytics events in bulk to the queue", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Publish multiple events", + "parameters": [ + { + "description": "Bulk events data", + "name": "events", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dto.PublishEventsBulkRequest" + } + } + ], + "responses": { + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/dto.PublishBulkEventsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + }, + "/health": { + 
"get": { + "description": "Check if the service is running", + "produces": [ + "application/json" + ], + "tags": [ + "health" + ], + "summary": "Health check", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/metrics": { + "get": { + "description": "Retrieve aggregated event metrics with optional grouping by channel, hour, or day", + "produces": [ + "application/json" + ], + "tags": [ + "metrics" + ], + "summary": "Get aggregated metrics", + "parameters": [ + { + "type": "string", + "description": "Event name to filter by", + "name": "event_name", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "Start timestamp (Unix epoch)", + "name": "from", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "End timestamp (Unix epoch)", + "name": "to", + "in": "query", + "required": true + }, + { + "enum": [ + "channel", + "hour", + "day" + ], + "type": "string", + "description": "Field to group by (channel, hour, day)", + "name": "group_by", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/dto.GetMetricsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + } + }, + "definitions": { + "dto.ErrorResponse": { + "type": "object", + "properties": { + "error": { + "type": "string", + "example": "validation_error" + }, + "message": { + "type": "string", + "example": "event_name is required" + } + } + }, + "dto.GetMetricsResponse": { + "type": "object", + "properties": { + "event_name": { + "type": "string", + "example": "product_view" + }, + "from": { + "type": "integer", + "example": 1723475612 + }, + "group_by": { + "type": "string", + 
"example": "channel" + }, + "groups": { + "type": "array", + "items": { + "$ref": "#/definitions/dto.MetricsGroupData" + } + }, + "to": { + "type": "integer", + "example": 1723562012 + }, + "total_count": { + "type": "integer", + "example": 5000 + }, + "unique_count": { + "type": "integer", + "example": 2500 + } + } + }, + "dto.MetricsGroupData": { + "type": "object", + "properties": { + "group_value": { + "type": "string", + "example": "web" + }, + "total_count": { + "type": "integer", + "example": 1500 + } + } + }, + "dto.PublishBulkEventsResponse": { + "type": "object", + "properties": { + "accepted": { + "type": "integer", + "example": 5 + }, + "errors": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "validation error on event 3" + ] + }, + "event_ids": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "evt_1", + "evt_2", + "evt_3" + ] + }, + "rejected": { + "type": "integer", + "example": 0 + } + } + }, + "dto.PublishEventRequest": { + "type": "object", + "required": [ + "channel", + "event_name", + "timestamp", + "user_id" + ], + "properties": { + "campaign_id": { + "type": "string", + "example": "cmp_987" + }, + "channel": { + "type": "string", + "example": "web" + }, + "event_name": { + "type": "string", + "example": "product_view" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "example": { + "price": "129.99", + "product_id": "prod-789" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "electronics", + "homepage", + "flash_sale" + ] + }, + "timestamp": { + "type": "integer", + "example": 1723475612 + }, + "user_id": { + "type": "string", + "example": "user_123" + } + } + }, + "dto.PublishEventResponse": { + "type": "object", + "properties": { + "event_id": { + "type": "string", + "example": "evt_1a2b3c4d5e6f" + }, + "status": { + "type": "string", + "example": "accepted" + } + } + }, + 
"dto.PublishEventsBulkRequest": { + "type": "object", + "required": [ + "events" + ], + "properties": { + "events": { + "type": "array", + "maxItems": 1000, + "minItems": 1, + "items": { + "$ref": "#/definitions/dto.PublishEventRequest" + } + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "1.0", + Host: "localhost:8080", + BasePath: "/", + Schemes: []string{"http", "https"}, + Title: "Event Analytics Service API", + Description: "API for publishing and managing analytics events", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/docs/swagger.json b/docs/swagger.json new file mode 100644 index 0000000..f0822fb --- /dev/null +++ b/docs/swagger.json @@ -0,0 +1,375 @@ +{ + "schemes": [ + "http", + "https" + ], + "swagger": "2.0", + "info": { + "description": "API for publishing and managing analytics events", + "title": "Event Analytics Service API", + "contact": {}, + "version": "1.0" + }, + "host": "localhost:8080", + "basePath": "/", + "paths": { + "/events": { + "post": { + "description": "Publish a single analytics event to the queue", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Publish a single event", + "parameters": [ + { + "description": "Event data", + "name": "event", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dto.PublishEventRequest" + } + } + ], + "responses": { + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/dto.PublishEventResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + }, + 
"/events/bulk": { + "post": { + "description": "Publish multiple analytics events in bulk to the queue", + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "events" + ], + "summary": "Publish multiple events", + "parameters": [ + { + "description": "Bulk events data", + "name": "events", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dto.PublishEventsBulkRequest" + } + } + ], + "responses": { + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/dto.PublishBulkEventsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + }, + "/health": { + "get": { + "description": "Check if the service is running", + "produces": [ + "application/json" + ], + "tags": [ + "health" + ], + "summary": "Health check", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } + } + }, + "/metrics": { + "get": { + "description": "Retrieve aggregated event metrics with optional grouping by channel, hour, or day", + "produces": [ + "application/json" + ], + "tags": [ + "metrics" + ], + "summary": "Get aggregated metrics", + "parameters": [ + { + "type": "string", + "description": "Event name to filter by", + "name": "event_name", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "Start timestamp (Unix epoch)", + "name": "from", + "in": "query", + "required": true + }, + { + "type": "integer", + "description": "End timestamp (Unix epoch)", + "name": "to", + "in": "query", + "required": true + }, + { + "enum": [ + "channel", + "hour", + "day" + ], + "type": "string", + "description": "Field to group by (channel, hour, day)", + "name": "group_by", + "in": "query" + } + ], + 
"responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/dto.GetMetricsResponse" + } + }, + "400": { + "description": "Bad Request", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + }, + "500": { + "description": "Internal Server Error", + "schema": { + "$ref": "#/definitions/dto.ErrorResponse" + } + } + } + } + } + }, + "definitions": { + "dto.ErrorResponse": { + "type": "object", + "properties": { + "error": { + "type": "string", + "example": "validation_error" + }, + "message": { + "type": "string", + "example": "event_name is required" + } + } + }, + "dto.GetMetricsResponse": { + "type": "object", + "properties": { + "event_name": { + "type": "string", + "example": "product_view" + }, + "from": { + "type": "integer", + "example": 1723475612 + }, + "group_by": { + "type": "string", + "example": "channel" + }, + "groups": { + "type": "array", + "items": { + "$ref": "#/definitions/dto.MetricsGroupData" + } + }, + "to": { + "type": "integer", + "example": 1723562012 + }, + "total_count": { + "type": "integer", + "example": 5000 + }, + "unique_count": { + "type": "integer", + "example": 2500 + } + } + }, + "dto.MetricsGroupData": { + "type": "object", + "properties": { + "group_value": { + "type": "string", + "example": "web" + }, + "total_count": { + "type": "integer", + "example": 1500 + } + } + }, + "dto.PublishBulkEventsResponse": { + "type": "object", + "properties": { + "accepted": { + "type": "integer", + "example": 5 + }, + "errors": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "validation error on event 3" + ] + }, + "event_ids": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "evt_1", + "evt_2", + "evt_3" + ] + }, + "rejected": { + "type": "integer", + "example": 0 + } + } + }, + "dto.PublishEventRequest": { + "type": "object", + "required": [ + "channel", + "event_name", + "timestamp", + "user_id" + ], + "properties": { + "campaign_id": { + "type": 
"string", + "example": "cmp_987" + }, + "channel": { + "type": "string", + "example": "web" + }, + "event_name": { + "type": "string", + "example": "product_view" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "example": { + "price": "129.99", + "product_id": "prod-789" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "electronics", + "homepage", + "flash_sale" + ] + }, + "timestamp": { + "type": "integer", + "example": 1723475612 + }, + "user_id": { + "type": "string", + "example": "user_123" + } + } + }, + "dto.PublishEventResponse": { + "type": "object", + "properties": { + "event_id": { + "type": "string", + "example": "evt_1a2b3c4d5e6f" + }, + "status": { + "type": "string", + "example": "accepted" + } + } + }, + "dto.PublishEventsBulkRequest": { + "type": "object", + "required": [ + "events" + ], + "properties": { + "events": { + "type": "array", + "maxItems": 1000, + "minItems": 1, + "items": { + "$ref": "#/definitions/dto.PublishEventRequest" + } + } + } + } + } +} \ No newline at end of file diff --git a/docs/swagger.yaml b/docs/swagger.yaml new file mode 100644 index 0000000..d070d38 --- /dev/null +++ b/docs/swagger.yaml @@ -0,0 +1,258 @@ +basePath: / +definitions: + dto.ErrorResponse: + properties: + error: + example: validation_error + type: string + message: + example: event_name is required + type: string + type: object + dto.GetMetricsResponse: + properties: + event_name: + example: product_view + type: string + from: + example: 1723475612 + type: integer + group_by: + example: channel + type: string + groups: + items: + $ref: '#/definitions/dto.MetricsGroupData' + type: array + to: + example: 1723562012 + type: integer + total_count: + example: 5000 + type: integer + unique_count: + example: 2500 + type: integer + type: object + dto.MetricsGroupData: + properties: + group_value: + example: web + type: string + total_count: + example: 1500 + type: integer + 
type: object + dto.PublishBulkEventsResponse: + properties: + accepted: + example: 5 + type: integer + errors: + example: + - validation error on event 3 + items: + type: string + type: array + event_ids: + example: + - evt_1 + - evt_2 + - evt_3 + items: + type: string + type: array + rejected: + example: 0 + type: integer + type: object + dto.PublishEventRequest: + properties: + campaign_id: + example: cmp_987 + type: string + channel: + example: web + type: string + event_name: + example: product_view + type: string + metadata: + additionalProperties: + type: string + example: + price: "129.99" + product_id: prod-789 + type: object + tags: + example: + - electronics + - homepage + - flash_sale + items: + type: string + type: array + timestamp: + example: 1723475612 + type: integer + user_id: + example: user_123 + type: string + required: + - channel + - event_name + - timestamp + - user_id + type: object + dto.PublishEventResponse: + properties: + event_id: + example: evt_1a2b3c4d5e6f + type: string + status: + example: accepted + type: string + type: object + dto.PublishEventsBulkRequest: + properties: + events: + items: + $ref: '#/definitions/dto.PublishEventRequest' + maxItems: 1000 + minItems: 1 + type: array + required: + - events + type: object +host: localhost:8080 +info: + contact: {} + description: API for publishing and managing analytics events + title: Event Analytics Service API + version: "1.0" +paths: + /events: + post: + consumes: + - application/json + description: Publish a single analytics event to the queue + parameters: + - description: Event data + in: body + name: event + required: true + schema: + $ref: '#/definitions/dto.PublishEventRequest' + produces: + - application/json + responses: + "202": + description: Accepted + schema: + $ref: '#/definitions/dto.PublishEventResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/dto.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: 
'#/definitions/dto.ErrorResponse' + summary: Publish a single event + tags: + - events + /events/bulk: + post: + consumes: + - application/json + description: Publish multiple analytics events in bulk to the queue + parameters: + - description: Bulk events data + in: body + name: events + required: true + schema: + $ref: '#/definitions/dto.PublishEventsBulkRequest' + produces: + - application/json + responses: + "202": + description: Accepted + schema: + $ref: '#/definitions/dto.PublishBulkEventsResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/dto.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/dto.ErrorResponse' + summary: Publish multiple events + tags: + - events + /health: + get: + description: Check if the service is running + produces: + - application/json + responses: + "200": + description: OK + schema: + additionalProperties: + type: string + type: object + summary: Health check + tags: + - health + /metrics: + get: + description: Retrieve aggregated event metrics with optional grouping by channel, + hour, or day + parameters: + - description: Event name to filter by + in: query + name: event_name + required: true + type: string + - description: Start timestamp (Unix epoch) + in: query + name: from + required: true + type: integer + - description: End timestamp (Unix epoch) + in: query + name: to + required: true + type: integer + - description: Field to group by (channel, hour, day) + enum: + - channel + - hour + - day + in: query + name: group_by + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/dto.GetMetricsResponse' + "400": + description: Bad Request + schema: + $ref: '#/definitions/dto.ErrorResponse' + "500": + description: Internal Server Error + schema: + $ref: '#/definitions/dto.ErrorResponse' + summary: Get aggregated metrics + tags: + - metrics +schemes: +- http +- https +swagger: "2.0" diff --git 
a/elasticmq.conf b/elasticmq.conf new file mode 100644 index 0000000..8fbe611 --- /dev/null +++ b/elasticmq.conf @@ -0,0 +1,40 @@ +include classpath("application.conf") + +node-address { + protocol = http + host = "*" + port = 9324 + context-path = "" +} + +rest-sqs { + enabled = true + bind-port = 9324 + bind-hostname = "0.0.0.0" + sqs-limits = strict +} + +rest-stats { + enabled = true + bind-port = 9325 + bind-hostname = "0.0.0.0" +} + +queues { + events { + defaultVisibilityTimeout = 10 seconds + delay = 0 seconds + receiveMessageWait = 0 seconds + deadLettersQueue { + name = "events-dlq" + maxReceiveCount = 3 + } + } + events-dlq { + defaultVisibilityTimeout = 10 seconds + delay = 0 seconds + receiveMessageWait = 0 seconds + } +} + +queues-path = "/data" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ea7ae6f --- /dev/null +++ b/go.mod @@ -0,0 +1,94 @@ +module github.com/BarkinBalci/event-analytics-service + +go 1.25.5 + +require ( + github.com/BarkinBalci/envconfig v0.0.0-20251224144430-3f9701ee1dff + github.com/ClickHouse/clickhouse-go/v2 v2.42.0 + github.com/aws/aws-sdk-go-v2 v1.41.0 + github.com/aws/aws-sdk-go-v2/config v1.32.6 + github.com/aws/aws-sdk-go-v2/credentials v1.19.6 + github.com/aws/aws-sdk-go-v2/service/sqs v1.42.20 + github.com/gin-gonic/gin v1.11.0 + github.com/stretchr/testify v1.11.1 + github.com/swaggo/files v1.0.1 + github.com/swaggo/gin-swagger v1.6.1 + github.com/swaggo/swag v1.16.6 + go.uber.org/zap v1.27.1 +) + +require ( + github.com/ClickHouse/ch-go v0.69.0 // indirect + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/andybalholm/brotli v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // 
indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect + github.com/aws/smithy-go v1.24.0 // indirect + github.com/bytedance/gopkg v0.1.3 // indirect + github.com/bytedance/sonic v1.14.2 // indirect + github.com/bytedance/sonic/loader v0.4.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.12 // indirect + github.com/gin-contrib/sse v1.1.0 // indirect + github.com/go-faster/city v1.0.1 // indirect + github.com/go-faster/errors v0.7.1 // indirect + github.com/go-openapi/jsonpointer v0.22.4 // indirect + github.com/go-openapi/jsonreference v0.21.4 // indirect + github.com/go-openapi/spec v0.22.3 // indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.30.1 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.19.1 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.2 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + 
github.com/mattn/go-isatty v0.0.20 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/paulmach/orb v0.12.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pierrec/lz4/v4 v4.1.23 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.58.0 // indirect + github.com/segmentio/asm v1.2.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.3.1 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.uber.org/mock v0.6.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/arch v0.23.0 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/mod v0.31.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/tools v0.40.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..37d4af1 --- /dev/null +++ b/go.sum @@ -0,0 +1,283 @@ +github.com/BarkinBalci/envconfig v0.0.0-20251224144430-3f9701ee1dff h1:yuHwtcc4jHD5VkVplYFv6JeV/JjsOEXg1IcyoVzD7Ds= +github.com/BarkinBalci/envconfig v0.0.0-20251224144430-3f9701ee1dff/go.mod h1:smM1iKKmJuYvPnc2uB8QdWmthSibrxfC4SGYTqUpEYE= +github.com/ClickHouse/ch-go v0.69.0 h1:nO0OJkpxOlN/eaXFj0KzjTz5p7vwP1/y3GN4qc5z/iM= +github.com/ClickHouse/ch-go v0.69.0/go.mod h1:9XeZpSAT4S0kVjOpaJ5186b7PY/NH/hhF8R6u0WIjwg= +github.com/ClickHouse/clickhouse-go/v2 v2.42.0 h1:MdujEfIrpXesQUH0k0AnuVtJQXk6RZmxEhsKUCcv5xk= 
+github.com/ClickHouse/clickhouse-go/v2 v2.42.0/go.mod h1:riWnuo4YMVdajYll0q6FzRBomdyCrXyFY3VXeXczA8s= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8= +github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI= +github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE= +github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 
h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.20 h1:qa+1W+Kon3WDwO+8ugco4D9KvO0Pf0KBTn1hN7opIFw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.42.20/go.mod h1:OG0Y3TgC+IeM++ngh+IcEkN24ruGsmRiAP8GUsOhMW8= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M= +github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM= +github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE= +github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980= 
+github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o= +github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw= +github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4= +github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk= +github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= +github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= +github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= +github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= +github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= +github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= +github.com/go-openapi/jsonpointer 
v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= +github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= +github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= +github.com/go-openapi/spec v0.22.3 h1:qRSmj6Smz2rEBxMnLRBMeBWxbbOvuOoElvSvObIgwQc= +github.com/go-openapi/spec v0.22.3/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= 
+github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w= +github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE= +github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn 
v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/paulmach/orb v0.12.0 h1:z+zOwjmG3MyEEqzv92UN49Lg1JFYx0L9GpGKNVDKk1s= +github.com/paulmach/orb v0.12.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pierrec/lz4/v4 v4.1.23 h1:oJE7T90aYBGtFNrI8+KbETnPymobAhzRrR8Mu8n1yfU= +github.com/pierrec/lz4/v4 v4.1.23/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.58.0 
h1:ggY2pvZaVdB9EyojxL1p+5mptkuHyX5MOSv4dgWF4Ug= +github.com/quic-go/quic-go v0.58.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE= +github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg= 
+github.com/swaggo/gin-swagger v1.6.1 h1:Ri06G4gc9N4t4k8hekMigJ9zKTFSlqj/9paAQCQs7cY= +github.com/swaggo/gin-swagger v1.6.1/go.mod h1:LQ+hJStHakCWRiK/YNYtJOu4mR2FP+pxLnILT/qNiTw= +github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI= +github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY= +github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= 
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg= +golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..6d8d75c --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,54 @@ +package config + +import ( + "fmt" + + "github.com/BarkinBalci/envconfig" +) + +type Service struct { + Environment string `envconfig:"SERVICE_ENVIRONMENT" required:"true"` + APIPort string `envconfig:"SERVICE_API_PORT" default:"8080"` + Host string `envconfig:"SERVICE_HOST" default:"localhost:8080"` +} + +type SQS struct { + Endpoint string `envconfig:"SQS_ENDPOINT"` + QueueURL string `envconfig:"SQS_QUEUE_URL" required:"true"` + Region string `envconfig:"SQS_REGION" required:"true"` +} + +type ClickHouse struct { + Host string `envconfig:"CLICKHOUSE_HOST" required:"true"` + Port string `envconfig:"CLICKHOUSE_PORT" required:"true"` + Database string `envconfig:"CLICKHOUSE_DATABASE" required:"true"` + User string `envconfig:"CLICKHOUSE_USER" default:""` + Password string `envconfig:"CLICKHOUSE_PASSWORD" default:""` + UseTLS bool `envconfig:"CLICKHOUSE_USE_TLS" default:"true"` + MaxOpenConns int `envconfig:"CLICKHOUSE_MAX_OPEN_CONNS" default:"5"` + MaxIdleConns int `envconfig:"CLICKHOUSE_MAX_IDLE_CONNS" default:"2"` + ConnMaxLifetime int `envconfig:"CLICKHOUSE_CONN_MAX_LIFETIME" default:"10"` +} + +type Consumer struct { + BatchSizeMin int `envconfig:"CONSUMER_BATCH_SIZE_MIN" default:"100"` + BatchSizeMax int `envconfig:"CONSUMER_BATCH_SIZE_MAX" default:"2000"` + BatchTimeoutSec int `envconfig:"CONSUMER_BATCH_TIMEOUT_SEC" default:"10"` + HealthCheckPort string `envconfig:"CONSUMER_HEALTH_CHECK_PORT" default:"8081"` +} + +type Config struct { + Service Service + SQS SQS + ClickHouse ClickHouse + Consumer Consumer +} + +func Load() (*Config, error) { + var cfg Config + if err := 
envconfig.Process("", &cfg); err != nil { + return nil, fmt.Errorf("failed to process config: %w", err) + } + + return &cfg, nil +} diff --git a/internal/consumer/batch_writer.go b/internal/consumer/batch_writer.go new file mode 100644 index 0000000..2ca6df5 --- /dev/null +++ b/internal/consumer/batch_writer.go @@ -0,0 +1,153 @@ +package consumer + +import ( + "context" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/domain" + "github.com/BarkinBalci/event-analytics-service/internal/repository" +) + +// BatchWriterConfig configures the batch writer +type BatchWriterConfig struct { + MaxBatchSize int + FlushTimeout time.Duration +} + +// BatchWriter handles batching and writing events to the repository +type BatchWriter struct { + repository repository.EventRepository + config BatchWriterConfig + log *zap.Logger +} + +// NewBatchWriter creates a new batch writer +func NewBatchWriter(repo repository.EventRepository, config BatchWriterConfig, log *zap.Logger) *BatchWriter { + return &BatchWriter{ + repository: repo, + config: config, + log: log, + } +} + +// Start begins processing envelopes, batching, and writing to the repository +func (w *BatchWriter) Start(ctx context.Context, in <-chan *Envelope) { + ticker := time.NewTicker(w.config.FlushTimeout) + defer ticker.Stop() + + batch := make([]*Envelope, 0, w.config.MaxBatchSize) + + for { + select { + case <-ctx.Done(): + w.log.Info("Batch writer shutting down") + if len(batch) > 0 { + w.log.Info("Flushing final batch", zap.Int("envelope_count", len(batch))) + w.processBatch(ctx, batch) + } + return + + case envelope, ok := <-in: + if !ok { + w.log.Info("Batch writer input channel closed") + if len(batch) > 0 { + w.log.Info("Flushing final batch", zap.Int("envelope_count", len(batch))) + w.processBatch(ctx, batch) + } + return + } + + batch = append(batch, envelope) + + if len(batch) >= w.config.MaxBatchSize { + w.log.Info("Batch size threshold reached", 
zap.Int("batch_size", len(batch))) + w.processBatch(ctx, batch) + batch = make([]*Envelope, 0, w.config.MaxBatchSize) + ticker.Reset(w.config.FlushTimeout) + } + + case <-ticker.C: + if len(batch) > 0 { + w.log.Info("Batch timeout reached", zap.Int("envelope_count", len(batch))) + w.processBatch(ctx, batch) + batch = make([]*Envelope, 0, w.config.MaxBatchSize) + } + } + } +} + +// processBatch handles the atomic transaction: insert + ack/nack +func (w *BatchWriter) processBatch(ctx context.Context, envelopes []*Envelope) { + if len(envelopes) == 0 { + return + } + + events := make([]*domain.Event, len(envelopes)) + for i, env := range envelopes { + events[i] = env.Event + } + + insertedCount, err := w.repository.InsertBatch(ctx, events) + + if err != nil { + w.log.Error("Failed to insert batch", + zap.Error(err), + zap.Int("event_count", len(events))) + w.nackAll(ctx, envelopes) + return + } + + if insertedCount != len(events) { + w.log.Warn("Partial insert success", + zap.Int("inserted", insertedCount), + zap.Int("expected", len(events))) + w.nackAll(ctx, envelopes) + return + } + + w.log.Info("Successfully inserted events", + zap.Int("count", insertedCount)) + w.ackAll(ctx, envelopes) +} + +// ackAll acknowledges all envelopes (deletes from SQS) +func (w *BatchWriter) ackAll(ctx context.Context, envelopes []*Envelope) { + for _, env := range envelopes { + if err := env.Ack(ctx); err != nil { + w.log.Error("Failed to ack envelope", zap.Error(err)) + } + } +} + +// nackAll negatively acknowledges all envelopes (leaves in SQS for retry) +func (w *BatchWriter) nackAll(ctx context.Context, envelopes []*Envelope) { + for _, env := range envelopes { + if err := env.Nack(ctx); err != nil { + w.log.Error("Failed to nack envelope", zap.Error(err)) + } + } +} + +// AckBatch is a helper for batch acknowledgment to SQS +func (w *BatchWriter) AckBatch(ctx context.Context, envelopes []*Envelope) error { + if len(envelopes) == 0 { + return nil + } + + var lastErr error + for _, 
env := range envelopes { + if err := env.Ack(ctx); err != nil { + w.log.Error("Failed to ack envelope", zap.Error(err)) + lastErr = err + } + } + + if lastErr != nil { + return fmt.Errorf("some acknowledgments failed: %w", lastErr) + } + + return nil +} diff --git a/internal/consumer/batch_writer_test.go b/internal/consumer/batch_writer_test.go new file mode 100644 index 0000000..290ea21 --- /dev/null +++ b/internal/consumer/batch_writer_test.go @@ -0,0 +1,380 @@ +package consumer + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/domain" + "github.com/BarkinBalci/event-analytics-service/internal/repository" +) + +// MockEventRepository is a mock implementation of repository.EventRepository +type MockEventRepository struct { + mock.Mock +} + +func (m *MockEventRepository) InsertBatch(ctx context.Context, events []*domain.Event) (int, error) { + args := m.Called(ctx, events) + return args.Int(0), args.Error(1) +} + +func (m *MockEventRepository) InitSchema(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func (m *MockEventRepository) Ping(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func (m *MockEventRepository) Close() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockEventRepository) GetMetrics(ctx context.Context, query repository.MetricsQuery) (*repository.MetricsResult, error) { + args := m.Called(ctx, query) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*repository.MetricsResult), args.Error(1) +} + +func createTestEnvelope(eventID string) *Envelope { + event := &domain.Event{ + EventID: eventID, + EventName: "test_event", + UserID: "user123", + Timestamp: testTimestamp, + } + + ack := func(ctx context.Context) error { + return nil + } + + nack := func(ctx context.Context) error { + return nil + } + + envelope := 
NewEnvelope(event, ack, nack) + return envelope +} + +func TestBatchWriter_Start_BatchSizeThreshold(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 3, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 3 + })).Return(3, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + + // Send 3 envelopes to trigger batch size threshold + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + in <- createTestEnvelope("3") + + // Give time for processing + time.Sleep(100 * time.Millisecond) + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_TimeoutFlush(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 10, + FlushTimeout: 50 * time.Millisecond, + } + + writer := NewBatchWriter(mockRepo, config, log) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 2 + })).Return(2, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + + // Send 2 envelopes (less than max batch size) + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + + // Wait for timeout to trigger flush + time.Sleep(100 * time.Millisecond) + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_InsertSuccess(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 2, + FlushTimeout: 10 * time.Second, + } + + 
writer := NewBatchWriter(mockRepo, config, log) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 2 + })).Return(2, nil) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + + // Send 2 envelopes + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + + // Wait for processing + time.Sleep(50 * time.Millisecond) + + mockRepo.AssertExpectations(t) +} + +func TestBatchWriter_Start_InsertFailure(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 2, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + insertErr := errors.New("database connection error") + mockRepo.On("InsertBatch", mock.Anything, mock.Anything).Return(0, insertErr) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + + // Send 2 envelopes + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + + // Wait for processing + time.Sleep(50 * time.Millisecond) + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_PartialInsert(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 3, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + // Repository inserts only 2 out of 3 events + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 3 + })).Return(2, nil) // Partial success + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + 
+ // Send 3 envelopes + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + in <- createTestEnvelope("3") + + // Wait for processing + time.Sleep(50 * time.Millisecond) + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_GracefulShutdown(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 10, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 2 + })).Return(2, nil) + + ctx, cancel := context.WithCancel(context.Background()) + + in := make(chan *Envelope, 5) + done := make(chan bool) + + go func() { + writer.Start(ctx, in) + done <- true + }() + + // Send 2 envelopes + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + + // Give time for messages to be received + time.Sleep(10 * time.Millisecond) + + // Cancel context to trigger graceful shutdown + cancel() + + // Wait for shutdown + select { + case <-done: + // Shutdown completed + case <-time.After(200 * time.Millisecond): + t.Fatal("Graceful shutdown took too long") + } + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_InputChannelClosed(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 10, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 2 + })).Return(2, nil) + + ctx := context.Background() + + in := make(chan *Envelope, 5) + done := make(chan bool) + + go func() { + writer.Start(ctx, in) + done <- true + }() + + // Send 2 envelopes + in <- 
createTestEnvelope("1") + in <- createTestEnvelope("2") + + // Close input channel + close(in) + + // Wait for shutdown + select { + case <-done: + // Shutdown completed + case <-time.After(200 * time.Millisecond): + t.Fatal("Shutdown took too long after input channel closed") + } + + mockRepo.AssertExpectations(t) + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestBatchWriter_Start_EmptyBatchNotFlushed(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 10, + FlushTimeout: 50 * time.Millisecond, + } + + writer := NewBatchWriter(mockRepo, config, log) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan *Envelope, 5) + go writer.Start(ctx, in) + + // Don't send any envelopes + + // Wait for timeout + <-ctx.Done() + + // InsertBatch should not be called for empty batch + mockRepo.AssertNotCalled(t, "InsertBatch") +} + +func TestBatchWriter_Start_MultipleBatches(t *testing.T) { + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + config := BatchWriterConfig{ + MaxBatchSize: 2, + FlushTimeout: 10 * time.Second, + } + + writer := NewBatchWriter(mockRepo, config, log) + + // Expect two batches of 2 events each + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 2 + })).Return(2, nil).Times(2) + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + in := make(chan *Envelope, 10) + go writer.Start(ctx, in) + + // Send 4 envelopes (should create 2 batches) + in <- createTestEnvelope("1") + in <- createTestEnvelope("2") + in <- createTestEnvelope("3") + in <- createTestEnvelope("4") + + // Wait for processing + time.Sleep(100 * time.Millisecond) + + mockRepo.AssertExpectations(t) + mockRepo.AssertNumberOfCalls(t, "InsertBatch", 2) +} diff --git a/internal/consumer/consumer.go 
b/internal/consumer/consumer.go new file mode 100644 index 0000000..6498590 --- /dev/null +++ b/internal/consumer/consumer.go @@ -0,0 +1,75 @@ +package consumer + +import ( + "context" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/config" + "github.com/BarkinBalci/event-analytics-service/internal/queue" + "github.com/BarkinBalci/event-analytics-service/internal/repository" +) + +// Consumer orchestrates a pipeline of stages to process SQS messages +type Consumer struct { + receiver *Receiver + parser *ParserStage + batchWriter *BatchWriter +} + +// NewConsumer creates a new consumer with a pipeline architecture +func NewConsumer(cfg *config.Config, queueConsumer queue.QueueConsumer, repo repository.EventRepository, log *zap.Logger) *Consumer { + receiver := NewReceiver(queueConsumer, ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + }, log) + + parser := NewParserStage(queueConsumer, NewJSONEventParser(), log) + + batchWriter := NewBatchWriter(repo, BatchWriterConfig{ + MaxBatchSize: cfg.Consumer.BatchSizeMax, + FlushTimeout: time.Duration(cfg.Consumer.BatchTimeoutSec) * time.Second, + }, log) + + return &Consumer{ + receiver: receiver, + parser: parser, + batchWriter: batchWriter, + } +} + +// Start begins the consumer pipeline +func (c *Consumer) Start(ctx context.Context) error { + messageChan := make(chan types.Message, 100) + envelopeChan := make(chan *Envelope, 100) + + var wg sync.WaitGroup + + // Start all pipeline stages + wg.Add(3) + + // Stage 1: Receive messages from SQS + go func() { + defer wg.Done() + c.receiver.Start(ctx, messageChan) + }() + + // Stage 2: Parse messages into envelopes + go func() { + defer wg.Done() + c.parser.Start(ctx, messageChan, envelopeChan) + }() + + // Stage 3: Batch and write to the repository + go func() { + defer wg.Done() + c.batchWriter.Start(ctx, envelopeChan) + }() + + wg.Wait() + 
return nil +} diff --git a/internal/consumer/consumer_test.go b/internal/consumer/consumer_test.go new file mode 100644 index 0000000..9dcf91d --- /dev/null +++ b/internal/consumer/consumer_test.go @@ -0,0 +1,185 @@ +package consumer + +import ( + "context" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/config" + "github.com/BarkinBalci/event-analytics-service/internal/domain" +) + +func TestConsumer_Start_PipelineCoordination(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockRepo := new(MockEventRepository) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + cfg := &config.Config{ + Consumer: config.Consumer{ + BatchSizeMax: 10, + BatchTimeoutSec: 1, + }, + } + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + messages := []types.Message{ + { + MessageId: aws.String("msg-1"), + Body: aws.String(`{"event_id": "1"}`), + ReceiptHandle: aws.String("receipt-1"), + }, + } + + event := &domain.Event{ + EventID: "1", + EventName: "test_event", + UserID: "user123", + Timestamp: testTimestamp, + } + + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: messages}, nil).Once() + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + mockParser.On("Parse", []byte(`{"event_id": "1"}`)).Return(event, nil) + mockConsumer.On("DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")). 
+ Return(&sqs.DeleteMessageOutput{}, nil) + + mockRepo.On("InsertBatch", mock.Anything, mock.MatchedBy(func(events []*domain.Event) bool { + return len(events) == 1 && events[0].EventID == "1" + })).Return(1, nil) + + // Create consumer with mocked components + receiver := NewReceiver(mockConsumer, ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + }, log) + + parser := NewParserStage(mockConsumer, mockParser, log) + + batchWriter := NewBatchWriter(mockRepo, BatchWriterConfig{ + MaxBatchSize: cfg.Consumer.BatchSizeMax, + FlushTimeout: time.Duration(cfg.Consumer.BatchTimeoutSec) * time.Second, + }, log) + + consumer := &Consumer{ + receiver: receiver, + parser: parser, + batchWriter: batchWriter, + } + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + // Start consumer + err := consumer.Start(ctx) + + assert.NoError(t, err) + + // Wait a bit to ensure processing + time.Sleep(150 * time.Millisecond) + + mockRepo.AssertCalled(t, "InsertBatch", mock.Anything, mock.Anything) +} + +func TestConsumer_Start_GracefulShutdown(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + cfg := &config.Config{ + Consumer: config.Consumer{ + BatchSizeMax: 10, + BatchTimeoutSec: 1, + }, + } + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue").Maybe() + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). 
+ Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + consumer := NewConsumer(cfg, mockConsumer, mockRepo, log) + + ctx, cancel := context.WithCancel(context.Background()) + + done := make(chan bool) + go func() { + err := consumer.Start(ctx) + assert.NoError(t, err) + done <- true + }() + + // Let it run for a bit + time.Sleep(50 * time.Millisecond) + + // Cancel context to trigger shutdown + cancel() + + // Wait for shutdown + select { + case <-done: + // Shutdown completed successfully + case <-time.After(500 * time.Millisecond): + t.Fatal("Graceful shutdown took too long") + } +} + +func TestConsumer_NewConsumer_ComponentInitialization(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + cfg := &config.Config{ + Consumer: config.Consumer{ + BatchSizeMax: 100, + BatchTimeoutSec: 5, + }, + } + + consumer := NewConsumer(cfg, mockConsumer, mockRepo, log) + + assert.NotNil(t, consumer) + assert.NotNil(t, consumer.receiver) + assert.NotNil(t, consumer.parser) + assert.NotNil(t, consumer.batchWriter) +} + +func TestConsumer_Start_EmptyQueueScenario(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockRepo := new(MockEventRepository) + log := zap.NewNop() + + cfg := &config.Config{ + Consumer: config.Consumer{ + BatchSizeMax: 10, + BatchTimeoutSec: 1, + }, + } + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). 
+ Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + consumer := NewConsumer(cfg, mockConsumer, mockRepo, log) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + err := consumer.Start(ctx) + + assert.NoError(t, err) + + // InsertBatch should not be called since no messages were processed + mockRepo.AssertNotCalled(t, "InsertBatch") +} diff --git a/internal/consumer/envelope.go b/internal/consumer/envelope.go new file mode 100644 index 0000000..a5de9cd --- /dev/null +++ b/internal/consumer/envelope.go @@ -0,0 +1,39 @@ +package consumer + +import ( + "context" + + "github.com/BarkinBalci/event-analytics-service/internal/domain" +) + +// Envelope wraps a domain event with acknowledgment callbacks +type Envelope struct { + Event *domain.Event + ack func(context.Context) error + nack func(context.Context) error +} + +// NewEnvelope creates a new message envelope +func NewEnvelope(event *domain.Event, ack, nack func(context.Context) error) *Envelope { + return &Envelope{ + Event: event, + ack: ack, + nack: nack, + } +} + +// Ack acknowledges successful processing +func (e *Envelope) Ack(ctx context.Context) error { + if e.ack != nil { + return e.ack(ctx) + } + return nil +} + +// Nack negatively acknowledges processing +func (e *Envelope) Nack(ctx context.Context) error { + if e.nack != nil { + return e.nack(ctx) + } + return nil +} diff --git a/internal/consumer/interfaces.go b/internal/consumer/interfaces.go new file mode 100644 index 0000000..c56553b --- /dev/null +++ b/internal/consumer/interfaces.go @@ -0,0 +1,10 @@ +package consumer + +import ( + "github.com/BarkinBalci/event-analytics-service/internal/domain" +) + +// MessageParser defines the interface for parsing raw message bytes into events +type MessageParser interface { + Parse(body []byte) (*domain.Event, error) +} diff --git a/internal/consumer/parser.go b/internal/consumer/parser.go new file mode 100644 index 
0000000..35fa85f --- /dev/null +++ b/internal/consumer/parser.go @@ -0,0 +1,73 @@ +package consumer + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/BarkinBalci/event-analytics-service/internal/domain" +) + +// JSONEventParser implements MessageParser for JSON-formatted event messages +type JSONEventParser struct{} + +// NewJSONEventParser creates a new JSON event parser +func NewJSONEventParser() *JSONEventParser { + return &JSONEventParser{} +} + +// Parse parses a JSON message body into an Event +func (p *JSONEventParser) Parse(body []byte) (*domain.Event, error) { + var msgBody map[string]interface{} + if err := json.Unmarshal(body, &msgBody); err != nil { + return nil, fmt.Errorf("failed to unmarshal message body: %w", err) + } + + metadataJSON := "{}" + if metadata, ok := msgBody["metadata"].(map[string]interface{}); ok && len(metadata) > 0 { + metadataBytes, err := json.Marshal(metadata) + if err != nil { + return nil, fmt.Errorf("failed to marshal metadata: %w", err) + } + metadataJSON = string(metadataBytes) + } + + var tags []string + if tagsInterface, ok := msgBody["tags"].([]interface{}); ok { + for _, t := range tagsInterface { + if tagStr, ok := t.(string); ok { + tags = append(tags, tagStr) + } + } + } + + event := &domain.Event{ + EventID: getStringField(msgBody, "event_id"), + EventName: getStringField(msgBody, "event_name"), + Channel: getStringField(msgBody, "channel"), + CampaignID: getStringField(msgBody, "campaign_id"), + UserID: getStringField(msgBody, "user_id"), + Timestamp: getInt64Field(msgBody, "timestamp"), + Tags: tags, + Metadata: metadataJSON, + ProcessedAt: time.Now(), + Version: uint64(time.Now().UnixNano()), + } + + return event, nil +} + +// Helper functions for extracting fields from parsed JSON +func getStringField(m map[string]interface{}, key string) string { + if val, ok := m[key].(string); ok { + return val + } + return "" +} + +func getInt64Field(m map[string]interface{}, key string) int64 { + if val, ok := 
m[key].(float64); ok {
+		return int64(val)
+	}
+	return 0
+}
diff --git a/internal/consumer/parser_stage.go b/internal/consumer/parser_stage.go
new file mode 100644
index 0000000..f180195
--- /dev/null
+++ b/internal/consumer/parser_stage.go
@@ -0,0 +1,106 @@
+package consumer
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awssqs "github.com/aws/aws-sdk-go-v2/service/sqs"
+	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/queue"
+)
+
+// ParserStage handles parsing SQS messages into domain envelopes.
+type ParserStage struct {
+	consumer queue.QueueConsumer
+	parser   MessageParser
+	log      *zap.Logger
+}
+
+// NewParserStage creates a new parser stage.
+func NewParserStage(consumer queue.QueueConsumer, parser MessageParser, log *zap.Logger) *ParserStage {
+	return &ParserStage{
+		consumer: consumer,
+		parser:   parser,
+		log:      log,
+	}
+}
+
+// Start begins parsing messages and outputs envelopes.
+// It closes out when ctx is cancelled or in is closed.
+func (p *ParserStage) Start(ctx context.Context, in <-chan types.Message, out chan<- *Envelope) {
+	defer close(out)
+
+	for {
+		select {
+		case <-ctx.Done():
+			p.log.Info("Parser stage shutting down")
+			return
+		case msg, ok := <-in:
+			if !ok {
+				p.log.Info("Parser stage input channel closed")
+				return
+			}
+
+			envelope := p.parseMessage(ctx, msg)
+			if envelope == nil {
+				continue
+			}
+
+			select {
+			case <-ctx.Done():
+				return
+			case out <- envelope:
+				// Envelope sent to next stage
+			}
+		}
+	}
+}
+
+// parseMessage parses a single SQS message into an envelope.
+// Unparseable messages are deleted from the queue (they would never
+// succeed on redelivery) and nil is returned.
+func (p *ParserStage) parseMessage(ctx context.Context, msg types.Message) *Envelope {
+	body := aws.ToString(msg.Body)
+	event, err := p.parser.Parse([]byte(body))
+
+	if err != nil {
+		p.log.Warn("Failed to parse message",
+			zap.String("message_id", aws.ToString(msg.MessageId)),
+			zap.Error(err))
+		if err := p.deleteMessage(ctx, msg); err != nil {
+			p.log.Error("Failed to delete malformed message",
+				zap.String("message_id", aws.ToString(msg.MessageId)),
+				zap.Error(err))
+		}
+		return nil
+	}
+
+	ack := func(ctx context.Context) error {
+		return p.deleteMessage(ctx, msg)
+	}
+
+	nack := func(ctx context.Context) error {
+		// TODO: While the messages would become visible on their own maybe something could be done to reduce the time in here.
+		return nil
+	}
+
+	return NewEnvelope(event, ack, nack)
+}
+
+// deleteMessage deletes a message from SQS
+func (p *ParserStage) deleteMessage(ctx context.Context, msg types.Message) error {
+	_, err := p.consumer.DeleteMessage(ctx, &awssqs.DeleteMessageInput{
+		QueueUrl:      aws.String(p.consumer.QueueURL()),
+		ReceiptHandle: msg.ReceiptHandle,
+	})
+	if err != nil {
+		p.log.Error("Failed to delete message",
+			zap.String("message_id", aws.ToString(msg.MessageId)),
+			zap.Error(err))
+		return err
+	}
+	// Debug, not Info: this runs for every successful ack as well as for
+	// malformed messages, so the previous "Deleted malformed message" Info
+	// log was misleading and noisy on the happy path.
+	p.log.Debug("Deleted message from SQS",
+		zap.String("message_id", aws.ToString(msg.MessageId)))
+	return nil
+}
diff --git a/internal/consumer/parser_stage_test.go b/internal/consumer/parser_stage_test.go
new file mode 100644
index 0000000..fa4dd3f
--- /dev/null
+++ b/internal/consumer/parser_stage_test.go
@@ -0,0 +1,327 @@
+package consumer
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sqs"
+	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/domain"
+)
+
+const (
+	testTimestamp int64 = 1766702552
+)
+
+// MockMessageParser is a mock implementation of MessageParser
+type MockMessageParser struct {
+	mock.Mock
+}
+
+func (m *MockMessageParser) Parse(body []byte) (*domain.Event, error) {
+	args := m.Called(body)
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+	return args.Get(0).(*domain.Event), args.Error(1)
+}
+
+func TestParserStage_Start_Success(t *testing.T) {
+	mockConsumer :=
new(MockQueueConsumer) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + mockConsumer.On("DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")). + Return(&sqs.DeleteMessageOutput{}, nil).Maybe() + + message := types.Message{ + MessageId: aws.String("msg-1"), + Body: aws.String(`{"event_id": "1"}`), + ReceiptHandle: aws.String("receipt-1"), + } + + event := &domain.Event{ + EventID: "1", + EventName: "test_event", + UserID: "user123", + Timestamp: testTimestamp, + } + + mockParser.On("Parse", []byte(`{"event_id": "1"}`)).Return(event, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + in := make(chan types.Message, 1) + out := make(chan *Envelope, 1) + + go parserStage.Start(ctx, in, out) + + // Send message + in <- message + close(in) + + // Receive envelope + envelope := <-out + + assert.NotNil(t, envelope) + assert.Equal(t, "1", envelope.Event.EventID) + assert.Equal(t, "test_event", envelope.Event.EventName) + + mockParser.AssertExpectations(t) +} + +func TestParserStage_Start_MalformedMessage(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + mockConsumer.On("DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")). 
+ Return(&sqs.DeleteMessageOutput{}, nil) + + message := types.Message{ + MessageId: aws.String("msg-1"), + Body: aws.String(`{invalid json}`), + ReceiptHandle: aws.String("receipt-1"), + } + + parseErr := errors.New("invalid JSON format") + mockParser.On("Parse", []byte(`{invalid json}`)).Return(nil, parseErr) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan types.Message, 1) + out := make(chan *Envelope, 1) + + go parserStage.Start(ctx, in, out) + + // Send malformed message + in <- message + + // Wait for processing before closing input + time.Sleep(20 * time.Millisecond) + close(in) + + // Wait for output channel to close (which happens when input closes) + timeout := time.After(100 * time.Millisecond) + envelopeReceived := false + + for { + select { + case envelope, ok := <-out: + if !ok { + // Channel closed, exit loop + goto done + } + if envelope != nil { + envelopeReceived = true + t.Fatalf("Expected no envelope for malformed message, but got: %v", envelope) + } + case <-timeout: + goto done + } + } + +done: + assert.False(t, envelopeReceived, "Should not receive any envelope for malformed message") + mockParser.AssertExpectations(t) + mockConsumer.AssertCalled(t, "DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")) +} + +func TestParserStage_Start_DeleteMessageFailure(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + deleteErr := errors.New("failed to delete message from SQS") + mockConsumer.On("DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")). 
+ Return(nil, deleteErr) + + message := types.Message{ + MessageId: aws.String("msg-1"), + Body: aws.String(`{invalid}`), + ReceiptHandle: aws.String("receipt-1"), + } + + parseErr := errors.New("invalid JSON") + mockParser.On("Parse", []byte(`{invalid}`)).Return(nil, parseErr) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + in := make(chan types.Message, 1) + out := make(chan *Envelope, 1) + + go parserStage.Start(ctx, in, out) + + // Send message + in <- message + + // Wait for processing before closing input + time.Sleep(20 * time.Millisecond) + close(in) + + // Wait for output channel to close (which happens when input closes) + timeout := time.After(100 * time.Millisecond) + envelopeReceived := false + + for { + select { + case envelope, ok := <-out: + if !ok { + // Channel closed, exit loop + goto done + } + if envelope != nil { + envelopeReceived = true + t.Fatalf("Expected no envelope, but got: %v", envelope) + } + case <-timeout: + goto done + } + } + +done: + assert.False(t, envelopeReceived, "Should not receive any envelope for malformed message") + mockParser.AssertExpectations(t) + mockConsumer.AssertCalled(t, "DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")) +} + +func TestParserStage_Start_ContextCancellation(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + ctx, cancel := context.WithCancel(context.Background()) + + in := make(chan types.Message) + out := make(chan *Envelope, 1) + + // Cancel immediately + cancel() + + parserStage.Start(ctx, in, out) + + // Output channel should be closed + _, ok := <-out + assert.False(t, ok, "Output channel should be closed after context cancellation") +} + +func TestParserStage_Start_InputChannelClosed(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockParser := 
new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + ctx := context.Background() + + in := make(chan types.Message) + out := make(chan *Envelope, 1) + + // Close input channel immediately + close(in) + + parserStage.Start(ctx, in, out) + + // Output channel should be closed + _, ok := <-out + assert.False(t, ok, "Output channel should be closed when input channel is closed") +} + +func TestParserStage_Start_MultipleMessages(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + mockParser := new(MockMessageParser) + log := zap.NewNop() + + parserStage := NewParserStage(mockConsumer, mockParser, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + mockConsumer.On("DeleteMessage", mock.Anything, mock.AnythingOfType("*sqs.DeleteMessageInput")). + Return(&sqs.DeleteMessageOutput{}, nil).Maybe() + + messages := []types.Message{ + { + MessageId: aws.String("msg-1"), + Body: aws.String(`{"event_id": "1"}`), + ReceiptHandle: aws.String("receipt-1"), + }, + { + MessageId: aws.String("msg-2"), + Body: aws.String(`{invalid}`), + ReceiptHandle: aws.String("receipt-2"), + }, + { + MessageId: aws.String("msg-3"), + Body: aws.String(`{"event_id": "3"}`), + ReceiptHandle: aws.String("receipt-3"), + }, + } + + event1 := &domain.Event{EventID: "1", EventName: "event1"} + event3 := &domain.Event{EventID: "3", EventName: "event3"} + + mockParser.On("Parse", []byte(`{"event_id": "1"}`)).Return(event1, nil) + mockParser.On("Parse", []byte(`{invalid}`)).Return(nil, errors.New("parse error")) + mockParser.On("Parse", []byte(`{"event_id": "3"}`)).Return(event3, nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + in := make(chan types.Message, 3) + out := make(chan *Envelope, 3) + + go parserStage.Start(ctx, in, out) + + // Send messages + for _, msg := range messages { + in <- msg + } + close(in) + + // Collect envelopes + var envelopes 
[]*Envelope + timeout := time.After(100 * time.Millisecond) + done := false + + for !done { + select { + case envelope, ok := <-out: + if !ok { + done = true + break + } + envelopes = append(envelopes, envelope) + case <-timeout: + done = true + } + } + + // Should receive 2 valid envelopes (msg-1 and msg-3), msg-2 should be deleted + assert.Len(t, envelopes, 2) + assert.Equal(t, "1", envelopes[0].Event.EventID) + assert.Equal(t, "3", envelopes[1].Event.EventID) + + mockParser.AssertExpectations(t) + mockConsumer.AssertNumberOfCalls(t, "DeleteMessage", 1) // Only for malformed message +} diff --git a/internal/consumer/receiver.go b/internal/consumer/receiver.go new file mode 100644 index 0000000..2b78ac0 --- /dev/null +++ b/internal/consumer/receiver.go @@ -0,0 +1,79 @@ +package consumer + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awssqs "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/queue" +) + +// ReceiverConfig configures the SQS receiver +type ReceiverConfig struct { + MaxMessages int32 + WaitTimeSeconds int32 + BufferSize int +} + +// Receiver handles receiving messages from SQS +type Receiver struct { + consumer queue.QueueConsumer + config ReceiverConfig + log *zap.Logger +} + +// NewReceiver creates a new SQS receiver +func NewReceiver(consumer queue.QueueConsumer, config ReceiverConfig, log *zap.Logger) *Receiver { + return &Receiver{ + consumer: consumer, + config: config, + log: log, + } +} + +// Start begins receiving messages and sends them to the output channel +func (r *Receiver) Start(ctx context.Context, out chan<- types.Message) { + defer close(out) + + for { + select { + case <-ctx.Done(): + r.log.Info("Receiver shutting down") + return + default: + result, err := r.consumer.ReceiveMessages(ctx, &awssqs.ReceiveMessageInput{ + QueueUrl: aws.String(r.consumer.QueueURL()), + 
MaxNumberOfMessages: r.config.MaxMessages, + WaitTimeSeconds: r.config.WaitTimeSeconds, + MessageAttributeNames: []string{"All"}, + }) + + if err != nil { + r.log.Error("Error receiving messages from SQS", zap.Error(err)) + time.Sleep(1 * time.Second) + continue + } + + if len(result.Messages) == 0 { + continue + } + + r.log.Info("Received messages from SQS", zap.Int("message_count", len(result.Messages))) + + // Send messages to the next stage + for _, msg := range result.Messages { + select { + case <-ctx.Done(): + r.log.Info("Receiver shutting down while sending messages") + return + case out <- msg: + // Message sent to next stage + } + } + } + } +} diff --git a/internal/consumer/receiver_test.go b/internal/consumer/receiver_test.go new file mode 100644 index 0000000..9ba2fef --- /dev/null +++ b/internal/consumer/receiver_test.go @@ -0,0 +1,260 @@ +package consumer + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.uber.org/zap" +) + +// MockQueueConsumer is a mock implementation of queue.QueueConsumer +type MockQueueConsumer struct { + mock.Mock +} + +func (m *MockQueueConsumer) ReceiveMessages(ctx context.Context, input *sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) { + args := m.Called(ctx, input) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*sqs.ReceiveMessageOutput), args.Error(1) +} + +func (m *MockQueueConsumer) DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) { + args := m.Called(ctx, input) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*sqs.DeleteMessageOutput), args.Error(1) +} + +func (m *MockQueueConsumer) QueueURL() string { + args := m.Called() + return args.String(0) +} + +func 
TestReceiver_Start_Success(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + log := zap.NewNop() + + config := ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + } + + receiver := NewReceiver(mockConsumer, config, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + messages := []types.Message{ + { + MessageId: aws.String("msg-1"), + Body: aws.String(`{"event_id": "1"}`), + }, + { + MessageId: aws.String("msg-2"), + Body: aws.String(`{"event_id": "2"}`), + }, + } + + // First call returns messages, second call returns empty (to trigger context cancellation) + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: messages}, nil).Once() + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + out := make(chan types.Message, 10) + + go receiver.Start(ctx, out) + + // Collect messages from output channel + var receivedMessages []types.Message + timeout := time.After(200 * time.Millisecond) + done := false + + for !done { + select { + case msg, ok := <-out: + if !ok { + done = true + break + } + receivedMessages = append(receivedMessages, msg) + case <-timeout: + done = true + } + } + + assert.Len(t, receivedMessages, 2) + assert.Equal(t, "msg-1", *receivedMessages[0].MessageId) + assert.Equal(t, "msg-2", *receivedMessages[1].MessageId) +} + +func TestReceiver_Start_SQSReceiveError(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + log := zap.NewNop() + + config := ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + } + + receiver := NewReceiver(mockConsumer, config, log) + + 
mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + receiveErr := errors.New("SQS connection error") + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(nil, receiveErr).Once() + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + out := make(chan types.Message, 10) + + go receiver.Start(ctx, out) + + // Wait for context to cancel + <-ctx.Done() + + // Verify no messages were sent + select { + case _, ok := <-out: + if ok { + t.Fatal("Expected no messages but got one") + } + default: + // Channel is empty, which is expected + } + + mockConsumer.AssertCalled(t, "ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")) +} + +func TestReceiver_Start_ContextCancellation(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + log := zap.NewNop() + + config := ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + } + + receiver := NewReceiver(mockConsumer, config, log) + + ctx, cancel := context.WithCancel(context.Background()) + out := make(chan types.Message, 10) + + // Cancel immediately + cancel() + + receiver.Start(ctx, out) + + // Channel should be closed + _, ok := <-out + assert.False(t, ok, "Channel should be closed after context cancellation") +} + +func TestReceiver_Start_EmptyMessages(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + log := zap.NewNop() + + config := ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 100, + } + + receiver := NewReceiver(mockConsumer, config, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + // Return empty messages + mockConsumer.On("ReceiveMessages", 
mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + out := make(chan types.Message, 10) + + go receiver.Start(ctx, out) + + // Wait for context to cancel + <-ctx.Done() + + // Verify no messages were sent + select { + case msg, ok := <-out: + if ok { + t.Fatalf("Expected no messages but got: %v", msg) + } + default: + // Channel might not be closed yet, that's fine + } + + mockConsumer.AssertCalled(t, "ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")) +} + +func TestReceiver_Start_BufferBackpressure(t *testing.T) { + mockConsumer := new(MockQueueConsumer) + log := zap.NewNop() + + config := ReceiverConfig{ + MaxMessages: 10, + WaitTimeSeconds: 20, + BufferSize: 2, // Small buffer + } + + receiver := NewReceiver(mockConsumer, config, log) + + mockConsumer.On("QueueURL").Return("https://sqs.eu-central-1.amazonaws.com/123/test-queue") + + // Create more messages than buffer can hold + messages := make([]types.Message, 5) + for i := 0; i < 5; i++ { + messages[i] = types.Message{ + MessageId: aws.String("msg-" + string(rune(i+'0'))), + Body: aws.String(`{"event_id": "` + string(rune(i+'0')) + `"}`), + } + } + + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). + Return(&sqs.ReceiveMessageOutput{Messages: messages}, nil).Once() + mockConsumer.On("ReceiveMessages", mock.Anything, mock.AnythingOfType("*sqs.ReceiveMessageInput")). 
+ Return(&sqs.ReceiveMessageOutput{Messages: []types.Message{}}, nil).Maybe() + + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + out := make(chan types.Message, 2) // Small buffer + + go receiver.Start(ctx, out) + + // Slowly consume messages + var receivedMessages []types.Message + for i := 0; i < 5; i++ { + select { + case msg := <-out: + receivedMessages = append(receivedMessages, msg) + time.Sleep(10 * time.Millisecond) // Simulate slow consumer + case <-ctx.Done(): + } + } + + assert.GreaterOrEqual(t, len(receivedMessages), 2, "Should receive at least some messages even with backpressure") +} diff --git a/internal/domain/event.go b/internal/domain/event.go new file mode 100644 index 0000000..65f932d --- /dev/null +++ b/internal/domain/event.go @@ -0,0 +1,17 @@ +package domain + +import "time" + +// Event represents an event stored in ClickHouse +type Event struct { + EventID string `ch:"event_id"` + EventName string `ch:"event_name"` + Channel string `ch:"channel"` + CampaignID string `ch:"campaign_id"` + UserID string `ch:"user_id"` + Timestamp int64 `ch:"timestamp"` + Tags []string `ch:"tags"` + Metadata string `ch:"metadata"` + ProcessedAt time.Time `ch:"processed_at"` + Version uint64 `ch:"version"` +} diff --git a/internal/dto/request.go b/internal/dto/request.go new file mode 100644 index 0000000..cdcedca --- /dev/null +++ b/internal/dto/request.go @@ -0,0 +1,25 @@ +package dto + +// PublishEventRequest represents a publish event request +type PublishEventRequest struct { + EventName string `json:"event_name" binding:"required" example:"product_view"` + Channel string `json:"channel" binding:"required" example:"web"` + CampaignID string `json:"campaign_id" example:"cmp_987"` + UserID string `json:"user_id" binding:"required" example:"user_123"` + Timestamp int64 `json:"timestamp" binding:"required" example:"1723475612"` + Tags []string `json:"tags" example:"electronics,homepage,flash_sale"` + Metadata 
map[string]interface{} `json:"metadata" swaggertype:"object,string" example:"product_id:prod-789,price:129.99"` +} + +// PublishEventsBulkRequest represents a publish bulk event request +type PublishEventsBulkRequest struct { + Events []PublishEventRequest `json:"events" binding:"required,min=1,max=1000,dive"` +} + +// GetMetricsRequest represents a metrics query request +type GetMetricsRequest struct { + EventName string `form:"event_name" binding:"required" example:"product_view"` + From int64 `form:"from" binding:"required" example:"1723475612"` + To int64 `form:"to" binding:"required" example:"1723562012"` + GroupBy string `form:"group_by" example:"channel"` +} diff --git a/internal/dto/response.go b/internal/dto/response.go new file mode 100644 index 0000000..df6c2ad --- /dev/null +++ b/internal/dto/response.go @@ -0,0 +1,38 @@ +package dto + +// ErrorResponse represents an error response +type ErrorResponse struct { + Error string `json:"error" example:"validation_error"` + Message string `json:"message,omitempty" example:"event_name is required"` +} + +// PublishEventResponse represents a successful event ingestion response +type PublishEventResponse struct { + EventID string `json:"event_id" example:"evt_1a2b3c4d5e6f"` + Status string `json:"status" example:"accepted"` +} + +// PublishBulkEventsResponse represents a successful bulk event ingestion response +type PublishBulkEventsResponse struct { + Accepted int `json:"accepted" example:"5"` + Rejected int `json:"rejected" example:"0"` + EventIDs []string `json:"event_ids,omitempty" example:"evt_1,evt_2,evt_3"` + Errors []string `json:"errors,omitempty" example:"validation error on event 3"` +} + +// MetricsGroupData represents aggregated metrics for a specific group +type MetricsGroupData struct { + GroupValue string `json:"group_value" example:"web"` + TotalCount uint64 `json:"total_count" example:"1500"` +} + +// GetMetricsResponse represents the metrics query response +type GetMetricsResponse struct { + 
EventName string `json:"event_name" example:"product_view"` + From int64 `json:"from" example:"1723475612"` + To int64 `json:"to" example:"1723562012"` + TotalCount uint64 `json:"total_count" example:"5000"` + UniqueCount uint64 `json:"unique_count" example:"2500"` + GroupBy string `json:"group_by,omitempty" example:"channel"` + Groups []MetricsGroupData `json:"groups,omitempty"` +} diff --git a/internal/handler/handler.go b/internal/handler/handler.go new file mode 100644 index 0000000..9da0957 --- /dev/null +++ b/internal/handler/handler.go @@ -0,0 +1,204 @@ +package handler + +import ( + "net/http" + + "github.com/gin-gonic/gin" + swaggerFiles "github.com/swaggo/files" + ginSwagger "github.com/swaggo/gin-swagger" + "go.uber.org/zap" + + _ "github.com/BarkinBalci/event-analytics-service/docs" + "github.com/BarkinBalci/event-analytics-service/internal/dto" + "github.com/BarkinBalci/event-analytics-service/internal/service" +) + +type Handler struct { + eventService service.EventServicer + router *gin.Engine + log *zap.Logger +} + +func NewHandler(eventService service.EventServicer, log *zap.Logger) *Handler { + h := &Handler{ + eventService: eventService, + router: gin.Default(), + log: log, + } + + h.registerRoutes() + + return h +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.router.ServeHTTP(w, r) +} + +func (h *Handler) registerRoutes() { + h.router.GET("/health", h.healthCheck) + h.router.POST("/events", h.publishEvent) + h.router.POST("/events/bulk", h.publishEventsBulk) + h.router.GET("/metrics", h.getMetrics) + h.router.GET("/docs/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) +} + +// healthCheck handles health check requests +// @Summary Health check +// @Description Check if the service is running +// @Tags health +// @Produce json +// @Success 200 {object} map[string]string +// @Router /health [get] +func (h *Handler) healthCheck(c *gin.Context) { + // TODO: add a more sophisticated health check + 
c.JSON(http.StatusOK, gin.H{ + "status": "ok", + }) +} + +// publishEvent handles POST /events +// @Summary Publish a single event +// @Description Publish a single analytics event to the queue +// @Tags events +// @Accept json +// @Produce json +// @Param event body dto.PublishEventRequest true "Event data" +// @Success 202 {object} dto.PublishEventResponse +// @Failure 400 {object} dto.ErrorResponse +// @Failure 500 {object} dto.ErrorResponse +// @Router /events [post] +func (h *Handler) publishEvent(c *gin.Context) { + var req dto.PublishEventRequest + + if err := c.ShouldBindJSON(&req); err != nil { + h.log.Warn("Invalid event request", + zap.Error(err), + zap.String("event_name", req.EventName)) + c.JSON(http.StatusBadRequest, dto.ErrorResponse{ + Error: "validation_error", + Message: err.Error(), + }) + return + } + + eventID, err := h.eventService.ProcessEvent(&req) + if err != nil { + h.log.Error("Failed to process event", + zap.Error(err), + zap.String("event_name", req.EventName), + zap.String("user_id", req.UserID)) + c.JSON(http.StatusInternalServerError, dto.ErrorResponse{ + Error: "internal_error", + Message: err.Error(), + }) + return + } + + h.log.Info("Event accepted", + zap.String("event_id", eventID), + zap.String("event_name", req.EventName)) + + c.JSON(http.StatusAccepted, dto.PublishEventResponse{ + EventID: eventID, + Status: "accepted", + }) +} + +// publishEventsBulk handles POST /events/bulk +// @Summary Publish multiple events +// @Description Publish multiple analytics events in bulk to the queue +// @Tags events +// @Accept json +// @Produce json +// @Param events body dto.PublishEventsBulkRequest true "Bulk events data" +// @Success 202 {object} dto.PublishBulkEventsResponse +// @Failure 400 {object} dto.ErrorResponse +// @Failure 500 {object} dto.ErrorResponse +// @Router /events/bulk [post] +func (h *Handler) publishEventsBulk(c *gin.Context) { + var bulkRequest dto.PublishEventsBulkRequest + + if err := 
c.ShouldBindJSON(&bulkRequest); err != nil { + h.log.Warn("Invalid bulk event request", zap.Error(err)) + c.JSON(http.StatusBadRequest, dto.ErrorResponse{ + Error: "validation_error", + Message: err.Error(), + }) + return + } + + eventIDs, errors, err := h.eventService.ProcessBulkEvents(bulkRequest.Events) + if err != nil { + h.log.Error("Failed to process bulk events", + zap.Error(err), + zap.Int("event_count", len(bulkRequest.Events))) + c.JSON(http.StatusInternalServerError, dto.ErrorResponse{ + Error: "internal_error", + Message: err.Error(), + }) + return + } + + accepted := len(eventIDs) + rejected := len(errors) + + h.log.Info("Bulk events processed", + zap.Int("accepted", accepted), + zap.Int("rejected", rejected), + zap.Int("total", len(bulkRequest.Events))) + + c.JSON(http.StatusAccepted, dto.PublishBulkEventsResponse{ + Accepted: accepted, + Rejected: rejected, + EventIDs: eventIDs, + Errors: errors, + }) +} + +// getMetrics handles GET /metrics +// @Summary Get aggregated metrics +// @Description Retrieve aggregated event metrics with optional grouping by channel, hour, or day +// @Tags metrics +// @Produce json +// @Param event_name query string true "Event name to filter by" example:"product_view" +// @Param from query int true "Start timestamp (Unix epoch)" example:"1723475612" +// @Param to query int true "End timestamp (Unix epoch)" example:"1723562012" +// @Param group_by query string false "Field to group by (channel, hour, day)" Enums(channel, hour, day) example:"channel" +// @Success 200 {object} dto.GetMetricsResponse +// @Failure 400 {object} dto.ErrorResponse +// @Failure 500 {object} dto.ErrorResponse +// @Router /metrics [get] +func (h *Handler) getMetrics(c *gin.Context) { + var req dto.GetMetricsRequest + + if err := c.ShouldBindQuery(&req); err != nil { + h.log.Warn("Invalid metrics request", zap.Error(err)) + c.JSON(http.StatusBadRequest, dto.ErrorResponse{ + Error: "validation_error", + Message: err.Error(), + }) + return + } + + 
response, err := h.eventService.GetMetrics(&req) + if err != nil { + h.log.Error("Failed to get metrics", + zap.Error(err), + zap.String("event_name", req.EventName), + zap.Int64("from", req.From), + zap.Int64("to", req.To)) + c.JSON(http.StatusInternalServerError, dto.ErrorResponse{ + Error: "internal_error", + Message: err.Error(), + }) + return + } + + h.log.Info("Metrics retrieved", + zap.String("event_name", req.EventName), + zap.Uint64("total_count", response.TotalCount), + zap.Uint64("unique_count", response.UniqueCount)) + + c.JSON(http.StatusOK, response) +} diff --git a/internal/handler/handler_test.go b/internal/handler/handler_test.go new file mode 100644 index 0000000..9a9862b --- /dev/null +++ b/internal/handler/handler_test.go @@ -0,0 +1,493 @@ +package handler + +import ( + "bytes" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.uber.org/zap" + + "github.com/BarkinBalci/event-analytics-service/internal/dto" +) + +const ( + testTimestamp int64 = 1766702551 +) + +// MockEventService is a mock implementation of service.EventServicer +type MockEventService struct { + mock.Mock +} + +func (m *MockEventService) ProcessEvent(event *dto.PublishEventRequest) (string, error) { + args := m.Called(event) + return args.String(0), args.Error(1) +} + +func (m *MockEventService) ProcessBulkEvents(events []dto.PublishEventRequest) ([]string, []string, error) { + args := m.Called(events) + return args.Get(0).([]string), args.Get(1).([]string), args.Error(2) +} + +func (m *MockEventService) GetMetrics(req *dto.GetMetricsRequest) (*dto.GetMetricsResponse, error) { + args := m.Called(req) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*dto.GetMetricsResponse), args.Error(1) +} + +func TestHandler_HealthCheck(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) 
+ + req := httptest.NewRequest(http.MethodGet, "/health", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var response map[string]string + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "ok", response["status"]) +} + +func TestHandler_PublishEvent_Success(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + eventReq := dto.PublishEventRequest{ + EventName: "test_event", + Channel: "web", + UserID: "user123", + Timestamp: testTimestamp, + CampaignID: "campaign1", + } + + mockService.On("ProcessEvent", &eventReq).Return("event-id-123", nil) + + body, _ := json.Marshal(eventReq) + req := httptest.NewRequest(http.MethodPost, "/events", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + var response dto.PublishEventResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "event-id-123", response.EventID) + assert.Equal(t, "accepted", response.Status) + mockService.AssertExpectations(t) +} + +func TestHandler_PublishEvent_InvalidJSON(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + invalidJSON := []byte(`{"event_name": "test", invalid}`) + req := httptest.NewRequest(http.MethodPost, "/events", bytes.NewReader(invalidJSON)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "validation_error", response.Error) + mockService.AssertNotCalled(t, "ProcessEvent") +} + +func 
TestHandler_PublishEvent_MissingRequiredFields(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + eventReq := dto.PublishEventRequest{ + EventName: "test_event", + // Missing required fields: Channel, UserID, Timestamp + } + + body, _ := json.Marshal(eventReq) + req := httptest.NewRequest(http.MethodPost, "/events", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "validation_error", response.Error) + mockService.AssertNotCalled(t, "ProcessEvent") +} + +func TestHandler_PublishEvent_ServiceError(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + eventReq := dto.PublishEventRequest{ + EventName: "test_event", + Channel: "web", + UserID: "user123", + Timestamp: testTimestamp, + } + + serviceErr := errors.New("queue publish error") + mockService.On("ProcessEvent", &eventReq).Return("", serviceErr) + + body, _ := json.Marshal(eventReq) + req := httptest.NewRequest(http.MethodPost, "/events", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "internal_error", response.Error) + assert.Contains(t, response.Message, "queue publish error") + mockService.AssertExpectations(t) +} + +func TestHandler_PublishEventsBulk_Success(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + bulkReq := dto.PublishEventsBulkRequest{ + Events: 
[]dto.PublishEventRequest{ + { + EventName: "event1", + Channel: "web", + UserID: "user1", + Timestamp: testTimestamp, + }, + { + EventName: "event2", + Channel: "mobile", + UserID: "user2", + Timestamp: testTimestamp, + }, + }, + } + + mockService.On("ProcessBulkEvents", bulkReq.Events).Return( + []string{"event-id-1", "event-id-2"}, + []string{}, + nil, + ) + + body, _ := json.Marshal(bulkReq) + req := httptest.NewRequest(http.MethodPost, "/events/bulk", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + var response dto.PublishBulkEventsResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, 2, response.Accepted) + assert.Equal(t, 0, response.Rejected) + assert.Len(t, response.EventIDs, 2) + assert.Empty(t, response.Errors) + mockService.AssertExpectations(t) +} + +func TestHandler_PublishEventsBulk_PartialSuccess(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + bulkReq := dto.PublishEventsBulkRequest{ + Events: []dto.PublishEventRequest{ + { + EventName: "event1", + Channel: "web", + UserID: "user1", + Timestamp: testTimestamp, + }, + { + EventName: "event2", + Channel: "mobile", + UserID: "user2", + Timestamp: testTimestamp, + }, + }, + } + + mockService.On("ProcessBulkEvents", bulkReq.Events).Return( + []string{"event-id-1"}, + []string{"timestamp validation failed"}, + nil, + ) + + body, _ := json.Marshal(bulkReq) + req := httptest.NewRequest(http.MethodPost, "/events/bulk", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + var response dto.PublishBulkEventsResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, 1, 
response.Accepted) + assert.Equal(t, 1, response.Rejected) + assert.Len(t, response.EventIDs, 1) + assert.Len(t, response.Errors, 1) + mockService.AssertExpectations(t) +} + +func TestHandler_PublishEventsBulk_InvalidRequest(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + invalidJSON := []byte(`{"events": [{"invalid"}]}`) + req := httptest.NewRequest(http.MethodPost, "/events/bulk", bytes.NewReader(invalidJSON)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "validation_error", response.Error) + mockService.AssertNotCalled(t, "ProcessBulkEvents") +} + +func TestHandler_PublishEventsBulk_EmptyEvents(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + bulkReq := dto.PublishEventsBulkRequest{ + Events: []dto.PublishEventRequest{}, + } + + body, _ := json.Marshal(bulkReq) + req := httptest.NewRequest(http.MethodPost, "/events/bulk", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "validation_error", response.Error) + mockService.AssertNotCalled(t, "ProcessBulkEvents") +} + +func TestHandler_GetMetrics_Success(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + expectedResponse := &dto.GetMetricsResponse{ + EventName: "test_event", + From: 1000, + To: 2000, + TotalCount: 100, + UniqueCount: 50, + Groups: []dto.MetricsGroupData{}, + } + + 
mockService.On("GetMetrics", &dto.GetMetricsRequest{ + EventName: "test_event", + From: 1000, + To: 2000, + }).Return(expectedResponse, nil) + + req := httptest.NewRequest(http.MethodGet, "/metrics?event_name=test_event&from=1000&to=2000", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var response dto.GetMetricsResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "test_event", response.EventName) + assert.Equal(t, uint64(100), response.TotalCount) + assert.Equal(t, uint64(50), response.UniqueCount) + mockService.AssertExpectations(t) +} + +func TestHandler_GetMetrics_InvalidQueryParams(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + // Missing required query parameters + req := httptest.NewRequest(http.MethodGet, "/metrics?event_name=test_event", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusBadRequest, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "validation_error", response.Error) + mockService.AssertNotCalled(t, "GetMetrics") +} + +func TestHandler_GetMetrics_ServiceError(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + + handler := NewHandler(mockService, log) + + serviceErr := errors.New("database connection error") + mockService.On("GetMetrics", &dto.GetMetricsRequest{ + EventName: "test_event", + From: 1000, + To: 2000, + }).Return(nil, serviceErr) + + req := httptest.NewRequest(http.MethodGet, "/metrics?event_name=test_event&from=1000&to=2000", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusInternalServerError, w.Code) + + var response dto.ErrorResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, 
"internal_error", response.Error) + assert.Contains(t, response.Message, "database connection error") + mockService.AssertExpectations(t) +} + +func TestHandler_GetMetrics_GroupByHour(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + handler := NewHandler(mockService, log) + + expectedResponse := &dto.GetMetricsResponse{ + EventName: "product_view", + From: 1766702551, + To: 1780702551, + TotalCount: 500, + UniqueCount: 250, + GroupBy: "hour", + Groups: []dto.MetricsGroupData{ + {GroupValue: "2025-12-27 14:00:00", TotalCount: 150}, + {GroupValue: "2025-12-27 15:00:00", TotalCount: 200}, + {GroupValue: "2025-12-27 16:00:00", TotalCount: 150}, + }, + } + + mockService.On("GetMetrics", mock.MatchedBy(func(req *dto.GetMetricsRequest) bool { + return req.EventName == "product_view" && + req.From == 1766702551 && + req.To == 1780702551 && + req.GroupBy == "hour" + })).Return(expectedResponse, nil) + + req, _ := http.NewRequest("GET", "/metrics?event_name=product_view&from=1766702551&to=1780702551&group_by=hour", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var response dto.GetMetricsResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "product_view", response.EventName) + assert.Equal(t, "hour", response.GroupBy) + assert.Len(t, response.Groups, 3) + assert.Equal(t, "2025-12-27 14:00:00", response.Groups[0].GroupValue) + mockService.AssertExpectations(t) +} + +func TestHandler_GetMetrics_GroupByDay(t *testing.T) { + mockService := new(MockEventService) + log := zap.NewNop() + handler := NewHandler(mockService, log) + + expectedResponse := &dto.GetMetricsResponse{ + EventName: "product_view", + From: 1766702551, + To: 1780702551, + TotalCount: 5000, + UniqueCount: 2500, + GroupBy: "day", + Groups: []dto.MetricsGroupData{ + {GroupValue: "2025-12-27", TotalCount: 1500}, + {GroupValue: "2025-12-28", TotalCount: 1800}, + 
{GroupValue: "2025-12-29", TotalCount: 1700}, + }, + } + + mockService.On("GetMetrics", mock.MatchedBy(func(req *dto.GetMetricsRequest) bool { + return req.EventName == "product_view" && + req.From == 1766702551 && + req.To == 1780702551 && + req.GroupBy == "day" + })).Return(expectedResponse, nil) + + req, _ := http.NewRequest("GET", "/metrics?event_name=product_view&from=1766702551&to=1780702551&group_by=day", nil) + w := httptest.NewRecorder() + + handler.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + + var response dto.GetMetricsResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, "product_view", response.EventName) + assert.Equal(t, "day", response.GroupBy) + assert.Len(t, response.Groups, 3) + assert.Equal(t, "2025-12-27", response.Groups[0].GroupValue) + mockService.AssertExpectations(t) +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 0000000..009f66a --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,23 @@ +package logger + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// New creates a new logger instance +func New(environment string) (*zap.Logger, error) { + var config zap.Config + + if environment == "production" { + config = zap.NewProductionConfig() + } else { + config = zap.NewDevelopmentConfig() + config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + } + + config.EncoderConfig.CallerKey = "caller" + config.EncoderConfig.EncodeCaller = zapcore.ShortCallerEncoder + + return config.Build(zap.AddCaller()) +} diff --git a/internal/queue/interfaces.go b/internal/queue/interfaces.go new file mode 100644 index 0000000..ba810cc --- /dev/null +++ b/internal/queue/interfaces.go @@ -0,0 +1,21 @@ +package queue + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/sqs" + + "github.com/BarkinBalci/event-analytics-service/internal/dto" +) + +// QueuePublisher defines the interface for publishing events 
to a queue +type QueuePublisher interface { + PublishEvent(ctx context.Context, event *dto.PublishEventRequest, eventID string) error +} + +// QueueConsumer defines the interface for consuming messages from a queue +type QueueConsumer interface { + ReceiveMessages(ctx context.Context, input *sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) + DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) + QueueURL() string +} diff --git a/internal/queue/sqs/client.go b/internal/queue/sqs/client.go new file mode 100644 index 0000000..3ecb72b --- /dev/null +++ b/internal/queue/sqs/client.go @@ -0,0 +1,133 @@ +package sqs + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "go.uber.org/zap" + + envConfig "github.com/BarkinBalci/event-analytics-service/internal/config" + "github.com/BarkinBalci/event-analytics-service/internal/dto" +) + +// Client represents an SQS client +type Client struct { + client *sqs.Client + config envConfig.SQS + log *zap.Logger +} + +// NewClient creates a new SQS client +func NewClient(ctx context.Context, SQSConfig envConfig.SQS, log *zap.Logger) (*Client, error) { + configOpts := []func(*config.LoadOptions) error{ + config.WithRegion(SQSConfig.Region), + } + + var clientOpts []func(*sqs.Options) + + // Configure for local development with ElasticMQ + if SQSConfig.Endpoint != "" { + log.Info("Configuring SQS for local development", + zap.String("endpoint", SQSConfig.Endpoint)) + configOpts = append(configOpts, + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider("dummy", "dummy", ""))) + + clientOpts = append(clientOpts, func(o *sqs.Options) { + o.BaseEndpoint = aws.String(SQSConfig.Endpoint) + }) + } + + cfg, err := config.LoadDefaultConfig(ctx, 
configOpts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load AWS config: %w", err)
+	}
+
+	sqsClient := sqs.NewFromConfig(cfg, clientOpts...)
+
+	log.Info("SQS client created",
+		zap.String("region", SQSConfig.Region),
+		zap.String("queue_url", SQSConfig.QueueURL))
+
+	return &Client{
+		client: sqsClient,
+		config: SQSConfig,
+		log:    log,
+	}, nil
+}
+
+// ReceiveMessages receives messages from SQS
+func (c *Client) ReceiveMessages(ctx context.Context, input *sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) {
+	return c.client.ReceiveMessage(ctx, input)
+}
+
+// DeleteMessage deletes a message from SQS
+func (c *Client) DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) {
+	return c.client.DeleteMessage(ctx, input)
+}
+
+// Client returns the underlying SQS client
+func (c *Client) Client() *sqs.Client {
+	return c.client
+}
+
+// QueueURL returns the configured queue URL
+func (c *Client) QueueURL() string {
+	return c.config.QueueURL
+}
+
+// PublishEvent publishes an event to SQS
+func (c *Client) PublishEvent(ctx context.Context, event *dto.PublishEventRequest, eventID string) error {
+	messageBody := map[string]interface{}{
+		"event_id":    eventID,
+		"event_name":  event.EventName,
+		"channel":     event.Channel,
+		"campaign_id": event.CampaignID,
+		"user_id":     event.UserID,
+		"timestamp":   event.Timestamp,
+		"tags":        event.Tags,
+		"metadata":    event.Metadata,
+	}
+
+	bodyJSON, err := json.Marshal(messageBody)
+	if err != nil {
+		c.log.Error("Failed to marshal event",
+			zap.String("event_id", eventID),
+			zap.String("event_name", event.EventName),
+			zap.Error(err))
+		return fmt.Errorf("failed to marshal event: %w", err)
+	}
+
+	_, err = c.client.SendMessage(ctx, &sqs.SendMessageInput{
+		QueueUrl:    aws.String(c.config.QueueURL),
+		MessageBody: aws.String(string(bodyJSON)),
+		MessageAttributes: map[string]types.MessageAttributeValue{
+			"EventName": {
+				DataType: aws.String("String"),
+				StringValue:
aws.String(event.EventName),
+			},
+			"Channel": {
+				DataType:    aws.String("String"),
+				StringValue: aws.String(event.Channel),
+			},
+		},
+	})
+	if err != nil {
+		c.log.Error("Failed to send message to SQS",
+			zap.String("event_id", eventID),
+			zap.String("event_name", event.EventName),
+			zap.Error(err))
+		return fmt.Errorf("failed to send message to SQS: %w", err)
+	}
+
+	c.log.Info("Event published to SQS",
+		zap.String("event_id", eventID),
+		zap.String("event_name", event.EventName))
+
+	return nil
+}
diff --git a/internal/repository/clickhouse/client.go b/internal/repository/clickhouse/client.go
new file mode 100644
index 0000000..e40c292
--- /dev/null
+++ b/internal/repository/clickhouse/client.go
@@ -0,0 +1,90 @@
+package clickhouse
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/config"
+	"github.com/ClickHouse/clickhouse-go/v2"
+	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
+)
+
+// Client wraps the ClickHouse connection
+type Client struct {
+	connection driver.Conn
+	config     *config.ClickHouse
+	log        *zap.Logger
+}
+
+// NewClient creates a new ClickHouse client with the given configuration
+func NewClient(ctx context.Context, config *config.ClickHouse, log *zap.Logger) (*Client, error) {
+	addr := fmt.Sprintf("%s:%s", config.Host, config.Port)
+
+	log.Info("Connecting to ClickHouse",
+		zap.String("host", config.Host),
+		zap.String("port", config.Port),
+		zap.String("database", config.Database),
+		zap.Bool("useTLS", config.UseTLS))
+
+	// Configure TLS based on environment
+	var tlsConfig *tls.Config
+	if config.UseTLS {
+		tlsConfig = &tls.Config{
+			InsecureSkipVerify: false,
+		}
+	}
+
+	connection, err := clickhouse.Open(&clickhouse.Options{
+		Addr: []string{addr},
+		Auth: clickhouse.Auth{
+			Database: config.Database,
+			Username: config.User,
+			Password: config.Password,
+		},
+		Settings: clickhouse.Settings{
+			"max_execution_time": 60,
+		},
+		TLS: tlsConfig,
+
DialTimeout:      5 * time.Second,
+		MaxOpenConns:     config.MaxOpenConns,
+		MaxIdleConns:     config.MaxIdleConns,
+		ConnMaxLifetime:  time.Duration(config.ConnMaxLifetime) * time.Second,
+		ConnOpenStrategy: clickhouse.ConnOpenInOrder,
+		BlockBufferSize:  10,
+	})
+
+	if err != nil {
+		log.Error("Failed to connect to ClickHouse", zap.Error(err))
+		return nil, fmt.Errorf("failed to connect to ClickHouse: %w", err)
+	}
+
+	// Verify connection
+	if err := connection.Ping(ctx); err != nil {
+		log.Error("Failed to ping ClickHouse", zap.Error(err))
+		return nil, fmt.Errorf("failed to ping ClickHouse: %w", err)
+	}
+
+	log.Info("ClickHouse connection established successfully")
+
+	return &Client{connection: connection, config: config, log: log}, nil
+}
+
+// Conn returns the underlying ClickHouse connection
+func (c *Client) Conn() driver.Conn {
+	return c.connection
+}
+
+// Close closes the ClickHouse connection
+func (c *Client) Close() error {
+	c.log.Info("Closing ClickHouse connection")
+	if err := c.connection.Close(); err != nil {
+		c.log.Error("Error closing ClickHouse connection", zap.Error(err))
+		return err
+	}
+	c.log.Info("ClickHouse connection closed successfully")
+	return nil
+}
diff --git a/internal/repository/clickhouse/repository.go b/internal/repository/clickhouse/repository.go
new file mode 100644
index 0000000..1cb5aff
--- /dev/null
+++ b/internal/repository/clickhouse/repository.go
@@ -0,0 +1,210 @@
+package clickhouse
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/domain"
+	"github.com/BarkinBalci/event-analytics-service/internal/repository"
+)
+
+// Repository implements EventRepository for ClickHouse
+type Repository struct {
+	client *Client
+	log    *zap.Logger
+}
+
+// NewRepository creates a new ClickHouse repository
+func NewRepository(client *Client, log *zap.Logger) *Repository {
+	return &Repository{
+		client: client,
+
log:    log,
+	}
+}
+
+// InitSchema initializes the ClickHouse schema with ReplacingMergeTree engine
+func (r *Repository) InitSchema(ctx context.Context) error {
+	query := `
+	CREATE TABLE IF NOT EXISTS events (
+		event_id String,
+		event_name LowCardinality(String),
+		channel LowCardinality(String),
+		campaign_id String,
+		user_id String,
+		timestamp Int64,
+		tags Array(String),
+		metadata String,
+		processed_at DateTime64(3) DEFAULT now64(3),
+		version UInt64
+	) ENGINE = ReplacingMergeTree(version)
+	PRIMARY KEY (event_id)
+	ORDER BY (event_id, timestamp)
+	PARTITION BY toYYYYMM(toDateTime(timestamp))
+	SETTINGS index_granularity = 8192
+	`
+
+	if err := r.client.Conn().Exec(ctx, query); err != nil {
+		return fmt.Errorf("failed to create events table: %w", err)
+	}
+
+	r.log.Info("ClickHouse schema initialized successfully")
+	return nil
+}
+
+// InsertBatch inserts a batch of events into ClickHouse
+func (r *Repository) InsertBatch(ctx context.Context, events []*domain.Event) (int, error) {
+	if len(events) == 0 {
+		return 0, nil
+	}
+
+	batch, err := r.client.Conn().PrepareBatch(ctx, "INSERT INTO events")
+	if err != nil {
+		return 0, fmt.Errorf("failed to prepare batch: %w", err)
+	}
+
+	insertedCount := 0
+	for _, event := range events {
+		if event.Version == 0 {
+			event.Version = uint64(time.Now().UnixNano())
+		}
+
+		metadataJSON := event.Metadata
+		if metadataJSON == "" {
+			metadataJSON = "{}"
+		}
+
+		tags := event.Tags
+		if tags == nil {
+			tags = []string{}
+		}
+
+		err := batch.Append(
+			event.EventID,
+			event.EventName,
+			event.Channel,
+			event.CampaignID,
+			event.UserID,
+			event.Timestamp,
+			tags,
+			metadataJSON,
+			event.ProcessedAt,
+			event.Version,
+		)
+
+		if err != nil {
+			return 0, fmt.Errorf("failed to append event to batch: %w", err)
+		}
+		insertedCount++
+	}
+
+	if insertedCount == 0 {
+		return 0, fmt.Errorf("no events could be appended to batch")
+	}
+
+	if err := batch.Send(); err != nil {
+		return 0, fmt.Errorf("failed to send batch: %w", err)
+	}
+
+	return insertedCount, nil
+}
+
+// Ping checks if the ClickHouse connection is alive
+func (r *Repository) Ping(ctx context.Context) error {
+	return r.client.Conn().Ping(ctx)
+}
+
+// Close closes the ClickHouse connection
+func (r *Repository) Close() error {
+	return r.client.Close()
+}
+
+// GetMetrics retrieves aggregated metrics from ClickHouse
+func (r *Repository) GetMetrics(ctx context.Context, query repository.MetricsQuery) (*repository.MetricsResult, error) {
+	result := &repository.MetricsResult{
+		Groups: []repository.MetricsGroupResult{},
+	}
+
+	// Build the WHERE clause
+	whereClause := "WHERE event_name = ? AND timestamp >= ? AND timestamp <= ?"
+	args := []interface{}{query.EventName, query.From, query.To}
+
+	// Get overall metrics
+	overallQuery := fmt.Sprintf(`
+	SELECT
+		count() as total_count,
+		uniq(user_id) as unique_count
+	FROM events FINAL
+	%s
+	`, whereClause)
+
+	row := r.client.Conn().QueryRow(ctx, overallQuery, args...)
+	if err := row.Scan(&result.TotalCount, &result.UniqueCount); err != nil {
+		return nil, fmt.Errorf("failed to query overall metrics: %w", err)
+	}
+
+	// If groupBy is specified, get grouped metrics
+	if query.GroupBy != "" {
+		validGroupBy := map[string]bool{"channel": true, "hour": true, "day": true}
+		if !validGroupBy[query.GroupBy] {
+			return nil, fmt.Errorf("unsupported group_by value: %s (supported: channel, hour, day)", query.GroupBy)
+		}
+
+		var selectField string
+		var groupByClause string
+		var orderBy string
+
+		switch query.GroupBy {
+		case "channel":
+			selectField = "channel"
+			groupByClause = "GROUP BY channel"
+			orderBy = "ORDER BY total_count DESC"
+		case "hour":
+			selectField = "formatDateTime(toStartOfHour(toDateTime(timestamp)), '%Y-%m-%d %H:00:00')"
+			groupByClause = "GROUP BY toStartOfHour(toDateTime(timestamp))"
+			orderBy = "ORDER BY group_value ASC"
+		case "day":
+			selectField = "formatDateTime(toStartOfDay(toDateTime(timestamp)), '%Y-%m-%d')"
+			groupByClause = "GROUP BY
toStartOfDay(toDateTime(timestamp))"
+			orderBy = "ORDER BY group_value ASC"
+		}
+
+		groupedQuery := fmt.Sprintf(`
+		SELECT
+			%s as group_value,
+			count() as total_count
+		FROM events FINAL
+		%s
+		%s
+		%s
+		`, selectField, whereClause, groupByClause, orderBy)
+
+		rows, err := r.client.Conn().Query(ctx, groupedQuery, args...)
+		if err != nil {
+			return nil, fmt.Errorf("failed to query grouped metrics: %w", err)
+		}
+		defer func(rows driver.Rows) {
+			err := rows.Close()
+			if err != nil {
+				r.log.Error("Failed to close grouped metrics rows", zap.Error(err))
+			}
+		}(rows)
+
+		for rows.Next() {
+			var group repository.MetricsGroupResult
+			if err := rows.Scan(&group.GroupValue, &group.TotalCount); err != nil {
+				return nil, fmt.Errorf("failed to scan grouped metrics row: %w", err)
+			}
+			result.Groups = append(result.Groups, group)
+		}
+
+		if err := rows.Err(); err != nil {
+			return nil, fmt.Errorf("error iterating grouped metrics rows: %w", err)
+		}
+	}
+
+	return result, nil
+}
diff --git a/internal/repository/interfaces.go b/internal/repository/interfaces.go
new file mode 100644
index 0000000..1373ad9
--- /dev/null
+++ b/internal/repository/interfaces.go
@@ -0,0 +1,46 @@
+package repository
+
+import (
+	"context"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/domain"
+)
+
+// MetricsQuery represents a metrics query parameters
+type MetricsQuery struct {
+	EventName string
+	From      int64
+	To        int64
+	GroupBy   string
+}
+
+// MetricsGroupResult represents aggregated metrics for a specific group
+type MetricsGroupResult struct {
+	GroupValue string
+	TotalCount uint64
+}
+
+// MetricsResult represents the result of a metrics query
+type MetricsResult struct {
+	TotalCount  uint64
+	UniqueCount uint64
+	Groups      []MetricsGroupResult
+}
+
+// EventRepository defines the interface for event storage operations
+type EventRepository interface {
+	// InsertBatch inserts a batch of events into the storage
+	InsertBatch(ctx context.Context, events []*domain.Event) (int,
error)
+
+	// InitSchema initializes the database schema (creates tables if they don't exist)
+	InitSchema(ctx context.Context) error
+
+	// Ping checks if the database connection is alive
+	Ping(ctx context.Context) error
+
+	// Close closes the repository and releases resources
+	Close() error
+
+	// GetMetrics retrieves aggregated metrics based on the query
+	GetMetrics(ctx context.Context, query MetricsQuery) (*MetricsResult, error)
+}
diff --git a/internal/service/event.go b/internal/service/event.go
new file mode 100644
index 0000000..e9109ef
--- /dev/null
+++ b/internal/service/event.go
@@ -0,0 +1,165 @@
+package service
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"time"
+
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/dto"
+	"github.com/BarkinBalci/event-analytics-service/internal/queue"
+	"github.com/BarkinBalci/event-analytics-service/internal/repository"
+)
+
+// EventService represents event service
+type EventService struct {
+	publisher  queue.QueuePublisher
+	repository repository.EventRepository
+	log        *zap.Logger
+}
+
+// NewEventService creates a new event service
+func NewEventService(publisher queue.QueuePublisher, repo repository.EventRepository, log *zap.Logger) *EventService {
+	return &EventService{
+		publisher:  publisher,
+		repository: repo,
+		log:        log,
+	}
+}
+
+// computeEventID generates a deterministic event ID based on event content
+// Uses SHA-256 hash of: user_id|event_name|timestamp|campaign_id|channel
+func computeEventID(event *dto.PublishEventRequest) string {
+	// Concatenate fields that uniquely identify an event
+	data := fmt.Sprintf("%s|%s|%d|%s|%s",
+		event.UserID,
+		event.EventName,
+		event.Timestamp,
+		event.CampaignID,
+		event.Channel,
+	)
+
+	// SHA-256 hash for deterministic ID
+	hash := sha256.Sum256([]byte(data))
+	return hex.EncodeToString(hash[:])
+}
+
+// ProcessEvent processes a single event
+func (s *EventService) ProcessEvent(event
*dto.PublishEventRequest) (string, error) {
+	ctx := context.Background()
+
+	currentTime := time.Now().Unix()
+	if event.Timestamp > currentTime+1 {
+		s.log.Warn("Timestamp validation failed: future timestamp",
+			zap.Int64("event_timestamp", event.Timestamp),
+			zap.Int64("current_time", currentTime),
+			zap.String("event_name", event.EventName))
+		return "", fmt.Errorf("timestamp cannot be in the future: %d > %d", event.Timestamp, currentTime)
+	}
+
+	eventID := computeEventID(event)
+
+	err := s.publisher.PublishEvent(ctx, event, eventID)
+	if err != nil {
+		return "", fmt.Errorf("failed to publish event to queue: %w", err)
+	}
+
+	return eventID, nil
+}
+
+// ProcessBulkEvents validates and processes multiple events
+func (s *EventService) ProcessBulkEvents(events []dto.PublishEventRequest) ([]string, []string, error) {
+	var eventIDs []string
+	var errors []string
+
+	for i, event := range events {
+		eventID, err := s.ProcessEvent(&event)
+		if err != nil {
+			errors = append(errors, err.Error())
+			s.log.Warn("Failed to process event in bulk",
+				zap.Int("index", i),
+				zap.Error(err),
+				zap.String("event_name", event.EventName))
+			continue
+		}
+		eventIDs = append(eventIDs, eventID)
+	}
+
+	return eventIDs, errors, nil
+}
+
+// GetMetrics retrieves aggregated metrics from the repository
+func (s *EventService) GetMetrics(req *dto.GetMetricsRequest) (*dto.GetMetricsResponse, error) {
+	ctx := context.Background()
+
+	// Validate time range
+	if req.From > req.To {
+		s.log.Warn("Invalid time range for metrics",
+			zap.Int64("from", req.From),
+			zap.Int64("to", req.To),
+			zap.String("event_name", req.EventName))
+		return nil, fmt.Errorf("from timestamp must be less than or equal to to timestamp")
+	}
+
+	// Validate group_by parameter
+	if req.GroupBy != "" {
+		validGroupBy := map[string]bool{"channel": true, "hour": true, "day": true}
+		if !validGroupBy[req.GroupBy] {
+			s.log.Warn("Invalid group_by value",
+				zap.String("group_by", req.GroupBy))
+			return nil,
fmt.Errorf("invalid group_by value: %s (supported: channel, hour, day)", req.GroupBy)
+		}
+
+		// Warn if time range is too large for hourly grouping
+		rangeSeconds := req.To - req.From
+		if req.GroupBy == "hour" && rangeSeconds > 90*24*3600 {
+			s.log.Warn("Large time range for hourly grouping",
+				zap.Int64("range_days", rangeSeconds/(24*3600)))
+			return nil, fmt.Errorf("time range too large for hourly grouping (max 90 days, got %d days)", rangeSeconds/(24*3600))
+		}
+	}
+
+	// Build repository query
+	query := repository.MetricsQuery{
+		EventName: req.EventName,
+		From:      req.From,
+		To:        req.To,
+		GroupBy:   req.GroupBy,
+	}
+
+	s.log.Info("Querying metrics",
+		zap.String("event_name", req.EventName),
+		zap.Int64("from", req.From),
+		zap.Int64("to", req.To),
+		zap.String("group_by", req.GroupBy))
+
+	// Query repository
+	result, err := s.repository.GetMetrics(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get metrics from repository: %w", err)
+	}
+
+	// Build response
+	response := &dto.GetMetricsResponse{
+		EventName:   req.EventName,
+		From:        req.From,
+		To:          req.To,
+		TotalCount:  result.TotalCount,
+		UniqueCount: result.UniqueCount,
+		GroupBy:     req.GroupBy,
+		Groups:      make([]dto.MetricsGroupData, 0, len(result.Groups)),
+	}
+
+	// Convert repository groups to DTO groups
+	for _, group := range result.Groups {
+		response.Groups = append(response.Groups, dto.MetricsGroupData{
+			GroupValue: group.GroupValue,
+			TotalCount: group.TotalCount,
+		})
+	}
+
+	return response, nil
+}
diff --git a/internal/service/event_test.go b/internal/service/event_test.go
new file mode 100644
index 0000000..4eb2d60
--- /dev/null
+++ b/internal/service/event_test.go
@@ -0,0 +1,558 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"go.uber.org/zap"
+
+	"github.com/BarkinBalci/event-analytics-service/internal/domain"
+	"github.com/BarkinBalci/event-analytics-service/internal/dto"
+
"github.com/BarkinBalci/event-analytics-service/internal/repository"
+)
+
+const (
+	testCurrentTime int64 = 1766702551
+	testFutureTime  int64 = 2556144000
+)
+
+// MockQueuePublisher is a mock implementation of queue.QueuePublisher
+type MockQueuePublisher struct {
+	mock.Mock
+}
+
+func (m *MockQueuePublisher) PublishEvent(ctx context.Context, event *dto.PublishEventRequest, eventID string) error {
+	args := m.Called(ctx, event, eventID)
+	return args.Error(0)
+}
+
+// MockEventRepository is a mock implementation of repository.EventRepository
+type MockEventRepository struct {
+	mock.Mock
+}
+
+func (m *MockEventRepository) InsertBatch(ctx context.Context, events []*domain.Event) (int, error) {
+	args := m.Called(ctx, events)
+	return args.Int(0), args.Error(1)
+}
+
+func (m *MockEventRepository) InitSchema(ctx context.Context) error {
+	args := m.Called(ctx)
+	return args.Error(0)
+}
+
+func (m *MockEventRepository) Ping(ctx context.Context) error {
+	args := m.Called(ctx)
+	return args.Error(0)
+}
+
+func (m *MockEventRepository) Close() error {
+	args := m.Called()
+	return args.Error(0)
+}
+
+func (m *MockEventRepository) GetMetrics(ctx context.Context, query repository.MetricsQuery) (*repository.MetricsResult, error) {
+	args := m.Called(ctx, query)
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+	return args.Get(0).(*repository.MetricsResult), args.Error(1)
+}
+
+func TestEventService_ProcessEvent_Success(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.PublishEventRequest{
+		EventName:  "test_event",
+		Channel:    "web",
+		UserID:     "user123",
+		Timestamp:  testCurrentTime,
+		CampaignID: "campaign1",
+		Tags:       []string{"tag1"},
+		Metadata:   map[string]interface{}{"key": "value"},
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, req, mock.AnythingOfType("string")).Return(nil)
+
+	eventID, err :=
service.ProcessEvent(req)
+
+	assert.NoError(t, err)
+	assert.NotEmpty(t, eventID)
+	mockPublisher.AssertExpectations(t)
+}
+
+func TestEventService_ProcessEvent_FutureTimestamp(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.PublishEventRequest{
+		EventName: "test_event",
+		Channel:   "web",
+		UserID:    "user123",
+		Timestamp: testFutureTime,
+	}
+
+	eventID, err := service.ProcessEvent(req)
+
+	assert.Error(t, err)
+	assert.Empty(t, eventID)
+	assert.Contains(t, err.Error(), "timestamp cannot be in the future")
+	mockPublisher.AssertNotCalled(t, "PublishEvent")
+}
+
+func TestEventService_ProcessEvent_SQSPublishError(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.PublishEventRequest{
+		EventName: "test_event",
+		Channel:   "web",
+		UserID:    "user123",
+		Timestamp: testCurrentTime,
+	}
+
+	publishErr := errors.New("queue publish error")
+	mockPublisher.On("PublishEvent", mock.Anything, req, mock.AnythingOfType("string")).Return(publishErr)
+
+	eventID, err := service.ProcessEvent(req)
+
+	assert.Error(t, err)
+	assert.Empty(t, eventID)
+	assert.Contains(t, err.Error(), "failed to publish event to queue")
+	mockPublisher.AssertExpectations(t)
+}
+
+func TestEventService_ProcessEvent_ContentHashIdempotency(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.PublishEventRequest{
+		EventName:  "test_event",
+		Channel:    "web",
+		UserID:     "user123",
+		Timestamp:  testCurrentTime,
+		CampaignID: "campaign1",
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, req, mock.AnythingOfType("string")).Return(nil)
+
+	// Same event should produce same
event_id (idempotency)
+	eventID1, _ := service.ProcessEvent(req)
+	eventID2, _ := service.ProcessEvent(req)
+	assert.Equal(t, eventID1, eventID2, "Same event should produce same event_id for idempotency")
+
+	// Different event should produce different event_id
+	reqDifferent := &dto.PublishEventRequest{
+		EventName:  "different_event",
+		Channel:    "mobile",
+		UserID:     "user456",
+		Timestamp:  testCurrentTime + 100,
+		CampaignID: "campaign2",
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, reqDifferent, mock.AnythingOfType("string")).Return(nil)
+
+	eventID3, _ := service.ProcessEvent(reqDifferent)
+	assert.NotEqual(t, eventID1, eventID3, "Different events should produce different event_ids")
+
+	// Same content in different field should produce different event_id
+	reqDifferentChannel := &dto.PublishEventRequest{
+		EventName:  "test_event",
+		Channel:    "mobile", // Different channel
+		UserID:     "user123",
+		Timestamp:  testCurrentTime,
+		CampaignID: "campaign1",
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, reqDifferentChannel, mock.AnythingOfType("string")).Return(nil)
+
+	eventID4, _ := service.ProcessEvent(reqDifferentChannel)
+	assert.NotEqual(t, eventID1, eventID4, "Different channel should produce different event_id")
+}
+
+func TestEventService_ProcessBulkEvents_AllSuccess(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	events := []dto.PublishEventRequest{
+		{
+			EventName: "event1",
+			Channel:   "web",
+			UserID:    "user1",
+			Timestamp: testCurrentTime,
+		},
+		{
+			EventName: "event2",
+			Channel:   "mobile",
+			UserID:    "user2",
+			Timestamp: testCurrentTime,
+		},
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, mock.Anything, mock.AnythingOfType("string")).Return(nil).Times(2)
+
+	eventIDs, errors, err := service.ProcessBulkEvents(events)
+
+	assert.NoError(t, err)
+	assert.Len(t, eventIDs, 2)
+	assert.Empty(t,
errors)
+	mockPublisher.AssertExpectations(t)
+}
+
+func TestEventService_ProcessBulkEvents_PartialFailure(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	events := []dto.PublishEventRequest{
+		{
+			EventName: "event1",
+			Channel:   "web",
+			UserID:    "user1",
+			Timestamp: testCurrentTime,
+		},
+		{
+			EventName: "event2",
+			Channel:   "mobile",
+			UserID:    "user2",
+			Timestamp: testFutureTime, // This will fail
+		},
+		{
+			EventName: "event3",
+			Channel:   "web",
+			UserID:    "user3",
+			Timestamp: testCurrentTime,
+		},
+	}
+
+	mockPublisher.On("PublishEvent", mock.Anything, mock.Anything, mock.AnythingOfType("string")).Return(nil).Times(2)
+
+	eventIDs, errs, err := service.ProcessBulkEvents(events)
+
+	assert.NoError(t, err)
+	assert.Len(t, eventIDs, 2)
+	assert.Len(t, errs, 1)
+	assert.Contains(t, errs[0], "timestamp cannot be in the future")
+}
+
+func TestEventService_ProcessBulkEvents_AllFailure(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	events := []dto.PublishEventRequest{
+		{
+			EventName: "event1",
+			Channel:   "web",
+			UserID:    "user1",
+			Timestamp: testFutureTime,
+		},
+		{
+			EventName: "event2",
+			Channel:   "mobile",
+			UserID:    "user2",
+			Timestamp: testFutureTime,
+		},
+	}
+
+	eventIDs, errs, err := service.ProcessBulkEvents(events)
+
+	assert.NoError(t, err)
+	assert.Empty(t, eventIDs)
+	assert.Len(t, errs, 2)
+	mockPublisher.AssertNotCalled(t, "PublishEvent")
+}
+
+func TestEventService_ProcessBulkEvents_EmptyList(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	events := []dto.PublishEventRequest{}
+
+	eventIDs, errs, err := service.ProcessBulkEvents(events)
+
+
assert.NoError(t, err)
+	assert.Empty(t, eventIDs)
+	assert.Empty(t, errs)
+	mockPublisher.AssertNotCalled(t, "PublishEvent")
+}
+
+func TestEventService_GetMetrics_Success(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "test_event",
+		From:      1000,
+		To:        2000,
+		GroupBy:   "",
+	}
+
+	expectedResult := &repository.MetricsResult{
+		TotalCount:  100,
+		UniqueCount: 50,
+		Groups:      []repository.MetricsGroupResult{},
+	}
+
+	mockRepo.On("GetMetrics", mock.Anything, repository.MetricsQuery{
+		EventName: "test_event",
+		From:      1000,
+		To:        2000,
+		GroupBy:   "",
+	}).Return(expectedResult, nil)
+
+	response, err := service.GetMetrics(req)
+
+	assert.NoError(t, err)
+	assert.NotNil(t, response)
+	assert.Equal(t, uint64(100), response.TotalCount)
+	assert.Equal(t, uint64(50), response.UniqueCount)
+	assert.Equal(t, "test_event", response.EventName)
+	mockRepo.AssertExpectations(t)
+}
+
+func TestEventService_GetMetrics_InvalidTimeRange(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "test_event",
+		From:      2000,
+		To:        1000, // Invalid: From > To
+	}
+
+	response, err := service.GetMetrics(req)
+
+	assert.Error(t, err)
+	assert.Nil(t, response)
+	assert.Contains(t, err.Error(), "from timestamp must be less than or equal to to timestamp")
+	mockRepo.AssertNotCalled(t, "GetMetrics")
+}
+
+func TestEventService_GetMetrics_RepositoryError(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "test_event",
+		From:      1000,
+		To:        2000,
+	}
+
+	repoErr :=
errors.New("database connection error")
+	mockRepo.On("GetMetrics", mock.Anything, mock.Anything).Return(nil, repoErr)
+
+	response, err := service.GetMetrics(req)
+
+	assert.Error(t, err)
+	assert.Nil(t, response)
+	assert.Contains(t, err.Error(), "failed to get metrics from repository")
+	mockRepo.AssertExpectations(t)
+}
+
+func TestEventService_GetMetrics_WithGroupBy(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	log := zap.NewNop()
+
+	service := NewEventService(mockPublisher, mockRepo, log)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "test_event",
+		From:      1000,
+		To:        2000,
+		GroupBy:   "channel",
+	}
+
+	expectedResult := &repository.MetricsResult{
+		TotalCount:  100,
+		UniqueCount: 50,
+		Groups: []repository.MetricsGroupResult{
+			{GroupValue: "web", TotalCount: 60},
+			{GroupValue: "mobile", TotalCount: 40},
+		},
+	}
+
+	mockRepo.On("GetMetrics", mock.Anything, repository.MetricsQuery{
+		EventName: "test_event",
+		From:      1000,
+		To:        2000,
+		GroupBy:   "channel",
+	}).Return(expectedResult, nil)
+
+	response, err := service.GetMetrics(req)
+
+	assert.NoError(t, err)
+	assert.NotNil(t, response)
+	assert.Equal(t, uint64(100), response.TotalCount)
+	assert.Equal(t, uint64(50), response.UniqueCount)
+	assert.Len(t, response.Groups, 2)
+	assert.Equal(t, "web", response.Groups[0].GroupValue)
+	assert.Equal(t, uint64(60), response.Groups[0].TotalCount)
+	assert.Equal(t, "mobile", response.Groups[1].GroupValue)
+	assert.Equal(t, uint64(40), response.Groups[1].TotalCount)
+	mockRepo.AssertExpectations(t)
+}
+
+func TestEventService_GetMetrics_GroupByHour(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	logger, _ := zap.NewDevelopment()
+	eventService := NewEventService(mockPublisher, mockRepo, logger)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1723562012, // ~24 hours
+		GroupBy:   "hour",
+	}
+
+	expectedQuery :=
repository.MetricsQuery{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1723562012,
+		GroupBy:   "hour",
+	}
+
+	expectedResult := &repository.MetricsResult{
+		TotalCount:  500,
+		UniqueCount: 250,
+		Groups: []repository.MetricsGroupResult{
+			{GroupValue: "2024-08-12 14:00:00", TotalCount: 150},
+			{GroupValue: "2024-08-12 15:00:00", TotalCount: 200},
+			{GroupValue: "2024-08-12 16:00:00", TotalCount: 150},
+		},
+	}
+
+	mockRepo.On("GetMetrics", mock.Anything, expectedQuery).Return(expectedResult, nil)
+
+	response, err := eventService.GetMetrics(req)
+	assert.NoError(t, err)
+	assert.NotNil(t, response)
+	assert.Equal(t, uint64(500), response.TotalCount)
+	assert.Equal(t, uint64(250), response.UniqueCount)
+	assert.Equal(t, "hour", response.GroupBy)
+	assert.Len(t, response.Groups, 3)
+	assert.Equal(t, "2024-08-12 14:00:00", response.Groups[0].GroupValue)
+	mockRepo.AssertExpectations(t)
+}
+
+func TestEventService_GetMetrics_GroupByDay(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	logger, _ := zap.NewDevelopment()
+	eventService := NewEventService(mockPublisher, mockRepo, logger)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1726067612, // ~30 days
+		GroupBy:   "day",
+	}
+
+	expectedQuery := repository.MetricsQuery{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1726067612,
+		GroupBy:   "day",
+	}
+
+	expectedResult := &repository.MetricsResult{
+		TotalCount:  5000,
+		UniqueCount: 2500,
+		Groups: []repository.MetricsGroupResult{
+			{GroupValue: "2024-08-12", TotalCount: 1500},
+			{GroupValue: "2024-08-13", TotalCount: 1800},
+			{GroupValue: "2024-08-14", TotalCount: 1700},
+		},
+	}
+
+	mockRepo.On("GetMetrics", mock.Anything, expectedQuery).Return(expectedResult, nil)
+
+	response, err := eventService.GetMetrics(req)
+	assert.NoError(t, err)
+	assert.NotNil(t, response)
+	assert.Equal(t, uint64(5000), response.TotalCount)
+	assert.Equal(t, uint64(2500),
response.UniqueCount)
+	assert.Equal(t, "day", response.GroupBy)
+	assert.Len(t, response.Groups, 3)
+	assert.Equal(t, "2024-08-12", response.Groups[0].GroupValue)
+	mockRepo.AssertExpectations(t)
+}
+
+func TestEventService_GetMetrics_InvalidGroupBy(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	logger, _ := zap.NewDevelopment()
+	eventService := NewEventService(mockPublisher, mockRepo, logger)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1723562012,
+		GroupBy:   "week", // Invalid group_by
+	}
+
+	response, err := eventService.GetMetrics(req)
+	assert.Error(t, err)
+	assert.Nil(t, response)
+	assert.Contains(t, err.Error(), "invalid group_by value")
+	assert.Contains(t, err.Error(), "week")
+	mockRepo.AssertNotCalled(t, "GetMetrics")
+}
+
+func TestEventService_GetMetrics_HourlyGroupingTooLargeRange(t *testing.T) {
+	mockPublisher := new(MockQueuePublisher)
+	mockRepo := new(MockEventRepository)
+	logger, _ := zap.NewDevelopment()
+	eventService := NewEventService(mockPublisher, mockRepo, logger)
+
+	req := &dto.GetMetricsRequest{
+		EventName: "product_view",
+		From:      1723475612,
+		To:        1723475612 + 91*24*3600, // 91 days - too large
+		GroupBy:   "hour",
+	}
+
+	response, err := eventService.GetMetrics(req)
+	assert.Error(t, err)
+	assert.Nil(t, response)
+	assert.Contains(t, err.Error(), "time range too large for hourly grouping")
+	assert.Contains(t, err.Error(), "91 days")
+	mockRepo.AssertNotCalled(t, "GetMetrics")
+}
diff --git a/internal/service/interfaces.go b/internal/service/interfaces.go
new file mode 100644
index 0000000..4c24390
--- /dev/null
+++ b/internal/service/interfaces.go
@@ -0,0 +1,12 @@
+package service
+
+import (
+	"github.com/BarkinBalci/event-analytics-service/internal/dto"
+)
+
+// EventServicer defines the interface for event service operations
+type EventServicer interface {
+	ProcessEvent(event *dto.PublishEventRequest) (string, error)
+
ProcessBulkEvents(events []dto.PublishEventRequest) ([]string, []string, error)
+	GetMetrics(req *dto.GetMetricsRequest) (*dto.GetMetricsResponse, error)
+}