From 5f30ac8f2c6a9e948bac34d573de173094cbeab7 Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 08:01:25 +0700 Subject: [PATCH 1/6] ci: setup github directory --- .github/CODEOWNERS | 5 + .github/CODE_OF_CONDUCT.md | 132 ++++++++++ .github/FUNDING.yml | 2 + .github/workflows/master.yml | 100 +++++++ .github/workflows/pr.yml | 56 ++++ .github/workflows/secret-scan.yml | 27 ++ .gitignore | 262 +++++++------------ .idea/.gitignore | 8 + .idea/inspectionProfiles/Project_Default.xml | 6 + .idea/modules.xml | 8 + .idea/semyi.iml | 9 + .idea/vcs.xml | 6 + backend/.gitignore | 23 ++ frontend/.gitignore | 134 +++++++++- frontend/package-lock.json | 2 +- frontend/package.json | 5 +- 16 files changed, 610 insertions(+), 175 deletions(-) create mode 100644 .github/CODEOWNERS create mode 100644 .github/CODE_OF_CONDUCT.md create mode 100644 .github/FUNDING.yml create mode 100644 .github/workflows/master.yml create mode 100644 .github/workflows/pr.yml create mode 100644 .github/workflows/secret-scan.yml create mode 100644 .idea/.gitignore create mode 100644 .idea/inspectionProfiles/Project_Default.xml create mode 100644 .idea/modules.xml create mode 100644 .idea/semyi.iml create mode 100644 .idea/vcs.xml create mode 100644 backend/.gitignore diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..4d7acd4 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,5 @@ +* @teknologi-umum/frontend-web @teknologi-umum/backend-go +frontend @teknologi-umum/frontend-web +backend @teknologi-umum/backend-go +.github @teknologi-umum/infrastructure +Dockerfile @teknologi-umum/infrastructure @teknologi-umum/frontend-web @teknologi-umum/backend-go diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..bed324d --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of + any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, + without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +opensource@teknologiumum.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..d412d85 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,2 @@ +github: teknologi-umum +custom: ["https://saweria.co/teknologiumum"] diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml new file mode 100644 index 0000000..281d44d --- /dev/null +++ b/.github/workflows/master.yml @@ -0,0 +1,100 @@ +name: Master + +on: + push: + branches: + - master + +jobs: + frontend: + name: Frontend + runs-on: ubuntu-latest + container: node:20-bookworm + timeout-minutes: 30 + defaults: + run: + working-directory: ./frontend + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: latest + + - name: Install dependencies + run: pnpm install + + - name: Lint + run: npx eslint --ignore-path .gitignore . + + - name: Build + run: pnpm run build + + - name: Test + run: pnpm run test + + - uses: codecov/codecov-action@v3 + + backend: + name: Backend + runs-on: ubuntu-latest + container: golang:1.22-bookworm + defaults: + run: + working-directory: ./backend + timeout-minutes: 30 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Build + run: go build . 
+ + - name: Test + run: go test -v + + - uses: codecov/codecov-action@v3 + + cd: + name: CD + needs: + - frontend + - backend + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to the Container registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ghcr.io/${{ github.repository }} + flavor: | + latest=false + tags: | + type=edge + type=sha + + - name: Build and push Docker image + uses: docker/build-push-action@v4 + with: + context: . + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + secrets: | + "SENTRY_DSN=${{ secrets.SENTRY_DSN }}" + "SENTRY_AUTH_TOKEN=${{ secrets.SENTRY_AUTH_TOKEN }}" diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 0000000..cc8cc03 --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,56 @@ +name: Master + +on: + pull_request: + +jobs: + frontend: + name: Frontend + runs-on: ubuntu-latest + container: node:20-bookworm + timeout-minutes: 30 + defaults: + run: + working-directory: ./frontend + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: latest + + - name: Install dependencies + run: pnpm install + + - name: Lint + run: npx eslint --ignore-path .gitignore . + + - name: Build + run: pnpm run build + + - name: Test + run: pnpm run test + + - uses: codecov/codecov-action@v3 + + backend: + name: Backend + runs-on: ubuntu-latest + container: golang:1.22-bookworm + defaults: + run: + working-directory: ./backend + timeout-minutes: 30 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Build + run: go build . 
+ + - name: Test + run: go test -v + + - uses: codecov/codecov-action@v3 diff --git a/.github/workflows/secret-scan.yml b/.github/workflows/secret-scan.yml new file mode 100644 index 0000000..286aedc --- /dev/null +++ b/.github/workflows/secret-scan.yml @@ -0,0 +1,27 @@ +name: Secret Scan + +on: + push: + branches: + - main + - master + pull_request: + +jobs: + trufflehog: + name: Trufflehog + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - name: Setup jq + uses: dcarbone/install-jq-action@v2.1.0 + + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: TruffleHog OSS + uses: trufflesecurity/trufflehog@main + with: + extra_args: --debug --only-verified diff --git a/.gitignore b/.gitignore index 88e7bae..2440828 100644 --- a/.gitignore +++ b/.gitignore @@ -1,171 +1,93 @@ -# Created by https://www.toptal.com/developers/gitignore/api/node,go -# Edit at https://www.toptal.com/developers/gitignore?templates=node,go +### VisualStudioCode template +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser -### Go ### -# If you prefer the allow list template instead of the deny list, see community template: -# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore -# -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ - -# Go workspace file -go.work - -### Go Patch ### -/vendor/ -/Godeps/ - -### Node ### -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* -.pnpm-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# Snowpack dependency directory (https://snowpack.dev/) -web_modules/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional stylelint cache -.stylelintcache - -# Microbundle cache -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variable files -.env -.env.development.local -.env.test.local -.env.production.local -.env.local - -# parcel-bundler cache (https://parceljs.org/) -.cache -.parcel-cache - -# Next.js build output -.next -out - -# Nuxt.js build / generate output -.nuxt -dist - -# Gatsby files -.cache/ -# Comment in the public line in if your project uses Gatsby and not Next.js -# https://nextjs.org/blog/next-9-1#public-directory-support -# public - -# vuepress build output -.vuepress/dist - -# vuepress v2.x temp and cache directory -.temp - -# Docusaurus cache and generated files -.docusaurus - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# TernJS port file -.tern-port - -# Stores VSCode versions used for testing VSCode extensions -.vscode-test - -# yarn v2 -.yarn/cache -.yarn/unplugged -.yarn/build-state.yml -.yarn/install-state.gz -.pnp.* - -### Node Patch ### -# Serverless Webpack directories 
-.webpack/ - -# Optional stylelint cache - -# SvelteKit build / generate output -.svelte-kit - -# End of https://www.toptal.com/developers/gitignore/api/node,go diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..13566b8 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..03d9549 --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..bfdd1f3 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/semyi.iml b/.idea/semyi.iml new file mode 100644 index 0000000..338a266 --- /dev/null +++ b/.idea/semyi.iml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..c8397c9 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..013b4b8 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,23 @@ +### Go template +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Go workspace file +go.work + diff --git a/frontend/.gitignore b/frontend/.gitignore index 76add87..6d2198e 100644 --- a/frontend/.gitignore +++ b/frontend/.gitignore @@ -1,2 +1,132 @@ -node_modules -dist \ No newline at end of file +### Node template +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) +web_modules/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Optional stylelint cache +.stylelintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variable files +.env +.env.development.local +.env.test.local 
+.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) +.cache +.parcel-cache + +# Next.js build output +.next +out + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and not Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# vuepress v2.x temp and cache directory +.temp +.cache + +# Docusaurus cache and generated files +.docusaurus + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Stores VSCode versions used for testing VSCode extensions +.vscode-test + +# yarn v2 +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* + diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 40bd8e7..f5283bf 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -7,7 +7,7 @@ "": { "name": "vite-template-solid", "version": "0.0.0", - "license": "MIT", + "license": "GPL-3.0", "dependencies": { "@fontsource/ibm-plex-sans": "^4.5.3", "@fontsource/libre-franklin": "^4.5.3", diff --git a/frontend/package.json b/frontend/package.json index bd90241..5a045a8 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -9,9 +9,10 @@ "serve": "vite preview", "lint:check": "eslint --ignore-path .gitignore .", "lint": "eslint --ignore-path .gitignore .", - "format": "prettier --write --ignore-path .gitignore ." + "format": "prettier --write --ignore-path .gitignore .", + "test": "exit 0" }, - "license": "MIT", + "license": "GPL-3.0", "devDependencies": { "@teknologi-umum/eslint-config-base": "^0.0.7", "@typescript-eslint/eslint-plugin": "^5.10.2", From 6f333599bc4be63d788b18f97dfe57fce0befef6 Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 08:02:19 +0700 Subject: [PATCH 2/6] ci: rename workflow name for PR --- .github/workflows/pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index cc8cc03..5029cb3 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -1,4 +1,4 @@ -name: Master +name: PR on: pull_request: From 34835b010b52db7849ec5dfc830b83798dc5b69e Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 08:20:20 +0700 Subject: [PATCH 3/6] chore: setup editorconfig --- .editorconfig | 22 ++++++++++++---------- .idea/semyi.iml | 2 +- backend/.editorconfig | 13 +++++++++++++ frontend/.editorconfig | 10 ++++++++++ 4 files changed, 36 insertions(+), 11 deletions(-) create mode 100644 backend/.editorconfig create mode 100644 frontend/.editorconfig diff --git a/.editorconfig b/.editorconfig index f43fadb..fcfc0a9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,18 +1,20 @@ root = true [*] -end_of_line = lf charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true -max_line_length = 80 - -[*.go] +end_of_line = lf indent_size = 4 -indent_style = tab +indent_style = space +insert_final_newline = false +max_line_length = 120 tab_width = 4 -[*.{js,jsx,ts,tsx,json,css,scss,less,sass,html,yml,md}] -indent_style = space +[{*.bash,*.sh,*.zsh}] +indent_size = 2 +tab_width = 2 + +[{*.har,*.jsb2,*.jsb3,*.json,*.jsonc,*.postman_collection,*.postman_collection.json,*.postman_environment,*.postman_environment.json,.babelrc,.eslintrc,bowerrc,jest.config}] +indent_size = 2 + +[{*.yaml,*.yml}] indent_size = 2 -end_of_line = lf \ No 
newline at end of file diff --git a/.idea/semyi.iml b/.idea/semyi.iml index 338a266..27a3109 100644 --- a/.idea/semyi.iml +++ b/.idea/semyi.iml @@ -6,4 +6,4 @@ - \ No newline at end of file + diff --git a/backend/.editorconfig b/backend/.editorconfig new file mode 100644 index 0000000..00d37e3 --- /dev/null +++ b/backend/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = false +max_line_length = 120 +tab_width = 4 + +[{*.go,*.go2}] +indent_style = tab diff --git a/frontend/.editorconfig b/frontend/.editorconfig new file mode 100644 index 0000000..546a2c8 --- /dev/null +++ b/frontend/.editorconfig @@ -0,0 +1,10 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 2 +indent_style = space +insert_final_newline = false +max_line_length = 120 +tab_width = 2 From 7de851e264b7681492ee2ac3ffb1699b7de528eb Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 08:21:17 +0700 Subject: [PATCH 4/6] ci: disable vcs stamping for go --- .github/workflows/master.yml | 2 +- .github/workflows/pr.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 281d44d..e72ecff 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -50,7 +50,7 @@ jobs: uses: actions/checkout@v4 - name: Build - run: go build . + run: go build -buildvcs=false . - name: Test run: go test -v diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 5029cb3..d3298ce 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -48,7 +48,7 @@ jobs: uses: actions/checkout@v4 - name: Build - run: go build . + run: go build -buildvcs=false . - name: Test run: go test -v From 6a6a53b1147f4ac04f18ba3911d1f549802d8eb1 Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 12:41:11 +0700 Subject: [PATCH 5/6] refactor(backend): total refactor to duckdb and better central broker --- .idea/sqldialects.xml | 6 + backend/broker.go | 137 ++++++++ backend/broker_test.go | 40 +++ backend/config.go | 166 +++++++--- backend/go.mod | 45 +-- backend/go.sum | 243 ++++---------- backend/http.go | 310 +++++------------- backend/main.go | 181 ++++------ backend/migration.go | 155 +++++++-- .../20240523085502_monitor_historical.sql | 38 +++ backend/monitor_historical_reader.go | 31 ++ backend/monitor_historical_writer.go | 33 ++ backend/monitor_processor.go | 35 ++ backend/queue.go | 44 --- backend/snapshot.go | 57 ---- backend/subscriber.go | 107 ++---- backend/webhook.go | 2 +- backend/worker.go | 195 +++++++---- backend/worker_test.go | 81 +++++ 19 files changed, 1048 insertions(+), 858 deletions(-) create mode 100644 .idea/sqldialects.xml create mode 100644 backend/broker.go create mode 100644 backend/broker_test.go create mode 100644 backend/migrations/20240523085502_monitor_historical.sql create mode 100644 backend/monitor_historical_reader.go create mode 100644 backend/monitor_historical_writer.go create mode 100644 backend/monitor_processor.go delete mode 100644 backend/queue.go delete mode 100644 backend/snapshot.go create mode 100644 backend/worker_test.go diff --git a/.idea/sqldialects.xml b/.idea/sqldialects.xml new file mode 100644 index 0000000..9708136 --- /dev/null +++ b/.idea/sqldialects.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/backend/broker.go b/backend/broker.go new file mode 100644 index 0000000..73ad1c3 --- /dev/null +++ 
b/backend/broker.go @@ -0,0 +1,137 @@ +package main + +import ( + "sync" + + "github.com/google/uuid" +) + +// Code acquired from https://github.com/go-micro/go-micro/blob/v1.18.0/broker/memory/memory.go Apache-2.0 license. +// The code was modified to fit the project's codebase. +// +// Copyright 2015 Asim Aslam. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// BrokerCallbackHandler is used to process messages via a subscription of a topic. +// The handler is passed a publication interface which contains the +// message and optional Ack method to acknowledge receipt of the message. +type BrokerCallbackHandler[T any] func(BrokerEvent[T]) error + +type BrokerMessage[T any] struct { + Header map[string]string + Body T +} + +// BrokerEvent is given to a subscription handler for processing +type BrokerEvent[T any] interface { + Topic() string + Message() *BrokerMessage[T] + Ack() error +} + +type Broker[T any] struct { + sync.RWMutex + Subscribers map[string][]*BrokerSubscriber[T] +} + +type memoryEvent[T any] struct { + topic string + message *BrokerMessage[T] +} + +type BrokerSubscriber[T any] struct { + id string + topic string + exit chan bool + handler BrokerCallbackHandler[T] +} + +func (m *Broker[T]) Publish(topic string, message *BrokerMessage[T]) error { + m.RLock() + + subs, ok := m.Subscribers[topic] + m.RUnlock() + if !ok { + return nil + } + + p := &memoryEvent[T]{ + topic: topic, + message: message, + } + + for _, sub := range subs { + if err := sub.handler(p); err != nil { + return err + } + } + + return nil +} + +func (m *Broker[T]) Subscribe(topic string, callback BrokerCallbackHandler[T]) (*BrokerSubscriber[T], error) { + sub := &BrokerSubscriber[T]{ + id: uuid.New().String(), + topic: topic, + exit: make(chan bool, 1), + handler: callback, + } + + m.Lock() + m.Subscribers[topic] = append(m.Subscribers[topic], sub) + m.Unlock() + + go func() { + <-sub.exit + m.Lock() + var newSubscribers []*BrokerSubscriber[T] + for _, sb := range m.Subscribers[topic] { + if sb.id == sub.id { + continue + } + newSubscribers = append(newSubscribers, sb) + } + m.Subscribers[topic] = newSubscribers + m.Unlock() + }() + + return sub, nil +} + +func (m *memoryEvent[T]) Topic() string { + return m.topic +} + +func (m *memoryEvent[T]) Message() *BrokerMessage[T] { + return m.message +} + +func (m *memoryEvent[T]) Ack() error { + return nil +} + +func (m *BrokerSubscriber[T]) Topic() string { + return m.topic +} + +func (m *BrokerSubscriber[T]) Unsubscribe() error { + m.exit <- true + return nil +} + +func NewBroker[T any]() *Broker[T] { + return &Broker[T]{ + Subscribers: make(map[string][]*BrokerSubscriber[T]), + } +} diff --git a/backend/broker_test.go b/backend/broker_test.go new file mode 100644 index 0000000..2c64c7b --- /dev/null +++ b/backend/broker_test.go @@ -0,0 +1,40 @@ +package main_test + +import ( + "fmt" + "testing" + + main "semyi" +) + +func TestMemoryBroker(t *testing.T) { + b := main.NewBroker[string]() + + topic := "test" + count := 10 + + sub, err := 
b.Subscribe(topic, func(event main.BrokerEvent[string]) error { + return nil + }) + if err != nil { + t.Fatalf("Unexpected error subscribing %v", err) + } + + for i := 0; i < count; i++ { + message := &main.BrokerMessage[string]{ + Header: map[string]string{ + "foo": "bar", + "id": fmt.Sprintf("%d", i), + }, + Body: "Hello world", + } + + if err := b.Publish(topic, message); err != nil { + t.Fatalf("Unexpected error publishing %d", i) + } + } + + if err := sub.Unsubscribe(); err != nil { + t.Fatalf("Unexpected error unsubscribing from %s: %v", topic, err) + } +} diff --git a/backend/config.go b/backend/config.go index beb7c15..bf7d38a 100644 --- a/backend/config.go +++ b/backend/config.go @@ -6,81 +6,165 @@ import ( "io" "net/url" "os" + "path" + + "github.com/BurntSushi/toml" + "github.com/rs/zerolog/log" + "gopkg.in/yaml.v3" ) type ConfigurationFile struct { - Endpoints []Endpoint `json:"endpoints"` - Webhook Webhook `json:"webhook"` + Monitors []Monitor `json:"monitors"` + Webhook Webhook `json:"webhook"` } -type Endpoint struct { - Name string `json:"name"` - URL string `json:"url"` - Description string `json:"description"` - Timeout int `json:"timeout"` - Interval int `json:"interval"` - Headers map[string]string `json:"headers"` - Method string `json:"method"` +type MonitorType string + +const ( + MonitorTypeHTTP MonitorType = "http" + MonitorTypePing MonitorType = "ping" +) + +type Monitor struct { + // UniqueID specifies unique identifier for each monitor. In any case of the monitor configuration value get + // changed (name, description, public monitorIds, etc), if users want to keep the data intact, they should keep the + // UniqueID the same. + UniqueID string `json:"unique_id" yaml:"unique_id" toml:"unique_id"` + // Name specifies the display name that will be shown in the dashboard. + Name string `json:"name" yaml:"name" toml:"name"` + // Description specifies the description of the monitor. This is helpful as a friendly description of what + // we are monitoring (e.g., "Push notification for email and SMS"). + Description string `json:"description" yaml:"description" toml:"description"` + // PublicUrl specifies the public URL that will be shown in the dashboard. This is helpful to provide a different + // public URL rather than providing the exact URL that's used for the HTTP monitor. + PublicUrl string `json:"public_url" yaml:"public_url" toml:"public_url"` + // Type specifies the type of monitor. It can be either "http" or "ping". + Type MonitorType `json:"type" yaml:"type" toml:"type"` + // Interval specifies the interval of each check in seconds. It must not be less or equal to zero. + Interval int `json:"interval" yaml:"interval" toml:"interval"` + // Timeout specifies the timeout for each check in seconds. It must not be less or equal to than zero. + Timeout int `json:"timeout" yaml:"timeout" toml:"timeout"` + // HttpHeaders specifies additional headers that are used for the HTTP request. It's a key-value pair where the key + // specifies the header name and the value specifies the header value. This is optional. + HttpHeaders map[string]string `json:"headers" yaml:"headers" toml:"headers"` + // HttpMethod specifies the HTTP method that will be used for the HTTP request. It can be anything. + // If not provided, it'll default to "GET". + HttpMethod string `json:"method" yaml:"method" toml:"method"` + // HttpEndpoint specifies the HTTP monitor that will be used for the HTTP request. It must be a valid URL. 
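+	// For example (illustrative only), this could point at a health-check route such as
+	// "https://example.com/healthz".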
+ HttpEndpoint string `json:"monitor" yaml:"monitor" toml:"monitor"` + // HttpExpectedStatusCode specifies the expected status code for the HTTP request. If the status code is not the same + // as the expected status code, it'll be considered as a failed check. The format of the value follows Caddy's health + // check format: 200, 2xx, 200-300, 200-400, 2xx-4xx. This is optional. Defaults to 2xx. + HttpExpectedStatusCode string `json:"http_expected_status_code" yaml:"http_expected_status_code" toml:"http_expected_status_code"` + // IcmpHostname specifies the hostname that will be used for the ICMP request. It must be a valid hostname. + IcmpHostname string `json:"hostname" yaml:"hostname" toml:"hostname"` + // IcmpPacketSize specifies the packet size that will be used for the ICMP request. It must be greater than zero. + // The default packet size is 56 bytes. + IcmpPacketSize int `json:"packet_size" yaml:"packet_size" toml:"packet_size"` +} + +func (m Monitor) MarshalJSON() ([]byte, error) { + // We can't let everything be marshaled as is because we don't want to expose the configuration to be public. + return json.Marshal(map[string]any{ + "id": m.UniqueID, + "name": m.Name, + "description": m.Description, + "public_url": m.PublicUrl, + }) } type Webhook struct { - URL string `json:"url"` - SuccessResponse bool `json:"success_response"` - FailedResponse bool `json:"failed_response"` + URL string `json:"monitorIds" yaml:"monitorIds" toml:"monitorIds"` + SuccessResponse bool `json:"success_response" yaml:"success_response" toml:"success_response"` + FailedResponse bool `json:"failed_response" yaml:"failed_response" toml:"failed_response"` } -func ReadConfigurationFile(path string) (ConfigurationFile, error) { - if path == "" { - path = "../config.json" +func ReadConfigurationFile(filePath string) (ConfigurationFile, error) { + if filePath == "" { + filePath = "../config.json" } - file, err := os.Open(path) + file, err := os.Open(filePath) if err != nil { - return ConfigurationFile{}, fmt.Errorf("failed to open configuration file: %v", err) + return ConfigurationFile{}, fmt.Errorf("failed to open configuration file: %w", err) } - defer file.Close() + defer func() { + err := file.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close configuration file") + } + }() data, err := io.ReadAll(file) if err != nil { - return ConfigurationFile{}, fmt.Errorf("failed to read configuration file: %v", err) + return ConfigurationFile{}, fmt.Errorf("failed to read configuration file: %w", err) } var configurationFile ConfigurationFile - err = json.Unmarshal(data, &configurationFile) - if err != nil { - return ConfigurationFile{}, fmt.Errorf("failed to parse configuration file: %v", err) + + switch path.Ext(filePath) { + case ".json": + err := json.Unmarshal(data, &configurationFile) + if err != nil { + return ConfigurationFile{}, fmt.Errorf("failed to parse configuration file: %w", err) + } + break + case ".yml": + fallthrough + case ".yaml": + err := yaml.Unmarshal(data, &configurationFile) + if err != nil { + return ConfigurationFile{}, fmt.Errorf("failed to parse configuration file: %w", err) + } + case ".toml": + err := toml.Unmarshal(data, &configurationFile) + if err != nil { + return ConfigurationFile{}, fmt.Errorf("failed to parse configuration file: %w", err) + } + default: + return ConfigurationFile{}, fmt.Errorf("invalid configuration file format") } return configurationFile, nil } -func ValidateEndpoint(config Endpoint) (bool, error) { - if config.Name == "" { - return false, 
fmt.Errorf("name is required") - } - - if config.URL == "" { - return false, fmt.Errorf("url is required") - } else { - // try to parse url - _, err := url.Parse(config.URL) - if err != nil { - return false, fmt.Errorf("invalid url: %v", err) - } +func (m Monitor) Validate() (bool, error) { + if m.UniqueID == "" { + return false, fmt.Errorf("unique_id is required") } - if config.Description == "" { - return false, fmt.Errorf("description is required") + if m.Name == "" { + return false, fmt.Errorf("name is required") } - if config.Timeout < 0 { + if m.Timeout < 0 { return false, fmt.Errorf("timeout must be greater than 0") } - if config.Interval < 0 { + if m.Interval < 0 { return false, fmt.Errorf("interval must be greater than 0") } + switch m.Type { + case MonitorTypeHTTP: + if m.HttpEndpoint == "" { + return false, fmt.Errorf("monitor is required") + } else { + // try to parse monitorIds + _, err := url.Parse(m.HttpEndpoint) + if err != nil { + return false, fmt.Errorf("invalid monitorIds: %v", err) + } + } + + case MonitorTypePing: + if m.IcmpHostname == "" { + return false, fmt.Errorf("hostname is required") + } + default: + return false, fmt.Errorf("invalid monitor type") + } + return true, nil } @@ -89,7 +173,7 @@ func ValidateWebhook(webhook Webhook) (bool, error) { // Try to parse the given URL _, err := url.Parse(webhook.URL) if err != nil { - return false, fmt.Errorf("invalid url: %v", err) + return false, fmt.Errorf("invalid monitorIds: %v", err) } } diff --git a/backend/go.mod b/backend/go.mod index b93cdd0..e62caa6 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,30 +1,37 @@ module semyi -go 1.17 +go 1.21 + +toolchain go1.22.2 require ( + github.com/BurntSushi/toml v1.3.2 github.com/allegro/bigcache/v3 v3.0.1 + github.com/go-chi/chi/v5 v5.0.12 + github.com/marcboeker/go-duckdb v1.6.5 + github.com/prometheus-community/pro-bing v0.4.0 github.com/rs/cors v1.8.2 + github.com/rs/zerolog v1.32.0 github.com/unrolled/secure v1.0.9 - modernc.org/sqlite v1.14.5 + gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/google/uuid v1.3.0 // indirect - github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect - golang.org/x/mod v0.3.0 // indirect - golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect - golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - lukechampine.com/uint128 v1.1.1 // indirect - modernc.org/cc/v3 v3.35.22 // indirect - modernc.org/ccgo/v3 v3.15.1 // indirect - modernc.org/libc v1.14.1 // indirect - modernc.org/mathutil v1.4.1 // indirect - modernc.org/memory v1.0.5 // indirect - modernc.org/opt v0.1.1 // indirect - modernc.org/strutil v1.1.1 // indirect - modernc.org/token v1.0.0 // indirect + github.com/apache/arrow/go/v14 v14.0.2 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/google/flatbuffers v23.5.26+incompatible // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/net v0.21.0 // indirect + 
golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/tools v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 1a8d9c9..61a506c 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,181 +1,82 @@ +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/allegro/bigcache/v3 v3.0.1 h1:Q4Xl3chywXuJNOw7NV+MeySd3zGQDj4KCpkCg0te8mc= github.com/allegro/bigcache/v3 v3.0.1/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-sqlite3 v1.14.10 h1:MLn+5bFRlWMGoSRmJour3CL1w/qL96mvipqpwQW/Sfk= -github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= +github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= +github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/marcboeker/go-duckdb v1.6.5 h1:XCfR1JVZxsemcSPxRQKK0R0ESfgRMHTEqh3Y+dv40SI= +github.com/marcboeker/go-duckdb v1.6.5/go.mod h1:WtWeqqhZoTke/Nbd7V9lnBx7I2/A/q0SAq/urGzPCMs= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= +github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/unrolled/secure v1.0.9 h1:BWRuEb1vDrBFFDdbCnKkof3gZ35I/bnHGyt0LB0TNyQ= github.com/unrolled/secure v1.0.9/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI= github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78 h1:M8tBwCtWD/cZV9DZpFYRUgaymAYAr+aIUTWzDaM3uPs= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.33.9/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.33.11/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.34.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.4/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.5/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.7/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.8/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.16/go.mod 
h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.20/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.22 h1:BzShpwCAP7TWzFppM4k2t03RhXhgYqaibROWkrWq7lE= -modernc.org/cc/v3 v3.35.22/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= -modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= -modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI= -modernc.org/ccgo/v3 v3.11.1/go.mod h1:lWHxfsn13L3f7hgGsGlU28D9eUOf6y3ZYHKoPaKU0ag= -modernc.org/ccgo/v3 v3.11.3/go.mod h1:0oHunRBMBiXOKdaglfMlRPBALQqsfrCKXgw9okQ3GEw= -modernc.org/ccgo/v3 v3.12.4/go.mod h1:Bk+m6m2tsooJchP/Yk5ji56cClmN6R1cqc9o/YtbgBQ= -modernc.org/ccgo/v3 v3.12.6/go.mod h1:0Ji3ruvpFPpz+yu+1m0wk68pdr/LENABhTrDkMDWH6c= -modernc.org/ccgo/v3 v3.12.8/go.mod h1:Hq9keM4ZfjCDuDXxaHptpv9N24JhgBZmUG5q60iLgUo= -modernc.org/ccgo/v3 v3.12.11/go.mod h1:0jVcmyDwDKDGWbcrzQ+xwJjbhZruHtouiBEvDfoIsdg= -modernc.org/ccgo/v3 v3.12.14/go.mod h1:GhTu1k0YCpJSuWwtRAEHAol5W7g1/RRfS4/9hc9vF5I= -modernc.org/ccgo/v3 v3.12.18/go.mod h1:jvg/xVdWWmZACSgOiAhpWpwHWylbJaSzayCqNOJKIhs= -modernc.org/ccgo/v3 v3.12.20/go.mod h1:aKEdssiu7gVgSy/jjMastnv/q6wWGRbszbheXgWRHc8= -modernc.org/ccgo/v3 v3.12.21/go.mod h1:ydgg2tEprnyMn159ZO/N4pLBqpL7NOkJ88GT5zNU2dE= -modernc.org/ccgo/v3 v3.12.22/go.mod h1:nyDVFMmMWhMsgQw+5JH6B6o4MnZ+UQNw1pp52XYFPRk= -modernc.org/ccgo/v3 v3.12.25/go.mod h1:UaLyWI26TwyIT4+ZFNjkyTbsPsY3plAEB6E7L/vZV3w= -modernc.org/ccgo/v3 v3.12.29/go.mod h1:FXVjG7YLf9FetsS2OOYcwNhcdOLGt8S9bQ48+OP75cE= -modernc.org/ccgo/v3 v3.12.36/go.mod h1:uP3/Fiezp/Ga8onfvMLpREq+KUjUmYMxXPO8tETHtA8= -modernc.org/ccgo/v3 v3.12.38/go.mod h1:93O0G7baRST1vNj4wnZ49b1kLxt0xCW5Hsa2qRaZPqc= -modernc.org/ccgo/v3 v3.12.43/go.mod h1:k+DqGXd3o7W+inNujK15S5ZYuPoWYLpF5PYougCmthU= -modernc.org/ccgo/v3 v3.12.46/go.mod h1:UZe6EvMSqOxaJ4sznY7b23/k13R8XNlyWsO5bAmSgOE= -modernc.org/ccgo/v3 v3.12.47/go.mod h1:m8d6p0zNps187fhBwzY/ii6gxfjob1VxWb919Nk1HUk= -modernc.org/ccgo/v3 v3.12.50/go.mod h1:bu9YIwtg+HXQxBhsRDE+cJjQRuINuT9PUK4orOco/JI= -modernc.org/ccgo/v3 v3.12.51/go.mod h1:gaIIlx4YpmGO2bLye04/yeblmvWEmE4BBBls4aJXFiE= -modernc.org/ccgo/v3 v3.12.53/go.mod h1:8xWGGTFkdFEWBEsUmi+DBjwu/WLy3SSOrqEmKUjMeEg= -modernc.org/ccgo/v3 v3.12.54/go.mod h1:yANKFTm9llTFVX1FqNKHE0aMcQb1fuPJx6p8AcUx+74= -modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7IeU= -modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU= -modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc= -modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM= -modernc.org/ccgo/v3 v3.12.66/go.mod h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ= -modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84= -modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ= -modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY= -modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w= -modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU= -modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko= -modernc.org/ccgo/v3 v3.12.92/go.mod 
h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA= -modernc.org/ccgo/v3 v3.13.1/go.mod h1:aBYVOUfIlcSnrsRVU8VRS35y2DIfpgkmVkYZ0tpIXi4= -modernc.org/ccgo/v3 v3.14.0/go.mod h1:hBrkiBlUwvr5vV/ZH9YzXIp982jKE8Ek8tR1ytoAL6Q= -modernc.org/ccgo/v3 v3.15.1 h1:bagyhO7uFlYWedkh6mfIYf8LZGYnVGPYh2FqXisaOV4= -modernc.org/ccgo/v3 v3.15.1/go.mod h1:md59wBwDT2LznX/OTCPoVS6KIsdRgY8xqQwBV+hkTH0= -modernc.org/ccorpus v1.11.1 h1:K0qPfpVG1MJh5BYazccnmhywH4zHuOgJXgbjzyp6dWA= -modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= -modernc.org/libc v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q= -modernc.org/libc v1.11.0/go.mod h1:2lOfPmj7cz+g1MrPNmX65QCzVxgNq2C5o0jdLY2gAYg= -modernc.org/libc v1.11.2/go.mod h1:ioIyrl3ETkugDO3SGZ+6EOKvlP3zSOycUETe4XM4n8M= -modernc.org/libc v1.11.5/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU= -modernc.org/libc v1.11.6/go.mod h1:ddqmzR6p5i4jIGK1d/EiSw97LBcE3dK24QEwCFvgNgE= -modernc.org/libc v1.11.11/go.mod h1:lXEp9QOOk4qAYOtL3BmMve99S5Owz7Qyowzvg6LiZso= -modernc.org/libc v1.11.13/go.mod h1:ZYawJWlXIzXy2Pzghaf7YfM8OKacP3eZQI81PDLFdY8= -modernc.org/libc v1.11.16/go.mod h1:+DJquzYi+DMRUtWI1YNxrlQO6TcA5+dRRiq8HWBWRC8= -modernc.org/libc v1.11.19/go.mod h1:e0dgEame6mkydy19KKaVPBeEnyJB4LGNb0bBH1EtQ3I= -modernc.org/libc v1.11.24/go.mod h1:FOSzE0UwookyT1TtCJrRkvsOrX2k38HoInhw+cSCUGk= -modernc.org/libc v1.11.26/go.mod h1:SFjnYi9OSd2W7f4ct622o/PAYqk7KHv6GS8NZULIjKY= -modernc.org/libc v1.11.27/go.mod h1:zmWm6kcFXt/jpzeCgfvUNswM0qke8qVwxqZrnddlDiE= -modernc.org/libc v1.11.28/go.mod h1:Ii4V0fTFcbq3qrv3CNn+OGHAvzqMBvC7dBNyC4vHZlg= -modernc.org/libc v1.11.31/go.mod h1:FpBncUkEAtopRNJj8aRo29qUiyx5AvAlAxzlx9GNaVM= -modernc.org/libc v1.11.34/go.mod h1:+Tzc4hnb1iaX/SKAutJmfzES6awxfU1BPvrrJO0pYLg= -modernc.org/libc v1.11.37/go.mod h1:dCQebOwoO1046yTrfUE5nX1f3YpGZQKNcITUYWlrAWo= -modernc.org/libc v1.11.39/go.mod h1:mV8lJMo2S5A31uD0k1cMu7vrJbSA3J3waQJxpV4iqx8= -modernc.org/libc v1.11.42/go.mod h1:yzrLDU+sSjLE+D4bIhS7q1L5UwXDOw99PLSX0BlZvSQ= -modernc.org/libc v1.11.44/go.mod h1:KFq33jsma7F5WXiYelU8quMJasCCTnHK0mkri4yPHgA= -modernc.org/libc v1.11.45/go.mod h1:Y192orvfVQQYFzCNsn+Xt0Hxt4DiO4USpLNXBlXg/tM= -modernc.org/libc v1.11.47/go.mod h1:tPkE4PzCTW27E6AIKIR5IwHAQKCAtudEIeAV1/SiyBg= -modernc.org/libc v1.11.49/go.mod h1:9JrJuK5WTtoTWIFQ7QjX2Mb/bagYdZdscI3xrvHbXjE= -modernc.org/libc v1.11.51/go.mod h1:R9I8u9TS+meaWLdbfQhq2kFknTW0O3aw3kEMqDDxMaM= -modernc.org/libc v1.11.53/go.mod h1:5ip5vWYPAoMulkQ5XlSJTy12Sz5U6blOQiYasilVPsU= -modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw= -modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M= -modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18= -modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8= -modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= -modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0= -modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI= -modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE= -modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY= -modernc.org/libc v1.11.88/go.mod 
h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ= -modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c= -modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI= -modernc.org/libc v1.12.0/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ= -modernc.org/libc v1.13.1/go.mod h1:npFeGWjmZTjFeWALQLrvklVmAxv4m80jnG3+xI8FdJk= -modernc.org/libc v1.13.2/go.mod h1:npFeGWjmZTjFeWALQLrvklVmAxv4m80jnG3+xI8FdJk= -modernc.org/libc v1.14.1 h1:rwx9uVJU/fEmsmV5ECGRwdAiXgUm6k6tsFA+L8kQb6E= -modernc.org/libc v1.14.1/go.mod h1:npFeGWjmZTjFeWALQLrvklVmAxv4m80jnG3+xI8FdJk= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/memory v1.0.5 h1:XRch8trV7GgvTec2i7jc33YlUI0RKVDBvZ5eZ5m8y14= -modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= -modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.14.5 h1:bYrrjwH9Y7QUGk1MbchZDhRfmpGuEAs/D45sVjNbfvs= -modernc.org/sqlite v1.14.5/go.mod h1:YyX5Rx0WbXokitdWl2GJIDy4BrPxBP0PwwhpXOHCDLE= -modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/tcl v1.10.0 h1:vux2MNFhSXYqD8Kq4Uc9RjWcgv2c7Atx3da3VpLPPEw= -modernc.org/tcl v1.10.0/go.mod h1:WzWapmP/7dHVhFoyPpEaNSVTL8xtewhouN/cqSJ5A2s= -modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.2.21/go.mod h1:uXrObx4pGqXWIMliC5MiKuwAyMrltzwpteOFUP1PWCc= -modernc.org/z v1.3.0 h1:4RWULo1Nvaq5ZBhbLe74u8p6tV4Mmm0ZrPBXYPm/xjM= -modernc.org/z v1.3.0/go.mod h1:+mvgLH814oDjtATDdT3rs84JnUIpkvAF5B8AVkNlE2g= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/backend/http.go b/backend/http.go index 8730c6f..80fad6b 100644 --- a/backend/http.go +++ b/backend/http.go @@ -1,77 +1,72 @@ package main import ( - "database/sql" "encoding/json" "fmt" "log" + "net" "net/http" - "os" - "strconv" + "slices" "strings" + "time" + "github.com/go-chi/chi/v5" "github.com/rs/cors" "github.com/unrolled/secure" ) -func (d *Deps) NewServer(port, staticPath string) *http.Server { - sslRedirect := os.Getenv("ENV") == "production" +type Server struct { + historicalReader *MonitorHistoricalReader + centralBroker *Broker[MonitorHistorical] +} + +type ServerConfig struct { + SSLRedirect bool + Environment string + Hostname string + Port string + StaticPath string + MonitorHistoricalReader *MonitorHistoricalReader + CentralBroker *Broker[MonitorHistorical] +} - if sslRedirectEnv, ok := os.LookupEnv("SSL_REDIRECT"); ok { - sslRedirect, _ = strconv.ParseBool(sslRedirectEnv) +func NewServer(config ServerConfig) *http.Server { + server := &Server{ + historicalReader: config.MonitorHistoricalReader, } secureMiddleware := secure.New(secure.Options{ BrowserXssFilter: true, ContentTypeNosniff: true, - SSLRedirect: sslRedirect, - IsDevelopment: os.Getenv("ENV") == "development", + SSLRedirect: config.SSLRedirect, + IsDevelopment: config.Environment == "development", }) corsMiddleware := cors.New(cors.Options{ - Debug: os.Getenv("ENV") == "development", + Debug: config.Environment == "development", AllowedOrigins: []string{"*"}, AllowedMethods: []string{"GET", "OPTIONS"}, AllowedHeaders: []string{"Content-Type"}, }) - api := http.NewServeMux() - api.HandleFunc("/api/overview", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { - d.snapshotOverview(w, r) - return - } - - w.WriteHeader(http.StatusMethodNotAllowed) - }) - api.HandleFunc("/api/by", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { - d.snapshotBy(w, r) - return - } - - w.WriteHeader(http.StatusMethodNotAllowed) - }) - api.HandleFunc("/api/static", func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { - d.staticSnapshot(w, r) - return - } - - w.WriteHeader(http.StatusMethodNotAllowed) - }) + api := chi.NewRouter() + api.Use(corsMiddleware.Handler) + api.Get("/api/overview", server.snapshotOverview) + api.Get("/api/by", 
server.snapshotBy) + api.Get("/api/static", server.staticSnapshot) - r := http.NewServeMux() + r := chi.NewRouter() + r.Use(secureMiddleware.Handler) r.Handle("/api/", corsMiddleware.Handler(api)) - r.Handle("/", http.FileServer(http.Dir(staticPath))) + r.Handle("/", http.FileServer(http.Dir(config.StaticPath))) return &http.Server{ - Addr: ":" + port, - Handler: secureMiddleware.Handler(r), + Addr: net.JoinHostPort(config.Hostname, config.Port), + Handler: r, } } -func (d *Deps) snapshotOverview(w http.ResponseWriter, r *http.Request) { +func (s *Server) snapshotOverview(w http.ResponseWriter, r *http.Request) { flusher, ok := w.(http.Flusher) if !ok { w.WriteHeader(http.StatusPreconditionFailed) @@ -80,26 +75,7 @@ func (d *Deps) snapshotOverview(w http.ResponseWriter, r *http.Request) { return } - endpointsBytes, err := d.Cache.Get("endpoint:urls") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) - return - } - - w.Header().Set("Connection", "keep-alive") - w.Header().Set("Cache-Control", "no-cache") - w.Header().Set("Content-Type", "text/event-stream") - w.WriteHeader(http.StatusOK) - - endpoints := strings.Split(string(endpointsBytes), ",") - sub, err := d.NewSubscriber(endpoints...) + subscriber, err := NewSubscriber(s.centralBroker, monitorIds...) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/json") @@ -116,7 +92,7 @@ func (d *Deps) snapshotOverview(w http.ResponseWriter, r *http.Request) { select { case <-r.Context().Done(): return - case data := <-sub.Listen(r.Context()): + case data := <-subscriber.Listen(r.Context()): marshaled, err := json.Marshal(data) if err != nil { log.Printf("failed to marshal data: %s", err) @@ -128,12 +104,14 @@ func (d *Deps) snapshotOverview(w http.ResponseWriter, r *http.Request) { } flusher.Flush() + default: + time.Sleep(time.Millisecond * 10) } } } -func (d *Deps) snapshotBy(w http.ResponseWriter, r *http.Request) { +func (s *Server) snapshotBy(w http.ResponseWriter, r *http.Request) { flusher, ok := w.(http.Flusher) if !ok { w.WriteHeader(http.StatusPreconditionFailed) @@ -142,32 +120,23 @@ func (d *Deps) snapshotBy(w http.ResponseWriter, r *http.Request) { return } - url := r.URL.Query().Get("url") - if url == "" { + ids := r.URL.Query().Get("ids") + if ids == "" { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"error": "url is required"}`)) + w.Write([]byte(`{"error":"ids is required"}`)) return } - endpointsBytes, err := d.Cache.Get("endpoint:urls") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) + wantedMonitorIds := strings.Split(ids, ",") + + for _, id := range wantedMonitorIds { + if !slices.Contains(monitorIds, id) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(`{"error": "id is not in the list of monitors"}`)) return } - w.Write(errBytes) - return - } - - if !strings.Contains(string(endpointsBytes), url) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusBadRequest) - 
w.Write([]byte(`{"error": "url is not in the list of endpoints"}`)) - return } w.Header().Set("Content-Type", "text/event-stream") @@ -175,8 +144,7 @@ func (d *Deps) snapshotBy(w http.ResponseWriter, r *http.Request) { w.Header().Set("Connection", "keep-alive") w.WriteHeader(http.StatusOK) - endpoints := strings.Split(url, ",") - sub, err := d.NewSubscriber(endpoints...) + sub, err := NewSubscriber(s.centralBroker, wantedMonitorIds...) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/json") @@ -205,177 +173,81 @@ func (d *Deps) snapshotBy(w http.ResponseWriter, r *http.Request) { } flusher.Flush() + default: + time.Sleep(time.Millisecond * 10) } } } -func (d *Deps) staticSnapshot(w http.ResponseWriter, r *http.Request) { - url := r.URL.Query().Get("url") - if url == "" { +func (s *Server) staticSnapshot(w http.ResponseWriter, r *http.Request) { + monitorId := r.URL.Query().Get("id") + if monitorId == "" { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"error": "url is required"}`)) + w.Write([]byte(`{"error": "id is required"}`)) return } - endpointsBytes, err := d.Cache.Get("endpoint:urls") - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) - return + interval := r.URL.Query().Get("interval") + if interval == "" { + interval = "hourly" } - endpoints := strings.Split(string(endpointsBytes), ",") - - if !contains(url, endpoints) { + if interval != "hourly" && interval != "daily" && interval != "raw" { w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "application/json") - w.Write([]byte(`{"error": "url is not in the list of endpoints"}`)) - return - } - - // acquire endpoint metadata from cache - endpointBytes, err := d.Cache.Get("endpoint:" + url) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) - return - } - - var endpoint Endpoint - err = json.Unmarshal(endpointBytes, &endpoint) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) + w.Write([]byte(`{"error": "interval must be hourly, daily, or raw"}`)) return } - c, err := d.DB.Conn(r.Context()) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) + if !slices.Contains(monitorIds, monitorId) { + w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) + w.Write([]byte(`{"error": "id is not in the list of monitors"}`)) return } - defer c.Close() - tx, err := c.BeginTx(r.Context(), &sql.TxOptions{Isolation: sql.LevelReadUncommitted, ReadOnly: true}) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", 
"application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) + // TODO: Acquire monitor metadata + var err error + var monitor Monitor + var monitorHistorical []MonitorHistorical + switch interval { + case "raw": + monitorHistorical, err = s.historicalReader.ReadRawHistorical(r.Context(), monitorId) if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) + // TODO: Handle error properly + w.WriteHeader(http.StatusInternalServerError) return } - w.Write(errBytes) - return - } - - rows, err := tx.QueryContext( - r.Context(), - `SELECT - url, - timeout, - interval, - status_code, - request_duration, - created_at - FROM - snapshot - WHERE - url = ? - ORDER BY - created_at DESC - LIMIT 100`, - url, - ) - if err != nil { - tx.Rollback() - w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) + break + case "hourly": + monitorHistorical, err = s.historicalReader.ReadHourlyHistorical(r.Context(), monitorId) if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) + // TODO: Handle error properly + w.WriteHeader(http.StatusInternalServerError) return } - w.Write(errBytes) - return - } - defer rows.Close() - - var snapshots []Response - for rows.Next() { - var snapshot Response - err := rows.Scan( - &snapshot.URL, - &snapshot.Timeout, - &snapshot.Interval, - &snapshot.StatusCode, - &snapshot.RequestDuration, - &snapshot.Timestamp, - ) + break + case "daily": + monitorHistorical, err = s.historicalReader.ReadDailyHistorical(r.Context(), monitorId) if err != nil { - tx.Rollback() + // TODO: Handle error properly w.WriteHeader(http.StatusInternalServerError) - w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) return } - - snapshot.Name = endpoint.Name - snapshot.Description = endpoint.Description - snapshot.Method = endpoint.Method - snapshot.Headers = endpoint.Headers - snapshot.Success = snapshot.StatusCode == http.StatusOK - - snapshots = append(snapshots, snapshot) - } - - err = tx.Commit() - if err != nil { - tx.Rollback() - w.WriteHeader(http.StatusInternalServerError) + break + default: + w.WriteHeader(http.StatusBadRequest) w.Header().Set("Content-Type", "application/json") - errBytes, err := json.Marshal(map[string]string{"error": err.Error()}) - if err != nil { - w.Write([]byte(`{"error": "internal server error"}`)) - return - } - w.Write(errBytes) + w.Write([]byte(`{"error": "interval must be hourly, daily, or raw"}`)) return } - data, err := json.Marshal(snapshots) + data, err := json.Marshal(map[string]any{ + "metadata": monitor, + "historical": monitorHistorical, + }) if err != nil { w.WriteHeader(http.StatusInternalServerError) w.Header().Set("Content-Type", "application/json") diff --git a/backend/main.go b/backend/main.go index 1f2f8f1..2b10288 100644 --- a/backend/main.go +++ b/backend/main.go @@ -3,9 +3,7 @@ package main import ( "context" "database/sql" - "encoding/json" "errors" - "log" "net/http" "os" "os/signal" @@ -14,17 +12,13 @@ import ( "time" "github.com/allegro/bigcache/v3" - _ "modernc.org/sqlite" + _ "github.com/marcboeker/go-duckdb" + "github.com/rs/zerolog/log" ) -type Deps struct { - DB *sql.DB - Queue *Queue - Cache *bigcache.BigCache - DefaultTimeout int - DefaultInterval int - Webhook *Webhook -} +var 
DefaultInterval int = 30 +var DefaultTimeout int = 10 +var monitorIds []string func main() { // Read environment variables @@ -35,7 +29,7 @@ func main() { dbPath, ok := os.LookupEnv("DB_PATH") if !ok { - dbPath = "../db.sqlite3" + dbPath = "../db.duckdb" } staticPath, ok := os.LookupEnv("STATIC_PATH") @@ -61,161 +55,106 @@ func main() { if os.Getenv("ENV") == "" { err := os.Setenv("ENV", "development") if err != nil { - log.Fatalf("Error setting ENV: %v", err) + log.Fatal().Err(err).Msg("Error setting ENV") } } - defTimeout, err := strconv.Atoi(defaultTimeout) + // Read configuration file + config, err := ReadConfigurationFile(configPath) if err != nil { - log.Fatalf("Failed to parse default timeout: %v", err) + log.Fatal().Err(err).Msg("failed to read configuration file") } - defInterval, err := strconv.Atoi(defaultInterval) + DefaultTimeout, err = strconv.Atoi(defaultTimeout) if err != nil { - log.Fatalf("Failed to parse default interval: %v", err) + log.Fatal().Err(err).Msg("Failed to parse default timeout") } - // Read configuration file - config, err := ReadConfigurationFile(configPath) + DefaultInterval, err = strconv.Atoi(defaultInterval) if err != nil { - log.Fatalf("failed to read configuration file: %v", err) + log.Fatal().Err(err).Msg("Failed to parse default interval") } - db, err := sql.Open("sqlite", dbPath) + db, err := sql.Open("duckdb", dbPath) if err != nil { - log.Fatalf("failed to open database: %v", err) + log.Fatal().Err(err).Msg("failed to open database") } - defer db.Close() + defer func(db *sql.DB) { + err := db.Close() + if err != nil { + log.Fatal().Err(err).Msg("failed to close database") + } + }(db) ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - err = Migrate(db, ctx) + err = Migrate(db, ctx, true) if err != nil { - log.Fatalf("failed to migrate database: %v", err) + log.Fatal().Err(err).Msg("failed to migrate database") } cache, err := bigcache.NewBigCache(bigcache.DefaultConfig(time.Hour * 24)) if err != nil { - log.Fatalf("failed to create cache: %v", err) + log.Fatal().Err(err).Msg("failed to create cache") } defer cache.Close() - deps := &Deps{ - DB: db, - Cache: cache, - Queue: NewQueue(), - DefaultTimeout: defTimeout, - DefaultInterval: defInterval, - Webhook: &config.Webhook, - } + processor := &Processor{} // Create a new worker - for _, endpoint := range config.Endpoints { - worker, err := deps.NewWorker(endpoint) - if err != nil { - log.Fatalf("Failed to create worker: %v", err) - } + for _, monitor := range config.Monitors { + monitorIds = append(monitorIds, monitor.UniqueID) - // register endpoint url into cache - err = deps.Cache.Append("endpoint:urls", []byte(endpoint.URL+",")) + worker, err := NewWorker(monitor, processor) if err != nil { - log.Fatalf("Failed to register endpoint url into cache: %v", err) + log.Fatal().Err(err).Msg("Failed to create worker") } - log.Printf("Registered endpoint: %s", endpoint.Name) + log.Info().Str("UniqueID", monitor.UniqueID).Str("Name", monitor.Name).Msg("Registered monitor") - go func() { + go func(worker *Worker) { defer func() { if r := recover(); r != nil { - log.Printf("[Running worker] Recovered from panic: %v", r) + log.Warn().Msgf("[Running worker] Recovered from panic: %v", r) } }() worker.Run() - }() - - // set the name, description, headers, and method of an endpoint - // to the cache - data, err := json.Marshal(endpoint) - if err != nil { - log.Fatalf("Failed to marshal endpoint: %v", err) - } - - err = deps.Cache.Set("endpoint:"+endpoint.URL, data) - if 
err != nil { - log.Fatalf("Failed to set endpoint data into cache: %v", err) - } + }(worker) } - // Dump snapshot every 5 seconds - go func() { - defer func() { - if r := recover(); r != nil { - log.Printf("[Dumping snapshot] Recovered from panic: %v", r) - } - }() - - for { - deps.Queue.Lock() - - if len(deps.Queue.Items) > 0 { - log.Printf("Dumping snapshot: %d items", len(deps.Queue.Items)) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - - err := deps.WriteSnapshot(ctx, deps.Queue.Items) - if err != nil { - cancel() - log.Printf("Failed to write snapshot: %v", err) - } - - if err == nil { - deps.Queue.Items = []Response{} - } + // TODO: Create a new worker that process monitor_historical data and create monitor_historical_hourly_aggregate from it + // TODO: Create a new worker that process monitor_historical data and create monitor_historical_daily_aggregate from it - cancel() - } + // TODO: Complete the ServerConfig + server := NewServer(ServerConfig{ + SSLRedirect: false, + Environment: "", + Hostname: "", + Port: "", + StaticPath: staticPath, + MonitorHistoricalReader: nil, + }) + go func() { + // Listen for SIGKILL and SIGTERM + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM) + <-signalChan - deps.Queue.Unlock() - time.Sleep(time.Second * 5) - } - }() + log.Info().Msg("Shutting down server...") + ctx, cancel = context.WithTimeout(context.Background(), time.Second*10) + defer cancel() - server := deps.NewServer(port, staticPath) - go func() { - defer func() { - if r := recover(); r != nil { - log.Printf("[HTTP Server] Recovered from panic: %v", r) - } - }() - - // Start the server - log.Printf("Starting server on port %s", port) - if e := server.ListenAndServe(); e != nil && !errors.Is(e, http.ErrServerClosed) { - log.Fatalf("Failed to start server: %v", e) + err = server.Shutdown(ctx) + if err != nil { + log.Fatal().Err(err).Msg("Failed to shutdown server") } }() - // Listen for SIGKILL and SIGTERM - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM) - <-signalChan - - log.Println("\nShutting down server...") - ctx, cancel = context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - err = server.Shutdown(ctx) - if err != nil { - log.Fatalf("Failed to shutdown server: %v", err) - } - - deps.Queue.Lock() - - err = deps.WriteSnapshot(ctx, deps.Queue.Items) - if err != nil { - log.Printf("Failed to write snapshot: %v", err) + // Start the server + log.Printf("Starting server on port %s", port) + if e := server.ListenAndServe(); e != nil && !errors.Is(e, http.ErrServerClosed) { + log.Fatal().Err(err).Msg("Failed to start server") } - - deps.Queue.Unlock() } diff --git a/backend/migration.go b/backend/migration.go index 3c25398..4ed8e89 100644 --- a/backend/migration.go +++ b/backend/migration.go @@ -1,60 +1,145 @@ package main import ( + "bufio" "context" "database/sql" + "embed" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/rs/zerolog/log" ) -func Migrate(db *sql.DB, ctx context.Context) error { - c, err := db.Conn(ctx) +//go:embed migrations/*.sql +var migrationFiles embed.FS + +func Migrate(db *sql.DB, ctx context.Context, directionUp bool) error { + dir, err := migrationFiles.ReadDir("./") if err != nil { - return err + return fmt.Errorf("failed to read migrations directory: %w", err) } - defer c.Close() - tx, err := c.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false}) - if err != nil { - return err + 
var files []string + + for _, file := range dir { + if file.IsDir() { + continue + } + + files = append(files, file.Name()) } - _, err = tx.ExecContext( - ctx, - `CREATE TABLE IF NOT EXISTS snapshot ( - url varchar(255) NOT NULL, - timeout integer DEFAULT 10, - interval integer DEFAULT 30, - status_code integer NOT NULL, - request_duration integer NOT NULL, - created_at timestamp NOT NULL - )`, - ) - if err != nil { - tx.Rollback() - return err + sort.SliceStable(files, func(i, j int) bool { + // Gather the first 14 characters of the file name. Then we can parse it as YYYYMMDDHHmmss and sort by that. + firstFileName := files[i][:14] + secondFileName := files[j][:14] + + firstTime, _ := strconv.ParseInt(firstFileName, 10, 64) + secondTime, _ := strconv.ParseInt(secondFileName, 10, 64) + + return firstTime < secondTime + }) + + var migrationScripts []string + + for _, file := range files { + content, err := migrationFiles.Open(file) + if err != nil { + if content != nil && content.Close != nil { + _ = content.Close() + } + + return fmt.Errorf("failed to read migration file: %w", err) + } + + var contentAccumulator strings.Builder + var foundStartMarker = false + scanner := bufio.NewScanner(content) + scanner.Split(bufio.ScanLines) + if directionUp { + // Read from the line that has "-- +goose Up" until the first occurrence of "-- +goose StatementEnd" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "-- +goose Up" { + foundStartMarker = true + continue + } + + if foundStartMarker { + if line == "-- +goose StatementEnd" { + break + } + + contentAccumulator.WriteString(line) + } + } + } else { + // Read from the line that has "-- +goose Down" until the first occurrence of "-- +goose StatementEnd" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "-- +goose Down" { + foundStartMarker = true + continue + } + + if foundStartMarker { + if line == "-- +goose StatementEnd" { + break + } + + contentAccumulator.WriteString(line) + } + } + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("failed to read migration file: %w", err) + } + + err = content.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close file") + } + + migrationScripts = append(migrationScripts, contentAccumulator.String()) } - _, err = tx.ExecContext( - ctx, - `CREATE INDEX IF NOT EXISTS snapshot_url_idx ON snapshot (url)`, - ) + c, err := db.Conn(ctx) if err != nil { - tx.Rollback() - return err + return fmt.Errorf("failed to open connection: %w", err) } + defer func() { + err := c.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close connection") + } + }() - _, err = tx.ExecContext( - ctx, - `CREATE INDEX IF NOT EXISTS snapshot_created_at_idx ON snapshot (created_at)`, - ) + tx, err := c.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false}) if err != nil { - tx.Rollback() - return err + return fmt.Errorf("failed to begin transaction: %w", err) + } + + for _, migrationScript := range migrationScripts { + _, err = tx.ExecContext( + ctx, + migrationScript, + ) + if err != nil { + if e := tx.Rollback(); e != nil { + return fmt.Errorf("failed to rollback transaction: %w (%s)", e, err.Error()) + } + + return fmt.Errorf("failed to execute migration script: %w", err) + } } err = tx.Commit() if err != nil { - tx.Rollback() - return err + return fmt.Errorf("failed to commit transaction: %w", err) } return nil diff --git a/backend/migrations/20240523085502_monitor_historical.sql 
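The loader above reads the embedded files with migrationFiles.ReadDir("./") and migrationFiles.Open(file), but entries created by //go:embed migrations/*.sql keep their migrations/ prefix, so the listing would come from ReadDir("migrations") and each open needs that prefix as well. The accumulator also joins lines with no separator, which lets the "-- +goose StatementBegin" comment line swallow the statements after it once the section becomes a single string. A minimal per-file section loader with those two adjustments, using only names already present in this file (bufio, strings, fmt, and the migrationFiles embed.FS); it is an illustrative sketch, not part of the patch:

// loadGooseSection returns the Up or Down portion of one embedded migration,
// keeping line boundaries so "--" comment lines do not comment out the
// statements that follow them.
func loadGooseSection(name string, directionUp bool) (string, error) {
	content, err := migrationFiles.Open("migrations/" + name)
	if err != nil {
		return "", fmt.Errorf("failed to open migration file: %w", err)
	}
	defer content.Close()

	marker := "-- +goose Up"
	if !directionUp {
		marker = "-- +goose Down"
	}

	var builder strings.Builder
	foundStartMarker := false
	scanner := bufio.NewScanner(content)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == marker {
			foundStartMarker = true
			continue
		}
		if !foundStartMarker {
			continue
		}
		if line == "-- +goose StatementEnd" {
			break
		}
		builder.WriteString(line)
		builder.WriteString("\n")
	}
	if err := scanner.Err(); err != nil {
		return "", fmt.Errorf("failed to read migration file: %w", err)
	}
	return builder.String(), nil
}

Migrate could then call loadGooseSection(file, directionUp) inside its loop in place of the inline scanning.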
b/backend/migrations/20240523085502_monitor_historical.sql new file mode 100644 index 0000000..d5fdc24 --- /dev/null +++ b/backend/migrations/20240523085502_monitor_historical.sql @@ -0,0 +1,38 @@ +-- +goose Up +-- +goose StatementBegin +CREATE TABLE monitor_historical ( + monitor_id VARCHAR(255) NOT NULL, + status SMALLINT NOT NULL, + latency INTEGER NOT NULL DEFAULT 0, + timestamp TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX monitor_historical_monitor_id_idx ON monitor_historical (monitor_id); + +CREATE TABLE monitor_historical_hourly_aggregate ( + timestamp TIMESTAMPTZ NOT NULL, + monitor_id VARCHAR(255) NOT NULL, + status SMALLINT NOT NULL, + latency INTEGER NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX monitor_historical_hourly_aggregate_monitor_id_timestamp_idx ON monitor_historical_hourly_aggregate (monitor_id, timestamp); + +CREATE TABLE monitor_historical_daily_aggregate ( + timestamp TIMESTAMPTZ NOT NULL, + monitor_id VARCHAR(255) NOT NULL, + status SMALLINT NOT NULL, + latency INTEGER NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX monitor_historical_daily_aggregate_monitor_id_timestamp_idx ON monitor_historical_daily_aggregate (monitor_id, timestamp); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE monitor_historical; +DROP TABLE monitor_historical_hourly_aggregate; +DROP TABLE monitor_historical_daily_aggregate; +-- +goose StatementEnd diff --git a/backend/monitor_historical_reader.go b/backend/monitor_historical_reader.go new file mode 100644 index 0000000..c365bc9 --- /dev/null +++ b/backend/monitor_historical_reader.go @@ -0,0 +1,31 @@ +package main + +import ( + "context" + "database/sql" +) + +type MonitorHistoricalReader struct { + db *sql.DB +} + +func NewMonitorHistoricalReader(db *sql.DB) *MonitorHistoricalReader { + return &MonitorHistoricalReader{db: db} +} + +func (r *MonitorHistoricalReader) ReadRawHistorical(ctx context.Context, monitorId string) ([]MonitorHistorical, error) { + panic("TODO: implement me!") +} + +func (r *MonitorHistoricalReader) ReadHourlyHistorical(ctx context.Context, monitorId string) ([]MonitorHistorical, error) { + panic("TODO: implement me!") +} + +func (r *MonitorHistoricalReader) ReadDailyHistorical(ctx context.Context, monitorId string) ([]MonitorHistorical, error) { + panic("TODO: implement me!") +} + +func (r *MonitorHistoricalReader) ReadRawLatest(ctx context.Context, monitorId string) (MonitorHistorical, error) { + // Get the latest entry from the raw historical table + panic("TODO: implement me!") +} diff --git a/backend/monitor_historical_writer.go b/backend/monitor_historical_writer.go new file mode 100644 index 0000000..2309ea8 --- /dev/null +++ b/backend/monitor_historical_writer.go @@ -0,0 +1,33 @@ +package main + +import ( + "database/sql" + "time" +) + +type MonitorStatus uint8 + +const ( + MonitorStatusSuccess MonitorStatus = iota + MonitorStatusFailure +) + +type MonitorHistorical struct { + MonitorID string + Status MonitorStatus + Latency int64 + Timestamp time.Time +} + +type MonitorHistoricalWriter struct { + db *sql.DB +} + +func NewMonitorHistoricalWriter(db *sql.DB) *MonitorHistoricalWriter { + return &MonitorHistoricalWriter{db: db} +} + +func (w *MonitorHistoricalWriter) Write(historical MonitorHistorical) error { + // TODO: Work on me + return nil +} diff --git a/backend/monitor_processor.go b/backend/monitor_processor.go new file mode 100644 index 
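The reader and writer files above leave their query bodies as TODOs. For reference, a minimal sketch of the raw write and read paths against the monitor_historical table created by the migration in this patch; the ? placeholder style, the descending order, the LIMIT of 100 (carried over from the old snapshot query), and the extra fmt import are assumptions rather than part of the patch:

func (w *MonitorHistoricalWriter) Write(historical MonitorHistorical) error {
	_, err := w.db.Exec(
		`INSERT INTO monitor_historical (monitor_id, status, latency, timestamp)
		 VALUES (?, ?, ?, ?)`,
		historical.MonitorID,
		int(historical.Status),
		historical.Latency,
		historical.Timestamp,
	)
	if err != nil {
		return fmt.Errorf("failed to insert monitor historical entry: %w", err)
	}
	return nil
}

func (r *MonitorHistoricalReader) ReadRawHistorical(ctx context.Context, monitorId string) ([]MonitorHistorical, error) {
	rows, err := r.db.QueryContext(
		ctx,
		`SELECT monitor_id, status, latency, timestamp
		 FROM monitor_historical
		 WHERE monitor_id = ?
		 ORDER BY timestamp DESC
		 LIMIT 100`,
		monitorId,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to query monitor historical entries: %w", err)
	}
	defer rows.Close()

	var historical []MonitorHistorical
	for rows.Next() {
		var entry MonitorHistorical
		var status int
		if err := rows.Scan(&entry.MonitorID, &status, &entry.Latency, &entry.Timestamp); err != nil {
			return nil, fmt.Errorf("failed to scan monitor historical entry: %w", err)
		}
		entry.Status = MonitorStatus(status)
		historical = append(historical, entry)
	}
	return historical, rows.Err()
}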
0000000..3652ac2 --- /dev/null +++ b/backend/monitor_processor.go @@ -0,0 +1,35 @@ +package main + +import "github.com/rs/zerolog/log" + +type Processor struct { + historicalWriter *MonitorHistoricalWriter + historicalReader *MonitorHistoricalReader +} + +func (m *Processor) ProcessResponse(response Response) { + status := MonitorStatusFailure + if response.Success { + status = MonitorStatusSuccess + } + + uniqueId := response.Monitor.UniqueID + if len(uniqueId) >= 255 { + // Truncate the unique ID if it's too long + uniqueId = uniqueId[:255] + } + + // TODO: Retry write if it fails + // Write the response to the historical writer + err := m.historicalWriter.Write(MonitorHistorical{ + MonitorID: uniqueId, + Status: status, + Latency: response.RequestDuration, + Timestamp: response.Timestamp, + }) + if err != nil { + log.Error().Err(err).Msg("failed to write historical data") + } + + // TODO: If the current status is different from the last status, send an alert notification +} diff --git a/backend/queue.go b/backend/queue.go deleted file mode 100644 index f73674d..0000000 --- a/backend/queue.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "errors" - "sync" -) - -var ErrEmptyQueue = errors.New("Queue is empty") - -type Queue struct { - sync.RWMutex - Items []Response -} - -func NewQueue() *Queue { - return &Queue{ - Items: []Response{}, - } -} - -func (q *Queue) Dequeue() (Response, error) { - q.Lock() - defer q.Unlock() - - if len(q.Items) == 0 { - return Response{}, ErrEmptyQueue - } - - item := q.Items[0] - q.Items = q.Items[1:] - - return item, nil -} - -func (q *Queue) LatestItem() (Response, error) { - q.RLock() - defer q.RUnlock() - - if len(q.Items) == 0 { - return Response{}, ErrEmptyQueue - } - - return q.Items[len(q.Items)-1], nil -} diff --git a/backend/snapshot.go b/backend/snapshot.go deleted file mode 100644 index 6f8d6a3..0000000 --- a/backend/snapshot.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "context" - "database/sql" -) - -func (d *Deps) WriteSnapshot(ctx context.Context, items []Response) error { - c, err := d.DB.Conn(ctx) - if err != nil { - return err - } - defer c.Close() - - tx, err := c.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelReadUncommitted, ReadOnly: false}) - if err != nil { - return err - } - - for _, item := range items { - _, err := tx.ExecContext( - ctx, - `INSERT INTO - snapshot - ( - url, - timeout, - interval, - status_code, - request_duration, - created_at - ) - VALUES - ( - $1, $2, $3, $4, $5, $6 - )`, - item.Endpoint.URL, - item.Endpoint.Timeout, - item.Endpoint.Interval, - item.StatusCode, - item.RequestDuration, - item.Timestamp, - ) - if err != nil { - tx.Rollback() - return err - } - } - - err = tx.Commit() - if err != nil { - tx.Rollback() - return err - } - - return nil -} diff --git a/backend/subscriber.go b/backend/subscriber.go index 24386d8..787a80e 100644 --- a/backend/subscriber.go +++ b/backend/subscriber.go @@ -2,96 +2,43 @@ package main import ( "context" - "database/sql" - "encoding/json" "errors" - "log" - "time" - - "github.com/allegro/bigcache/v3" + "fmt" ) type Subscriber struct { - url []string - db *sql.DB - cache *bigcache.BigCache - queue *Queue + subscribers []*BrokerSubscriber[MonitorHistorical] + ch chan MonitorHistorical } -func (d *Deps) NewSubscriber(url ...string) (*Subscriber, error) { - if len(url) == 0 { - return &Subscriber{}, errors.New("no url provided") +func NewSubscriber(centralBroker *Broker[MonitorHistorical], monitorIds ...string) (*Subscriber, error) { + if 
len(monitorIds) == 0 { + return &Subscriber{}, errors.New("no monitorIds provided") } - return &Subscriber{ - url: url, - db: d.DB, - cache: d.Cache, - queue: d.Queue, - }, nil -} - -func (s *Subscriber) Listen(ctx context.Context) <-chan Response { - ch := make(chan Response) - - go func() { - defer func() { - if r := recover(); r != nil { - log.Printf("[Subscriber-Listen] Recovered from panic: %v", r) - } - }() - - for { - select { - case <-ctx.Done(): - return - default: - // listen for changes in s.queue.Items - item, err := s.queue.LatestItem() - if err != nil { - if errors.Is(err, ErrEmptyQueue) { - continue - } - log.Println("Error dequeueing item from queue:", err) - continue - } - - // check if current item is in cache - cached, err := s.cache.Get(item.URL) - if err != nil { - // TODO(elianiva): proper error handling - log.Println("Error getting cached item") - continue - } - - itemStr, err := json.Marshal(item) - if err != nil { - // TODO(elianiva): proper error handling - log.Println("Error marshalling") - continue - } - if string(itemStr) == string(cached) { - continue - } - - if contains(item.URL, s.url) { - // send item to ch - ch <- item - } - } - - time.Sleep(time.Second * 1) + ch := make(chan MonitorHistorical) + var subscribers []*BrokerSubscriber[MonitorHistorical] + // create a new BrokerSubscriber + for _, monitorId := range monitorIds { + subscriber, err := centralBroker.Subscribe(monitorId, func(event BrokerEvent[MonitorHistorical]) error { + // send the event to the channel + message := event.Message() + ch <- message.Body + return nil + }) + if err != nil { + return &Subscriber{}, fmt.Errorf("failed to subscribe to monitor %s: %w", monitorId, err) } - }() - return ch -} + subscribers = append(subscribers, subscriber) -func contains(item string, items []string) bool { - for _, i := range items { - if i == item { - return true - } } - return false + return &Subscriber{ + subscribers: subscribers, + ch: ch, + }, nil +} + +func (s *Subscriber) Listen(ctx context.Context) <-chan MonitorHistorical { + return s.ch } diff --git a/backend/webhook.go b/backend/webhook.go index f124028..1f10976 100644 --- a/backend/webhook.go +++ b/backend/webhook.go @@ -16,7 +16,7 @@ const ( ) type WebhookInformation struct { - Endpoint string `json:"endpoint"` + Endpoint string `json:"monitor"` Status Status `json:"status"` StatusCode int `json:"statusCode"` RequestDuration int64 `json:"requestDuration"` diff --git a/backend/worker.go b/backend/worker.go index 29ebb13..4d12031 100644 --- a/backend/worker.go +++ b/backend/worker.go @@ -2,140 +2,195 @@ package main import ( "context" - "database/sql" - "encoding/json" "errors" "fmt" - "log" "net/http" + "strconv" + "strings" "time" - "github.com/allegro/bigcache/v3" + probing "github.com/prometheus-community/pro-bing" + "github.com/rs/zerolog/log" ) type Response struct { - Success bool `json:"success"` - StatusCode int `json:"statusCode"` - RequestDuration int64 `json:"requestDuration"` - Timestamp int64 `json:"timestamp"` - Endpoint + Success bool `json:"success"` + StatusCode int `json:"statusCode"` + RequestDuration int64 `json:"requestDuration"` + Timestamp time.Time `json:"timestamp"` + Monitor } +// Worker should only run checks for a single monitor, with specific type (HTTP or ICMP monitor). +// For each monitor result (success or fail), it should push the result into the monitor processor. 
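// For illustration, hypothetical monitor definitions wired the same way main.go
// does it. The Monitor struct and the MonitorType constants are referenced by
// this patch but defined elsewhere, so the field names below simply follow the
// usages in worker.go, and every concrete value is an example, not part of the
// patch.
func exampleWorkers(processor *Processor) {
	monitors := []Monitor{
		{
			UniqueID:               "example-web",
			Name:                   "Example Website",
			Type:                   MonitorTypeHTTP,
			HttpEndpoint:           "https://example.com/healthz",
			HttpMethod:             http.MethodGet,
			HttpExpectedStatusCode: "2xx",
			Interval:               30, // seconds; zero falls back to DefaultInterval
			Timeout:                10, // seconds; zero falls back to DefaultTimeout
		},
		{
			UniqueID:       "example-gateway",
			Name:           "Example Gateway",
			Type:           MonitorTypePing,
			IcmpHostname:   "192.0.2.1",
			IcmpPacketSize: 56,
		},
	}

	for _, monitor := range monitors {
		worker, err := NewWorker(monitor, processor)
		if err != nil {
			log.Fatal().Err(err).Msg("Failed to create worker")
		}
		go worker.Run()
	}
}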
type Worker struct { - endpoint *Endpoint - db *sql.DB - queue *Queue - cache *bigcache.BigCache - webhook *Webhook + monitor Monitor + processor *Processor } -func (d *Deps) NewWorker(e Endpoint) (*Worker, error) { - // Validate the endpoint - var endpoint = &e - _, err := ValidateEndpoint(*endpoint) +func NewWorker(monitor Monitor, processor *Processor) (*Worker, error) { + // Validate the monitor + _, err := monitor.Validate() if err != nil { return &Worker{}, err } - if endpoint.Interval == 0 { - endpoint.Interval = d.DefaultInterval + // Set default values + if monitor.Interval == 0 { + monitor.Interval = DefaultInterval } - if endpoint.Timeout == 0 { - endpoint.Timeout = d.DefaultTimeout + if monitor.Timeout == 0 { + monitor.Timeout = DefaultTimeout } - if endpoint.Method == "" { - endpoint.Method = http.MethodGet + if monitor.HttpMethod == "" { + monitor.HttpMethod = http.MethodGet + } + + if monitor.HttpExpectedStatusCode == "" { + monitor.HttpExpectedStatusCode = "2xx" + } + + if monitor.IcmpPacketSize <= 0 { + monitor.IcmpPacketSize = 56 } return &Worker{ - endpoint: endpoint, - db: d.DB, - queue: d.Queue, - cache: d.Cache, - webhook: d.Webhook, + monitor: monitor, + processor: processor, }, nil } func (w *Worker) Run() { for { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(w.endpoint.Timeout)) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(w.monitor.Timeout)) + var response Response + var err error // Make the request - response, err := w.makeRequest(ctx) - if err != nil { - cancel() - log.Printf("Failed to make request: %v", err) - continue + switch w.monitor.Type { + case MonitorTypeHTTP: + response, err = w.makeHttpRequest(ctx) + if err != nil { + cancel() + log.Error().Err(err).Msg("failed to make http request") + continue + } + break + case MonitorTypePing: + response, err = w.makeIcmpRequest(ctx) + if err != nil { + cancel() + log.Error().Err(err).Msg("failed to make icmp request") + continue + } } // Insert the response to the database - err = w.addToQueue(response) - if err != nil { - cancel() - log.Printf("Failed to insert response to the database: %v", err) - continue - } + w.processor.ProcessResponse(response) + + // Sleep for the interval + time.Sleep(time.Duration(w.monitor.Interval) * time.Second) + } +} + +func (w *Worker) parseExpectedStatusCode(got int) bool { + // Valid values: + // * 200 -> Direct 200 status code + // * 2xx -> Any 2xx status code (200-299) + // * 200-300 -> Any 200-300 status code (inclusive) + // * 2xx-3xx -> Any 2xx (200-299) and 3xx (300-399) status code (inclusive) + + if w.monitor.HttpExpectedStatusCode == strconv.Itoa(got) { + return true + } - err = w.webhook.Send(ctx, *response) - if err != nil { - cancel() - log.Printf("Failed to send webhook: %v", err) - continue + parts := strings.Split(w.monitor.HttpExpectedStatusCode, "-") + ok := false + for _, part := range parts { + if ok { + break } - // Sleep for the interval - time.Sleep(time.Duration(w.endpoint.Interval) * time.Second) + expectedSmallerParts := strings.Split(part, "") + gotSmallerParts := strings.Split(strconv.Itoa(got), "") + + for i, expectedPart := range expectedSmallerParts { + if expectedPart == "x" { + continue + } + + if expectedPart == gotSmallerParts[i] { + ok = true + continue + } + + if expectedPart != gotSmallerParts[i] { + ok = false + break + } + } } + + return false } -func (w *Worker) makeRequest(ctx context.Context) (*Response, error) { +func (w *Worker) makeHttpRequest(ctx 
context.Context) (Response, error) { timeStart := time.Now().UnixMilli() - req, err := http.NewRequestWithContext(ctx, w.endpoint.Method, w.endpoint.URL, nil) + req, err := http.NewRequestWithContext(ctx, w.monitor.HttpMethod, w.monitor.HttpEndpoint, nil) if err != nil && !errors.Is(err, context.DeadlineExceeded) { - return &Response{}, fmt.Errorf("failed to create request: %v", err) + return Response{}, fmt.Errorf("failed to create request: %w", err) } - if len(w.endpoint.Headers) > 0 { - for key, value := range w.endpoint.Headers { + if len(w.monitor.HttpHeaders) > 0 { + for key, value := range w.monitor.HttpHeaders { req.Header.Add(key, value) } } - client := http.Client{ - Timeout: time.Duration(w.endpoint.Timeout) * time.Second, + client := &http.Client{ + Timeout: time.Duration(w.monitor.Timeout) * time.Second, } resp, err := client.Do(req) if err != nil && !errors.Is(err, context.DeadlineExceeded) { - return &Response{}, fmt.Errorf("failed to make request: %v", err) + return Response{}, fmt.Errorf("failed to make request: %w", err) } timeEnd := time.Now().UnixMilli() - return &Response{ - Success: resp.StatusCode == 200, + return Response{ + Success: w.parseExpectedStatusCode(resp.StatusCode), StatusCode: resp.StatusCode, RequestDuration: timeEnd - timeStart, - Timestamp: time.Now().UnixMilli(), - Endpoint: *w.endpoint, + Timestamp: time.Now(), + Monitor: w.monitor, }, nil } -func (w *Worker) addToQueue(response *Response) error { - w.queue.Lock() - w.queue.Items = append(w.queue.Items, *response) - w.queue.Unlock() - - data, err := json.Marshal(response) +func (w *Worker) makeIcmpRequest(ctx context.Context) (Response, error) { + pinger, err := probing.NewPinger(w.monitor.IcmpHostname) if err != nil { - return err + return Response{}, fmt.Errorf("failed to create pinger: %w", err) } - err = w.cache.Set(w.endpoint.URL, data) + pinger.Count = 1 + pinger.Size = w.monitor.IcmpPacketSize + pinger.Timeout = time.Duration(w.monitor.Timeout) * time.Second + + err = pinger.Run() if err != nil { - return err + return Response{}, fmt.Errorf("failed to run pinger: %w", err) } - return nil + + stats := pinger.Statistics() + + return Response{ + Success: stats.PacketsRecv > 0, + StatusCode: 0, + RequestDuration: int64(stats.AvgRtt / time.Millisecond), + Timestamp: time.Now(), + Monitor: w.monitor, + }, nil } diff --git a/backend/worker_test.go b/backend/worker_test.go new file mode 100644 index 0000000..18001f2 --- /dev/null +++ b/backend/worker_test.go @@ -0,0 +1,81 @@ +package main_test + +import ( + "strconv" + "strings" + "testing" +) + +func parseExpectedStatusCode(expected string, got int) bool { + // Valid values: + // * 200 -> Direct 200 status code + // * 2xx -> Any 2xx status code (200-299) + // * 200-300 -> Any 200-300 status code (inclusive) + // * 2xx-3xx -> Any 2xx (200-299) and 3xx (300-399) status code (inclusive) + + if expected == strconv.Itoa(got) { + return true + } + + parts := strings.Split(expected, "-") + var ok = false + for _, part := range parts { + if ok == true { + break + } + expectedSmallerParts := strings.Split(part, "") + gotSmallerParts := strings.Split(strconv.Itoa(got), "") + + for i, expectedPart := range expectedSmallerParts { + if expectedPart == "x" { + continue + } + + if expectedPart == gotSmallerParts[i] { + ok = true + continue + } + + if expectedPart != gotSmallerParts[i] { + ok = false + break + } + } + } + + return ok +} + +func TestParseExpectedStatusCode(t *testing.T) { + tests := []struct { + expected string + got int + want bool + }{ + {"200", 
200, true}, + {"2xx", 200, true}, + {"2xx", 201, true}, + {"2xx", 299, true}, + {"2xx", 300, false}, + {"200-300", 200, true}, + {"200-300", 300, true}, + {"200-300", 301, false}, + {"2xx-3xx", 200, true}, + {"2xx-3xx", 201, true}, + {"2xx-3xx", 299, true}, + {"2xx-3xx", 300, true}, + {"2xx-3xx", 301, true}, + {"2xx-3xx", 399, true}, + {"2xx-3xx", 400, false}, + {"2xx-5xx", 200, true}, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + got := parseExpectedStatusCode(tt.expected, tt.got) + if got != tt.want { + t.Errorf("parseExpectedStatusCode(%s, %d) = %v; want %v", tt.expected, tt.got, got, tt.want) + } + }) + } +} From e395f3f77c018a039b08512d85b130bf9e8cb0be Mon Sep 17 00:00:00 2001 From: Reinaldy Rafli Date: Thu, 23 May 2024 14:30:05 +0700 Subject: [PATCH 6/6] fix(backend): remove webhook alert for now --- backend/migration.go | 2 +- backend/webhook.go | 70 -------------------------------------------- backend/worker.go | 2 +- 3 files changed, 2 insertions(+), 72 deletions(-) delete mode 100644 backend/webhook.go diff --git a/backend/migration.go b/backend/migration.go index 4ed8e89..01c809e 100644 --- a/backend/migration.go +++ b/backend/migration.go @@ -48,7 +48,7 @@ func Migrate(db *sql.DB, ctx context.Context, directionUp bool) error { for _, file := range files { content, err := migrationFiles.Open(file) if err != nil { - if content != nil && content.Close != nil { + if content != nil { _ = content.Close() } diff --git a/backend/webhook.go b/backend/webhook.go deleted file mode 100644 index 1f10976..0000000 --- a/backend/webhook.go +++ /dev/null @@ -1,70 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" -) - -type Status string - -const ( - StatusSuccess Status = "success" - StatusFailed Status = "failed" -) - -type WebhookInformation struct { - Endpoint string `json:"monitor"` - Status Status `json:"status"` - StatusCode int `json:"statusCode"` - RequestDuration int64 `json:"requestDuration"` - Timestamp int64 `json:"timestamp"` -} - -func (w *Webhook) Send(ctx context.Context, response Response) error { - // Fast return if there is no URL provided - if w.URL == "" { - return nil - } - - var responseStatus Status = StatusFailed - if response.Success { - responseStatus = StatusSuccess - } - - if responseStatus == StatusFailed && !w.FailedResponse { - return nil - } - - if responseStatus == StatusSuccess && !w.SuccessResponse { - return nil - } - - body, err := json.Marshal(WebhookInformation{ - Endpoint: response.Endpoint.URL, - Status: responseStatus, - StatusCode: response.StatusCode, - RequestDuration: response.RequestDuration, - Timestamp: response.Timestamp, - }) - if err != nil { - return fmt.Errorf("error marshalling response: %v", err) - } - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, w.URL, bytes.NewReader(body)) - if err != nil { - return fmt.Errorf("error creating request: %v", err) - } - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", "Semyi Webhook") - - client := http.DefaultClient - _, err = client.Do(req) - if err != nil { - return fmt.Errorf("error making request: %v", err) - } - - return nil -} diff --git a/backend/worker.go b/backend/worker.go index 4d12031..5da2ef5 100644 --- a/backend/worker.go +++ b/backend/worker.go @@ -88,7 +88,7 @@ func (w *Worker) Run() { } // Insert the response to the database - w.processor.ProcessResponse(response) + go w.processor.ProcessResponse(response) // Sleep for the interval 
time.Sleep(time.Duration(w.monitor.Interval) * time.Second)
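To close out the wiring TODOs left in main.go, one possible shape of the setup is sketched below. NewBroker is a placeholder name, since the Broker[MonitorHistorical] implementation is not included in this patch, and the remaining values mirror the environment handling that main.go and the old NewServer already use:

historicalWriter := NewMonitorHistoricalWriter(db)
historicalReader := NewMonitorHistoricalReader(db)
centralBroker := NewBroker[MonitorHistorical]() // placeholder constructor

processor := &Processor{
	historicalWriter: historicalWriter,
	historicalReader: historicalReader,
}

server := NewServer(ServerConfig{
	SSLRedirect:             os.Getenv("ENV") == "production",
	Environment:             os.Getenv("ENV"),
	Hostname:                "",
	Port:                    port,
	StaticPath:              staticPath,
	MonitorHistoricalReader: historicalReader,
	CentralBroker:           centralBroker,
})

With the processor holding a real writer, results pushed through ProcessResponse would land in monitor_historical, and the SSE handlers in http.go would have both a reader and a broker to stream from.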
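The hourly and daily aggregation workers are also still TODOs. A sketch of what an hourly pass could look like in DuckDB, assuming the usual context, database/sql, and fmt imports: max(status) marks an hour as failed if any sample in it failed (MonitorStatusFailure is the larger constant), avg(latency) is cast back to the INTEGER column type, and the daily variant would be the same query with date_trunc('day', ...). Rebuilding the whole aggregate on every run is a simplification; a real worker would likely restrict the SELECT to hours that have not been aggregated yet.

func aggregateHourly(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, `
		INSERT INTO monitor_historical_hourly_aggregate ("timestamp", monitor_id, status, latency)
		SELECT
			date_trunc('hour', "timestamp") AS hour_bucket,
			monitor_id,
			CAST(max(status) AS SMALLINT) AS status,
			CAST(avg(latency) AS INTEGER) AS latency
		FROM monitor_historical
		GROUP BY hour_bucket, monitor_id`)
	if err != nil {
		return fmt.Errorf("failed to aggregate hourly data: %w", err)
	}
	return nil
}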