diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..c6c8b3621 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,9 @@ +root = true + +[*] +indent_style = space +indent_size = 2 +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true
diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 000000000..d3a5641e2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,109 @@ +name: "\U0001F41E Bug report" +description: File a bug report +labels: ["status/triage", "type/bug"] +assignees: []
+ +body: + - type: checkboxes + id: terms + attributes: + label: Issue submitter TODO list + description: By checking these boxes, you confirm you've done the essential things. + options: + - label: I've searched for already existing issues [here](https://github.com/sentry-kubernetes/charts/issues) + required: true
+ + - type: textarea + attributes: + label: Describe the bug (actual behavior) + description: | + A clear and concise description of what the bug is. Use a list if there is more than one problem. + ```markdown + + ``` + validations: + required: true
+ + - type: textarea + attributes: + label: Expected behavior + description: | + A clear and concise description of what you expected to happen. + ```markdown + + ``` + validations: + required: false
+ + - type: textarea + attributes: + label: values.yaml + description: | + Please provide the relevant part of your `values.yaml` file. + ```yaml + + ``` + validations: + required: true
+ + - type: textarea + attributes: + label: Helm chart version + description: | + Please provide the version of the Helm chart you are using. + ```markdown + + ``` + validations: + required: true
+ + - type: textarea + attributes: + label: Steps to reproduce + description: | + Please write down the order of the actions required to reproduce the issue. + For advanced setups/complicated issues, we might need you to provide + a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + ```markdown + + ``` + validations: + required: true
+ + - type: textarea + attributes: + label: Screenshots + description: | + If applicable, add screenshots to help explain your problem. + ```markdown + + ``` + validations: + required: false
+ + - type: textarea + attributes: + label: Logs + description: | + If applicable, *upload* logs to help explain your problem. + ```markdown + + ``` + validations: + required: false
+ + - type: textarea + attributes: + label: Additional context + description: | + Add any other context about the problem here, e.g.: + 1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried? + Were they successful, or did the same issue occur? Please provide steps as well. + 2. Related issues (if there are any). + 3. Logs (if available). + 4. Is there any serious impact or behaviour on the end user because of this issue that could be overlooked?
+ ```markdown + + ``` + validations: + required: false diff --git a/.github/workflows/auto-label-conflicts.yaml b/.github/workflows/auto-label-conflicts.yaml new file mode 100644 index 000000000..a3bd7f797 --- /dev/null +++ b/.github/workflows/auto-label-conflicts.yaml @@ -0,0 +1,25 @@ +name: Auto Label Conflicts +on: + push: + branches: [develop] + pull_request: + branches: [develop] + + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + auto-label: + runs-on: ubuntu-latest + steps: + - uses: prince-chrismc/label-merge-conflicts-action@v3 + with: + conflict_label_name: "conflicts" + github_token: ${{ secrets.PERSONAL_TOKEN }} + detect_merge_changes: false # or true to handle as conflicts + conflict_comment: | + :wave: Hi, @${author}, + I detected conflicts against the base branch :speak_no_evil: + You'll want to sync :arrows_counterclockwise: your branch with upstream! diff --git a/.github/workflows/conventional-commits-check.yaml b/.github/workflows/conventional-commits-check.yaml new file mode 100644 index 000000000..5c28e5111 --- /dev/null +++ b/.github/workflows/conventional-commits-check.yaml @@ -0,0 +1,15 @@ +name: Conventional Commits Check + +on: + pull_request: + branches: [ develop ] + +jobs: + check-conventional-commits: + name: Conventional Commits + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Check Commit Conventions + uses: webiny/action-conventional-commits@v1.3.0 diff --git a/.github/workflows/lint-test.yaml b/.github/workflows/lint-test.yaml new file mode 100644 index 000000000..c3103f6a4 --- /dev/null +++ b/.github/workflows/lint-test.yaml @@ -0,0 +1,50 @@ +name: Lint and Test Charts + +on: pull_request + +jobs: + lint-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@v4.2.0 + with: + version: v3.14.4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.x' + check-latest: true + + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.6.1 + + - name: Add Helm Repositories + run: | + helm repo add sentry-kubernetes https://sentry-kubernetes.github.io/charts + helm repo update + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) + if [[ -n "$changed" ]]; then + echo "changed=true" >> "$GITHUB_OUTPUT" + fi + + - name: Run chart-testing (lint) + if: steps.list-changed.outputs.changed == 'true' + run: ct lint --target-branch ${{ github.event.repository.default_branch }} --check-version-increment=false + + - name: Create kind cluster + if: steps.list-changed.outputs.changed == 'true' + uses: helm/kind-action@v1.10.0 + + - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' + run: ct install --target-branch ${{ github.event.repository.default_branch }} --helm-extra-args "--timeout 1000s" diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml deleted file mode 100644 index 56a22a9c3..000000000 --- a/.github/workflows/lint.yaml +++ /dev/null @@ -1,40 +0,0 @@ -name: Lint and Test Charts - -on: pull_request - -jobs: - lint-charts: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: "0" - - name: Set up chart-testing - uses: helm/chart-testing-action@v2.1.0 - - name: Run chart-testing (list-changed) - id: list-changed - run: | - changed=$(ct 
list-changed --target-branch=develop --chart-dirs=. ) - if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" - fi - - name: Lint Sentry Helm Chart - uses: WyriHaximus/github-action-helm3@v2 - if: steps.list-changed.outputs.changed == 'true' - with: - exec: for chart in $(ls -d ./*/); do helm lint $chart; done - - name: Run chart-testing (lint) - if: steps.list-changed.outputs.changed == 'true' - run: | - ct lint \ - --target-branch=develop \ - --chart-dirs=. \ - --chart-repos=bitnami=https://charts.bitnami.com/bitnami,sentry-kubernetes=https://sentry-kubernetes.github.io/charts - # It would be nice to turn on this testing, but it is hard to get right and - # rather complex (as well as expensive in terms of runner compute time). - # Additionally, this is out of scope of fixing the issue brought up in #456 - # - name: Create kind cluster - # uses: helm/kind-action@v1.2.0 - # if: steps.list-changed.outputs.changed == 'true' - # - name: Run chart-testing (install) - # run: ct install --target-branch=develop --chart-dirs=.
diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml index 7ab557ce5..87ec9f864 100644 --- a/.github/workflows/push.yaml +++ b/.github/workflows/push.yaml @@ -2,19 +2,19 @@ name: Build and push Chart on: push: - branches: - - develop + tags: + - '*'
jobs: build-push: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: path: 'main'
- - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: path: 'gh-pages'
@@ -25,31 +25,25 @@ jobs: git fetch --no-tags --prune --depth=1 origin +refs/heads/*:refs/remotes/origin/* git checkout gh-pages
- # - name: Build zips - # uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c - # with: - # command: dep update main/sentry - - - name: Build zips - uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c + - name: Build clickhouse chart + uses: WyriHaximus/github-action-helm3@v4 with: - command: package main/sentry --destination gh-pages/charts - + exec: helm package -u main/charts/clickhouse --destination gh-pages/charts
- - name: Build zips - uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c + - name: Build sentry chart + uses: WyriHaximus/github-action-helm3@v4 with: - command: package main/clickhouse --destination gh-pages/charts + exec: helm package -u main/charts/sentry --destination gh-pages/charts
- - name: Build zips - uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c + - name: Build sentry-kubernetes chart + uses: WyriHaximus/github-action-helm3@v4 with: - command: package main/sentry-kubernetes --destination gh-pages/charts + exec: helm package -u main/charts/sentry-kubernetes --destination gh-pages/charts
- - name: Create index file - uses: yeouchien/helm3-action@f3a7c239c5c60777210c8e631839edf5dd3fa29c + - name: Create index file + uses: WyriHaximus/github-action-helm3@v4 with: - command: repo index --url https://sentry-kubernetes.github.io/charts ./gh-pages/charts + exec: helm repo index --url https://sentry-kubernetes.github.io/charts ./gh-pages/charts
- name: Commit files run: | @@ -58,7 +52,7 @@ jobs: git commit -m "Add changes" -a
- name: Push changes - uses: ad-m/github-push-action@19caa5c351f47734055690f7d01aaaef2f9114d5 + uses: ad-m/github-push-action@9870d48124da805820c70ebc6ba563c715551019 with: github_token: ${{ secrets.GITHUB_TOKEN }} branch: gh-pages
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index
000000000..3fbc689ca --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,20 @@ +name: Release Please + +on: + push: + branches: + - develop + +permissions: + contents: write + pull-requests: write + +jobs: + release-please: + runs-on: ubuntu-latest + steps: + - name: Release Please Action + uses: google-github-actions/release-please-action@v4 + with: + token: ${{ secrets.PERSONAL_TOKEN }} + config-file: release-please-config.json
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 64c2f7ffb..98be5f896 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -10,7 +10,7 @@ jobs: issues: write pull-requests: write steps: - - uses: actions/stale@v5 + - uses: actions/stale@v9 with: days-before-issue-stale: 30 days-before-issue-close: 14
diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 000000000..be9dbe8de --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1 @@ +{"charts/clickhouse":"3.14.0","charts/sentry":"26.10.0","charts/sentry-kubernetes":"0.4.0"}
diff --git a/README.md b/README.md index 3df0f9262..3cac2dd56 100644 --- a/README.md +++ b/README.md @@ -1,44 +1,202 @@ -# Sentry 10 helm charts +# Sentry helm charts
Sentry is a cross-platform crash reporting and aggregation platform.
-This repository aims to support Sentry 10 and move out from the deprecated Helm charts official repo. +This repository aims to support Sentry >=10 and move out from the deprecated Helm charts official repo.
Big thanks to the maintainers of the [deprecated chart](https://github.com/helm/charts/tree/master/stable/sentry). This work has been partly inspired by it.
## How this chart works
-`helm repo add sentry https://sentry-kubernetes.github.io/charts` +``` +helm repo add sentry https://sentry-kubernetes.github.io/charts +helm repo update +helm install my-sentry sentry/sentry --wait --timeout=1000s +```
## Values
-For now the full list of values is not documented but you can get inspired by the values.yaml specific to each directory. +For now the full list of values is not documented, but you can get inspired by the `values.yaml` specific to each directory.
+ +## Upgrading from 25.x.x Version of This Chart to 26.x.x + +Make sure to upgrade to chart version 25.20.0 (Sentry 24.8.0) before upgrading to 26.x.x.
+ +## Upgrading from 23.x.x Version of This Chart to 24.x.x/25.x.x + +Make sure to revert the changes on Clickhouse replica counts if the change doesn't suit you.
+ +## Upgrading from 22.x.x Version of This Chart to 23.x.x + +This version introduces changes to the definitions of ingest-consumers and workers. These changes allow balancing the +ingestion pipeline with more granularity.
+ +### Major Changes + +- **Ingest consumers**: Templates for Deployment and HPA manifests are now separate for ingest-consumer-events, + ingest-consumer-attachments, and ingest-consumer-transactions. +- **Workers**: Templates for two additional worker Deployments added, each of them with its own HPA. By default, they're + configured for error- and transaction-related task processing, but the queues to consume can be redefined for both.
+ +### Migration Guide + +Since labels are immutable in Kubernetes Deployments, `helm upgrade --force` should be used to recreate ingest-consumer Deployments. +As an alternative, existing ingest-consumer Deployments can be removed manually with `kubectl delete` before upgrading the Helm release (see the sketch below).
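+
+For illustration, a minimal manual sequence might look like the following (a sketch only; the namespace, release, and Deployment names are placeholders to adapt to your installation):
+
+```shell
+# List the ingest-consumer Deployments created by the previous chart version.
+kubectl get deployments -n <sentry-namespace> | grep ingest-consumer
+# Delete them so the upgrade can recreate them with the new labels.
+kubectl delete deployment <ingest-consumer-deployment> -n <sentry-namespace>
+# Upgrade the release as usual (or skip the deletes and use `helm upgrade --force` instead).
+helm upgrade <release-name> sentry/sentry -n <sentry-namespace>
+```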
+ +## Upgrading from 21.x.x Version of This Chart to 22.x.x + +This version introduces a significant change by dropping support for Kafka Zookeeper and transitioning to Kafka Kraft +mode. This change requires action on your part to ensure a smooth upgrade. + +### Major Changes + +- **Kafka Upgrade**: We have upgraded from Kafka `23.0.7` to `27.1.2`. This involves moving from Zookeeper to Kraft, + requiring a fresh setup of Kafka. + +### Migration Guide + +1. **Backup Your Data**: Ensure all your data is backed up before starting the migration process. +2. **Retrieve the Cluster ID from Zookeeper** by executing: + + ```shell + kubectl exec -it -- zkCli.sh get /cluster/id + ``` + +3. **Deploy at least one Kraft controller-only** in your deployment with `zookeeperMigrationMode=true`. The Kraft + controllers will migrate the data from your Kafka ZkBroker to Kraft mode. + + To do this, add the following values to your Zookeeper deployment when upgrading: + + ```yaml + controller: + replicaCount: 1 + controllerOnly: true + zookeeperMigrationMode: true + broker: + zookeeperMigrationMode: true + kraft: + enabled: true + clusterId: "" + ``` + +4. **Wait until all brokers are ready.** You should see the following log in the broker logs: + + ```shell + INFO [KafkaServer id=100] Finished catching up on KRaft metadata log, requesting that the KRaft controller unfence this broker (kafka.server.KafkaServer) + INFO [BrokerLifecycleManager id=100 isZkBroker=true] The broker has been unfenced. Transitioning from RECOVERY to RUNNING. (kafka.server.BrokerLifecycleManager) + ``` + In the controllers, the following message should show up: + ```shell + Transitioning ZK migration state from PRE_MIGRATION to MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` + +5. **Once all brokers have been successfully migrated,** set **`broker.zookeeperMigrationMode=false`** to fully migrate them. + ```yaml + broker: + zookeeperMigrationMode: false + ``` + +6. **To conclude the migration**, switch off migration mode on controllers and stop Zookeeper: + + ```yaml + controller: + zookeeperMigrationMode: false + zookeeper: + enabled: false + ``` + After the migration is complete, you should see the following message in your controllers: + + ```shell + [2023-07-13 13:07:45,226] INFO [QuorumController id=1] Transitioning ZK migration state from MIGRATION to POST_MIGRATION (org.apache.kafka.controller.FeatureControlManager) + ``` +7. **(Optional)** If you would like to switch to a non-dedicated cluster, set **`controller.controllerOnly=false`**. This will cause controller-only nodes to switch to controller+broker nodes. + + At this point, you could manually decommission broker-only nodes by reassigning its partitions to controller-eligible nodes. + + For more information about decommissioning a Kafka broker, check the official documentation. + +## Upgrading from 20.x.x version of this Chart to 21.x.x + +Bumped dependencies: +- memcached > 6.5.9 +- kafka > 23.0.7 - This is a major update, but only kafka version is updated. See [bitnami charts' update note](https://github.com/bitnami/charts/tree/main/bitnami/kafka#to-2300) +- clickhouse > 3.7.0 - Supports `priorityClassName` and `max_suspicious_broken_parts` config. +- zookeeper > 11.4.11 - 2 Major updates from v9 to v11. 
See [To v10 upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000) and [To v11 upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100) +- rabbitmq > 11.16.2 + +## Upgrading from 19.x.x version of this Chart to 20.x.x + +Bumped dependencies: +- kafka > 22.1.3 - now supports Kraft. Note that the upgrade is breaking and that you have to start a new Kafka from scratch to use it. + +Example: + +```yaml +kafka: + zookeeper: + enabled: false + kraft: + enabled: true +``` + +## Upgrading from 18.x.x version of this Chart to 19.x.x + +Chart dependencies have been upgraded because of Sentry requirements. +Changes: +- The minimum required version of PostgreSQL is 14.5 (works with 15.x too) + +Bumped dependencies: +- postgresql > 12.5.1 - latest version of chart with postgres 15 + +## Upgrading from 17.x.x version of this Chart to 18.x.x + +If Kafka is complaining about unknown or missing topic, please connect to `kafka-0` and run + +```shell +/opt/bitnami/kafka/bin/kafka-topics.sh --create --topic ingest-replay-recordings --bootstrap-server localhost:9092 +``` + +## Upgrading from 16.x.x version of this Chart to 17.x.x + +Sentry version from 22.10.0 onwards should be using chart 17.x.x + +- post process forwarder events and transactions topics are split in Sentry 22.10.0 + +You can delete the deployment "sentry-post-process-forward" as it's no longer needed. + +`sentry-worker` may fail to start by [#774](https://github.com/sentry-kubernetes/charts/issues/774). +If you encountered this issue, please reset `counters-0`, `triggers-0` queues. + +## Upgrading from 15.x.x version of this Chart to 16.x.x + +`system.secret-key` is removed + +See https://github.com/sentry-kubernetes/charts/tree/develop/sentry#sentry-secret-key ## Upgrading from 14.x.x version of this Chart to 15.x.x -Chart dependencies has been upgraded because of bitnami charts removal. +Chart dependencies have been upgraded because of bitnami charts removal. Changes: - `nginx.service.port: 80` > `nginx.service.ports.http: 80` - `kafka.service.port` > `kafka.service.ports.client` Bumped dependencies: - redis > 16.12.1 - latest version of chart -- kafka > 16.3.2 - chart aligned with zookeeper dependency, upgraded kafka to 3.11 +- kafka > 16.3.2 - chart aligned with zookeeper dependency, upgraded Kafka to 3.11 - rabbit > 8.32.2 - latest 3.9.* image version of chart -- postgresql > 10.16.2 - latest wersion of chart with postgres 11 +- postgresql > 10.16.2 - latest version of chart with postgres 11 - nginx > 12.0.4 - latest version of chart ## Upgrading from 13.x.x version of this Chart to 14.0.0 -ClickHouse was reconfigured with sharding and replication in-mind, If you are using external ClickHouse, you don't need to do anything. +ClickHouse was reconfigured with sharding and replication in mind. If you are using external ClickHouse, you don't need to do anything. **WARNING**: You will lose current event data
-Otherwise, you should delete the old ClickHouse volumes in-order to upgrade to this version. - +Otherwise, you should delete the old ClickHouse volumes in order to upgrade to this version. ## Upgrading from 12.x.x version of this Chart to 13.0.0 -The service annotions have been moved from the `service` section to the respective service's service sub-section. So what was: +The service annotations have been moved from the `service` section to the respective service's service sub-section. So what was: ```yaml service: @@ -64,56 +222,67 @@ relay: alb.ingress.kubernetes.io/healthcheck-port: traffic-port ``` - ## Upgrading from 10.x.x version of this Chart to 11.0.0 -If you were using clickhouse tabix externally, we disabled it per default. +If you were using ClickHouse Tabix externally, we disabled it by default. ## Upgrading from 9.x.x version of this Chart to 10.0.0 -If you were using clickhouse ImagePullSecrets, [we unified](https://github.com/sentry-kubernetes/charts/commit/573ca29d03bf2c044004c1aa387f652a36ada23a) the way it's used. +If you were using ClickHouse ImagePullSecrets, [we unified](https://github.com/sentry-kubernetes/charts/commit/573ca29d03bf2c044004c1aa387f652a36ada23a) the way it's used. ## Upgrading from 8.x.x version of this Chart to 9.0.0 -to simplify 1st time installations, backup value on clickhouse has been changed to false. +To simplify first-time installations, the backup value on ClickHouse has been changed to false. -clickhouse.clickhouse.configmap.remote_servers.replica.backup +`clickhouse.clickhouse.configmap.remote_servers.replica.backup` ## Upgrading from 7.x.x version of this Chart to 8.0.0 -- the default value of features.orgSubdomains is now "false" +- the default value of `features.orgSubdomains` is now "false" ## Upgrading from 6.x.x version of this Chart to 7.0.0 -- the default mode of relay is now "proxy". You can change it through the values.yaml file -- we removed the `githubSso` variable for the oauth github configuration. It was using the old environment variable, that doesn't work with Sentry anymore. Just use the common github.xxxx configuration for both oauth & the application integration. +- the default mode of relay is now "proxy". You can change it through the `values.yaml` file +- we removed the `githubSso` variable for the OAuth GitHub configuration. It was using the old environment variable, that doesn't work with Sentry anymore. Just use the common `github.xxxx` configuration for both OAuth & the application integration. ## Upgrading from 5.x.x version of this Chart to 6.0.0 -- The sentry.configYml value is now in a real yaml format +- The `sentry.configYml` value is now in a real YAML format - If you were previously using `relay.asHook`, the value is now `asHook` ## Upgrading from 4.x.x version of this Chart to 5.0.0 -As Relay is now part of this chart your need to make sure you enable either Nginx or the Ingress. Please read the next paragraph for more informations. +As Relay is now part of this chart, you need to make sure you enable either Nginx or the Ingress. Please read the next paragraph for more information. -If you are using an ingress gateway (like istio), you have to change your inbound path from sentry-web to nginx. +If you are using an ingress gateway (like Istio), you have to change your inbound path from `sentry-web` to `nginx`. ## NGINX and/or Ingress -By default, NGINX is enabled to allow sending the incoming requests to [Sentry Relay](https://getsentry.github.io/relay/) or the Django backend depending on the path. 
When Sentry is meant to be exposed outside of the Kubernetes cluster, it is recommended to disable NGINX and let the Ingress do the same. It's recommended to go with the go to Ingress Controller, [NGINX Ingress](https://kubernetes.github.io/ingress-nginx/) but others should work as well. +By default, NGINX is enabled to allow sending the incoming requests to [Sentry Relay](https://getsentry.github.io/relay/) or the Django backend depending on the path. When Sentry is meant to be exposed outside of the Kubernetes cluster, it is recommended to disable NGINX and let the Ingress do the same. It's recommended to go with the go-to Ingress Controller, [NGINX Ingress](https://kubernetes.github.io/ingress-nginx/), but others should work as well.
-Note: if you are using NGINX Ingress, please set this annotation on your ingress : nginx.ingress.kubernetes.io/use-regex: "true". -If you are using `additionalHostNames` the `nginx.ingress.kubernetes.io/upstream-vhost` annotation might also come in handy. +Note: if you are using NGINX Ingress, please set this annotation on your ingress: `nginx.ingress.kubernetes.io/use-regex: "true"`. +If you are using `additionalHostNames`, the `nginx.ingress.kubernetes.io/upstream-vhost` annotation might also come in handy. It sets the `Host` header to the value you provide to avoid CSRF issues.
-## Clickhouse warning +### Letsencrypt on NGINX Ingress Controller +```yaml +nginx: + ingress: + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + enabled: true + hostname: fqdn + ingressClassName: "nginx" + tls: true +``` + +## ClickHouse warning
-Snuba only supports a UTC timezone for Clickhouse. Please keep the initial value! +Snuba only supports a UTC timezone for ClickHouse. Please keep the initial value!
## Upgrading from 3.1.0 version of this Chart to 4.0.0
-Following Helm Chart best practices the new version introduces some breaking changes, all configuration for external +Following Helm Chart best practices, the new version introduces some breaking changes. All configuration for external resources moved to separate config branches: `externalClickhouse`, `externalKafka`, `externalRedis`, `externalPostgresql`.
Here is a mapping table of old values and new values: @@ -132,7 +301,7 @@ Here is a mapping table of old values and new values:
## Upgrading from deprecated 9.0 -> 10.0 Chart
-As this chart runs in helm 3 and also tries its best to follow on from the original Sentry chart. There are some steps that needs to be taken in order to correctly upgrade. +As this chart runs in Helm 3 and also tries its best to follow on from the original Sentry chart, there are some steps that need to be taken in order to correctly upgrade.
From the previous upgrade, make sure to get the following from your previous installation: @@ -142,11 +311,11 @@ From the previous upgrade, make sure to get the following from your previous ins
#### Upgrade Steps
-Due to an issue where transferring from Helm 2 to 3. Statefulsets that use the following: `heritage: {{ .Release.Service }}` in the metadata field will error out with a `Forbidden` error during the upgrade. The only workaround is to delete the existing statefulsets (Don't worry, PVC will be retained): +Due to an issue when transferring from Helm 2 to 3, StatefulSets that use `heritage: {{ .Release.Service }}` in the metadata field will error out with a `Forbidden` error during the upgrade.
The only workaround is to delete the existing StatefulSets (Don't worry, PVC will be retained): > `kubectl delete --all sts -n ` -Once the statefulsets are deleted. Next steps is to convert the helm release from version 2 to 3 using the helm 3 plugin: +Once the StatefulSets are deleted. Next steps is to convert the Helm release from version 2 to 3 using the Helm 3 plugin: > `helm3 2to3 convert ` diff --git a/clickhouse/.helmignore b/charts/clickhouse/.helmignore similarity index 100% rename from clickhouse/.helmignore rename to charts/clickhouse/.helmignore diff --git a/charts/clickhouse/CHANGELOG.md b/charts/clickhouse/CHANGELOG.md new file mode 100644 index 000000000..4d35b8d00 --- /dev/null +++ b/charts/clickhouse/CHANGELOG.md @@ -0,0 +1,85 @@ +# Changelog + +## [3.14.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.13.0...clickhouse-v3.14.0) (2024-12-28) + + +### Features + +* bump clickhouse to 23.8 ([#1649](https://github.com/sentry-kubernetes/charts/issues/1649)) ([02d0083](https://github.com/sentry-kubernetes/charts/commit/02d00839de982d61d18f9d2655f3349a4f981a6b)) + +## [3.13.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.12.0...clickhouse-v3.13.0) (2024-10-16) + + +### Features + +* **clickhouse:** update ClickHouse version to 23.3.19.32 ([#1552](https://github.com/sentry-kubernetes/charts/issues/1552)) ([05bcae5](https://github.com/sentry-kubernetes/charts/commit/05bcae567473f6d00c92fe3c4da84fc26fc26065)) + +## [3.12.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.11.0...clickhouse-v3.12.0) (2024-10-15) + + +### Features + +* **clickhouse:** update ClickHouse version to 22.8.15.23 ([#1550](https://github.com/sentry-kubernetes/charts/issues/1550)) ([4271312](https://github.com/sentry-kubernetes/charts/commit/4271312e55b15f8f91bda7c70e3384042e602f5c)) + +## [3.11.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.10.0...clickhouse-v3.11.0) (2024-09-14) + + +### Features + +* update clickhouse chart with new image version and startup probes ([#1429](https://github.com/sentry-kubernetes/charts/issues/1429)) ([efdf1b6](https://github.com/sentry-kubernetes/charts/commit/efdf1b6dcc9215335fe0128b27187aa25bf9256f)) + +## [3.10.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.9.0...clickhouse-v3.10.0) (2024-07-11) + + +### Features + +* **clickhouse:** allow overriding Clickhouse host ([#1356](https://github.com/sentry-kubernetes/charts/issues/1356)) ([bf0aaf3](https://github.com/sentry-kubernetes/charts/commit/bf0aaf346d5363fef2b12e2caee6f4b4750d45fd)) + +## [3.9.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.8.0...clickhouse-v3.9.0) (2024-05-21) + + +### Features + +* ability to override default clickhouse config ([#1268](https://github.com/sentry-kubernetes/charts/issues/1268)) ([1f8a5d5](https://github.com/sentry-kubernetes/charts/commit/1f8a5d58cedf3f26e759c67d2c5f50ea11d371c1)) + +## [3.8.0](https://github.com/sentry-kubernetes/charts/compare/clickhouse-v3.7.2...clickhouse-v3.8.0) (2024-04-12) + + +### Features + +* add priorityClassName to clickhouse ([#1098](https://github.com/sentry-kubernetes/charts/issues/1098)) ([386e7b7](https://github.com/sentry-kubernetes/charts/commit/386e7b7328000a289a7642752af583e8f6d40106)) +* Allow to set max_suspicious_broken_parts in merge_tree settings… ([#1080](https://github.com/sentry-kubernetes/charts/issues/1080)) ([d2f305c](https://github.com/sentry-kubernetes/charts/commit/d2f305c73b7b0ab625734a30fe5b5363606cd751)) +* 
Allow to set specified merge_tree settings in config.xml ([#884](https://github.com/sentry-kubernetes/charts/issues/884)) ([a964753](https://github.com/sentry-kubernetes/charts/commit/a964753b6fc785292c448e2f8d7c3099c696039d)) +* clone clickhouse chart ([#71](https://github.com/sentry-kubernetes/charts/issues/71)) ([d4c252b](https://github.com/sentry-kubernetes/charts/commit/d4c252b752bd637595b2406e88f2118d8609667a)) +* distributed tables v2 ([#588](https://github.com/sentry-kubernetes/charts/issues/588)) ([cfe7d73](https://github.com/sentry-kubernetes/charts/commit/cfe7d736278feeeb72189efb841a6099685ed1dd)) +* put clickhouse with UTC timezone per default ([#82](https://github.com/sentry-kubernetes/charts/issues/82)) ([daab634](https://github.com/sentry-kubernetes/charts/commit/daab634449ce10ad45a0f73c765e04033a8cb657)) +* **tabix:** allow setting ingress annotations for dealing with cors ([#321](https://github.com/sentry-kubernetes/charts/issues/321)) ([b11361f](https://github.com/sentry-kubernetes/charts/commit/b11361f2fe6b27504d2f0fda4a12bc5ade780b05)) + + +### Bug Fixes + +* clickhouse chart lint ([a364b05](https://github.com/sentry-kubernetes/charts/commit/a364b053069ab9330af6c8bfd0d2bda619ada0f0)) +* clickhouse ingress ([#99](https://github.com/sentry-kubernetes/charts/issues/99)) ([94da94d](https://github.com/sentry-kubernetes/charts/commit/94da94d15a9528ebdb4782c20af48b02e0a256bf)) +* make ingress, rbac compatible with latest k8s versions ([#114](https://github.com/sentry-kubernetes/charts/issues/114)) ([8d2f319](https://github.com/sentry-kubernetes/charts/commit/8d2f3196fe797a301ba6ebb21b793f3030d70962)) +* replace hardcoded value ([#1085](https://github.com/sentry-kubernetes/charts/issues/1085)) ([c5fec72](https://github.com/sentry-kubernetes/charts/commit/c5fec72ad8dc16e727019094d07dbaae4359cdf8)) + +## 3.7.2 (2024-04-12) + + +### Features + +* add priorityClassName to clickhouse ([#1098](https://github.com/sentry-kubernetes/charts/issues/1098)) ([386e7b7](https://github.com/sentry-kubernetes/charts/commit/386e7b7328000a289a7642752af583e8f6d40106)) +* Allow to set max_suspicious_broken_parts in merge_tree settings… ([#1080](https://github.com/sentry-kubernetes/charts/issues/1080)) ([d2f305c](https://github.com/sentry-kubernetes/charts/commit/d2f305c73b7b0ab625734a30fe5b5363606cd751)) +* Allow to set specified merge_tree settings in config.xml ([#884](https://github.com/sentry-kubernetes/charts/issues/884)) ([a964753](https://github.com/sentry-kubernetes/charts/commit/a964753b6fc785292c448e2f8d7c3099c696039d)) +* clone clickhouse chart ([#71](https://github.com/sentry-kubernetes/charts/issues/71)) ([d4c252b](https://github.com/sentry-kubernetes/charts/commit/d4c252b752bd637595b2406e88f2118d8609667a)) +* distributed tables v2 ([#588](https://github.com/sentry-kubernetes/charts/issues/588)) ([cfe7d73](https://github.com/sentry-kubernetes/charts/commit/cfe7d736278feeeb72189efb841a6099685ed1dd)) +* put clickhouse with UTC timezone per default ([#82](https://github.com/sentry-kubernetes/charts/issues/82)) ([daab634](https://github.com/sentry-kubernetes/charts/commit/daab634449ce10ad45a0f73c765e04033a8cb657)) +* **tabix:** allow setting ingress annotations for dealing with cors ([#321](https://github.com/sentry-kubernetes/charts/issues/321)) ([b11361f](https://github.com/sentry-kubernetes/charts/commit/b11361f2fe6b27504d2f0fda4a12bc5ade780b05)) + + +### Bug Fixes + +* clickhouse chart lint 
([a364b05](https://github.com/sentry-kubernetes/charts/commit/a364b053069ab9330af6c8bfd0d2bda619ada0f0)) +* clickhouse ingress ([#99](https://github.com/sentry-kubernetes/charts/issues/99)) ([94da94d](https://github.com/sentry-kubernetes/charts/commit/94da94d15a9528ebdb4782c20af48b02e0a256bf)) +* make ingress, rbac compatible with latest k8s versions ([#114](https://github.com/sentry-kubernetes/charts/issues/114)) ([8d2f319](https://github.com/sentry-kubernetes/charts/commit/8d2f3196fe797a301ba6ebb21b793f3030d70962)) +* replace hardcoded value ([#1085](https://github.com/sentry-kubernetes/charts/issues/1085)) ([c5fec72](https://github.com/sentry-kubernetes/charts/commit/c5fec72ad8dc16e727019094d07dbaae4359cdf8)) diff --git a/clickhouse/Chart.yaml b/charts/clickhouse/Chart.yaml similarity index 50% rename from clickhouse/Chart.yaml rename to charts/clickhouse/Chart.yaml index 248947d26..2de224d66 100755 --- a/clickhouse/Chart.yaml +++ b/charts/clickhouse/Chart.yaml @@ -1,16 +1,17 @@ apiVersion: v1 -appVersion: "19.14" -description: ClickHouse is an open source column-oriented database management system - capable of real time generation of analytical data reports using SQL queries +appVersion: "23.8.16.16" +description: ClickHouse is an open source column-oriented database management + system capable of real time generation of analytical data reports using SQL + queries home: https://clickhouse.yandex/ icon: https://clickhouse.yandex/images/logo.png keywords: -- clickhouse -- olap -- database + - clickhouse + - olap + - database name: clickhouse sources: -- https://github.com/sentry-kubernetes/charts -version: 3.1.2 + - https://github.com/sentry-kubernetes/charts +version: 3.14.0 maintainers: - name: sentry-kubernetes diff --git a/clickhouse/README.md b/charts/clickhouse/README.md similarity index 93% rename from clickhouse/README.md rename to charts/clickhouse/README.md index 913b53eb0..c190d6908 100755 --- a/clickhouse/README.md +++ b/charts/clickhouse/README.md @@ -51,31 +51,37 @@ The following tables lists the configurable parameters of the Clickhouse chart a | `clickhouse.tcp_port` | Port for communicating with clients over the TCP protocol | `9000` | | `clickhouse.interserver_http_port` | Port for exchanging data between ClickHouse servers | `9009` | | `clickhouse.replicas` | The instance number of Clickhouse | `3` | -| `clickhouse.image` | Docker image for Clickhouse | `yandex/clickhouse-server` | -| `clickhouse.imageVersion` | Docker image version for Clickhouse | `19.14` | +| `clickhouse.image` | Docker image for Clickhouse | `clickhouse/clickhouse-server` | +| `clickhouse.imageVersion` | Docker image version for Clickhouse | `` | | `clickhouse.imagePullPolicy` | Image pull policy. 
One of Always, Never, IfNotPresent | `IfNotPresent` | +| `clickhouse.startupProbe.enabled` | Turn on and off startup probe | `true` | +| `clickhouse.startupProbe.initialDelaySeconds` | Delay before startup probe is initiated | `5` | +| `clickhouse.startupProbe.periodSeconds` | How often to perform the probe | `5` | +| `clickhouse.startupProbe.timeoutSeconds` | When the probe times out | `5` | +| `clickhouse.startupProbe.failureThreshold` | Minimum consecutive failures for the probe | `60` | +| `clickhouse.startupProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `clickhouse.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `clickhouse.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | +| `clickhouse.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `0` | | `clickhouse.livenessProbe.periodSeconds` | How often to perform the probe | `30` | | `clickhouse.livenessProbe.timeoutSeconds` | When the probe times out | `5` | | `clickhouse.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | | `clickhouse.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
| `clickhouse.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `clickhouse.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | +| `clickhouse.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `0` | | `clickhouse.readinessProbe.periodSeconds` | How often to perform the probe | `30` | | `clickhouse.readinessProbe.timeoutSeconds` | When the probe times out | `5` | | `clickhouse.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | | `clickhouse.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
|`clickhouse.resources` | The resource requests and limits for Clickhouse pods |`{}` | | `clickhouse.persistentVolumeClaim.enabled` | Enable persistence using a `PersistentVolumeClaim` | `false` |
-| `clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storage` | Persistent Volume Size | `500Gi` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storage` | Persistent Volume Size | `50Gi` |
+| `clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` | +| `clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | +| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storage` | Persistent Volume Size | `500Gi` | +| `clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled` | Turn on and off logsPersistentVolume | `false` | +| `clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | +| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storage` | Persistent Volume Size | `50Gi` |
| `clickhouse.ingress.enabled` | Enable ingress | `false` | | `clickhouse.ingress.host` | Ingress host | `` | | `clickhouse.ingress.path` | Ingress path | `` |
@@ -130,14 +136,14 @@ The following tables lists the configurable parameters of the Clickhouse chart a | `clickhouse.configmap.remote_servers.graphite.config[].events_cumulative` | Sending cumulative data from a :ref:system_tables-system.events table | `true` | | `clickhouse.configmap.remote_servers.graphite.config[].asynchronous_metrics` | Sending data from a :ref:system_tables-system.asynchronous_metrics table | `true` |
| `clickhouse.configmap.profiles.enabled` | Enable a settings profiles | `false` | -| `clickhouse.configmap.profiles.profile[].name` | Tne name of a settings profile | `` | +| `clickhouse.configmap.profiles.profile[].name` | The name of a settings profile | `` | | `clickhouse.configmap.profiles.profile[].config` | The config of a settings profile | `{}` |
| `clickhouse.configmap.users.enabled` | Enable a settings users | `false` | -| `clickhouse.configmap.users.user[].name` | Tne name of a settings user | `` | -| `clickhouse.configmap.users.user[].config` | Tne config of a settings user | `{}` | +| `clickhouse.configmap.users.user[].name` | The name of a settings user | `` | +| `clickhouse.configmap.users.user[].config` | The config of a settings user | `{}` |
| `clickhouse.configmap.quotas.enabled` | Enable a settings quotas | `false` | -| `clickhouse.configmap.quotas.quota[].name` | Tne name of a settings quota | `` | -| `clickhouse.configmap.quotas.quota[].config[]` | Tne config of a settings quota | `[]` | +| `clickhouse.configmap.quotas.quota[].name` | The name of a settings quota | `` | +| `clickhouse.configmap.quotas.quota[].config[]` | The config of a settings quota | `[]` |
| `tabix.enabled` | Enable tabix | `false` | | `tabix.replicas` | The instance number of Tabix | `1` | | `tabix.updateStrategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate".
Default is RollingUpdate | `RollingUpdate` | @@ -145,7 +151,7 @@ The following tables lists the configurable parameters of the Clickhouse chart a | `tabix.updateStrategy.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `1` | | `tabix.image` | Docker image name | `spoonest/clickhouse-tabix-web-client` | | `tabix.imageVersion` | Docker image version | `stable` | -| `tabix.imagePullPolicy` | Dcoker image pull policy | `IfNotPresent` | +| `tabix.imagePullPolicy` | Docker image pull policy | `IfNotPresent` | | `tabix.livenessProbe.enabled` | Turn on and off liveness probe | `true` | | `tabix.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | | `tabix.livenessProbe.periodSeconds` | How often to perform the probe | `30` | diff --git a/clickhouse/templates/NOTES.txt b/charts/clickhouse/templates/NOTES.txt similarity index 97% rename from clickhouse/templates/NOTES.txt rename to charts/clickhouse/templates/NOTES.txt index f8a6dd147..e1990241a 100755 --- a/clickhouse/templates/NOTES.txt +++ b/charts/clickhouse/templates/NOTES.txt @@ -1,31 +1,31 @@ -** Please be patient while the chart is being deployed ** - -1. Get the Clickhouse URL by running: - -{{- if .Values.clickhouse.ingress.enabled }} - - export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }} -o jsonpath='{.spec.rules[0].host}') - echo "Clickhouse URL: http://$HOSTNAME/" - -{{- else }} - - echo URL : http://127.0.0.1:8080/ - echo Management URL : http://127.0.0.1:8080/manager - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 8123:{{ .Values.clickhouse.http_port }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9000:{{ .Values.clickhouse.tcp_port }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9009:{{ .Values.clickhouse.interserver_http_port }} - -{{- end }} - -2. Get the Tabix URL by running: - -{{- if .Values.tabix.ingress.enabled }} - - export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }}-tabix -o jsonpath='{.spec.rules[0].host}') - echo "Tabix URL: http://$HOSTNAME/" - -{{- else }} - - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }}-tabix 80 - -{{- end }} +** Please be patient while the chart is being deployed ** + +1. Get the Clickhouse URL by running: + +{{- if .Values.clickhouse.ingress.enabled }} + + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }} -o jsonpath='{.spec.rules[0].host}') + echo "Clickhouse URL: http://$HOSTNAME/" + +{{- else }} + + echo URL : http://127.0.0.1:8080/ + echo Management URL : http://127.0.0.1:8080/manager + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 8123:{{ .Values.clickhouse.http_port }} + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9000:{{ .Values.clickhouse.tcp_port }} + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9009:{{ .Values.clickhouse.interserver_http_port }} + +{{- end }} + +2. 
Get the Tabix URL by running: + +{{- if .Values.tabix.ingress.enabled }} + + export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }}-tabix -o jsonpath='{.spec.rules[0].host}') + echo "Tabix URL: http://$HOSTNAME/" + +{{- else }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }}-tabix 80 + +{{- end }} diff --git a/clickhouse/templates/_helpers.tpl b/charts/clickhouse/templates/_helpers.tpl similarity index 100% rename from clickhouse/templates/_helpers.tpl rename to charts/clickhouse/templates/_helpers.tpl diff --git a/clickhouse/templates/configmap-config.yaml b/charts/clickhouse/templates/configmap-config.yaml similarity index 60% rename from clickhouse/templates/configmap-config.yaml rename to charts/clickhouse/templates/configmap-config.yaml index 3b8fd4b27..3a6a69a36 100755 --- a/clickhouse/templates/configmap-config.yaml +++ b/charts/clickhouse/templates/configmap-config.yaml @@ -21,7 +21,7 @@ data: users.xml {{ template "clickhouse.fullname" . }} - 0.0.0.0 + {{ .Values.clickhouse.listen_host | default "0.0.0.0" }} {{ .Values.clickhouse.http_port | default "8123" }} {{ .Values.clickhouse.tcp_port | default "9000" }} {{ .Values.clickhouse.interserver_http_port | default "9009" }} @@ -33,7 +33,51 @@ data: {{ .Values.timezone | default "Asia/Shanghai" }} {{ .Values.clickhouse.configmap.umask | default "027" }} {{ .Values.clickhouse.configmap.mlock_executable | default "false" }} - + {{- if .Values.clickhouse.configmap.remote_servers.enabled }} + + <{{ include "clickhouse.fullname" . }}> + {{- range untilStep 0 (int .Values.clickhouse.replicas) 1 }} + + + {{ $.Values.clickhouse.configmap.remote_servers.internal_replication | default "false" }} + {{ include "clickhouse.fullname" $ }}-{{ . }}.{{ include "clickhouse.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} + {{ $.Values.clickhouse.tcp_port}} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.user }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.password }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }} + {{- end }} + + {{- if $.Values.clickhouse.configmap.remote_servers.replica.backup.enabled }} + + {{ include "clickhouse.fullname" $ }}-replica-{{ . 
}}.{{ include "clickhouse.fullname" $ }}-replica-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} + {{ $.Values.clickhouse.tcp_port}} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.user }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.password }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }} + {{- end }} + {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }} + {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }} + {{- end }} + + {{- end }} + + {{- end }} + + + {{- end }} {{ .Values.clickhouse.configmap.builtin_dictionaries_reload_interval | default "3600" }} @@ -115,5 +159,19 @@ data: {{- if .Values.clickhouse.configmap.interserver_http_host }} {{ .Values.clickhouse.configmap.interserver_http_host }} {{- end }} + + {{- if .Values.clickhouse.configmap.merge_tree.enabled }} + + {{ .Values.clickhouse.configmap.merge_tree.parts_to_delay_insert }} + {{ .Values.clickhouse.configmap.merge_tree.parts_to_throw_insert }} + {{ .Values.clickhouse.configmap.merge_tree.max_part_loading_threads }} + {{ .Values.clickhouse.configmap.merge_tree.max_suspicious_broken_parts }} + + {{- end }} +{{- if .Values.clickhouse.configmap.configOverride }} + override.xml: |- + + {{- .Values.clickhouse.configmap.configOverride | nindent 4 }} +{{- end }} {{- end }} diff --git a/clickhouse/templates/configmap-metrika.yaml b/charts/clickhouse/templates/configmap-metrika.yaml similarity index 100% rename from clickhouse/templates/configmap-metrika.yaml rename to charts/clickhouse/templates/configmap-metrika.yaml diff --git a/clickhouse/templates/configmap-users.yaml b/charts/clickhouse/templates/configmap-users.yaml similarity index 100% rename from clickhouse/templates/configmap-users.yaml rename to charts/clickhouse/templates/configmap-users.yaml diff --git a/clickhouse/templates/deployment-tabix.yaml b/charts/clickhouse/templates/deployment-tabix.yaml similarity index 94% rename from clickhouse/templates/deployment-tabix.yaml rename to charts/clickhouse/templates/deployment-tabix.yaml index 697de913f..c5259da7a 100755 --- a/clickhouse/templates/deployment-tabix.yaml +++ b/charts/clickhouse/templates/deployment-tabix.yaml @@ -58,6 +58,10 @@ spec: ports: - name: http containerPort: 80 +{{- if .Values.tabix.volumeMounts }} + volumeMounts: +{{ toYaml .Values.tabix.volumeMounts | indent 8 }} +{{- end }} env: {{- if .Values.tabix.security }} - name: USER @@ -101,6 +105,10 @@ spec: resources: {{ toYaml .Values.tabix.resources | indent 10 }} {{- end }} +{{- if .Values.tabix.volumes }} + volumes: +{{ toYaml .Values.tabix.volumes | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-tabix {{- end }} diff --git a/clickhouse/templates/ingress-clickhouse.yaml b/charts/clickhouse/templates/ingress-clickhouse.yaml similarity index 77% rename from clickhouse/templates/ingress-clickhouse.yaml rename to charts/clickhouse/templates/ingress-clickhouse.yaml index 9359efcc2..6904f3eac 100755 --- a/clickhouse/templates/ingress-clickhouse.yaml +++ b/charts/clickhouse/templates/ingress-clickhouse.yaml @@ -1,5 +1,11 @@ {{- if .Values.clickhouse.ingress.enabled}} -apiVersion: {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} 
networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} kind: Ingress metadata: name: {{ include "clickhouse.fullname" . }} diff --git a/clickhouse/templates/ingress-tabix.yaml b/charts/clickhouse/templates/ingress-tabix.yaml similarity index 74% rename from clickhouse/templates/ingress-tabix.yaml rename to charts/clickhouse/templates/ingress-tabix.yaml index d4cc0f571..8e2c04c95 100755 --- a/clickhouse/templates/ingress-tabix.yaml +++ b/charts/clickhouse/templates/ingress-tabix.yaml @@ -1,6 +1,12 @@ {{- if .Values.tabix.enabled }} {{- if .Values.tabix.ingress.enabled}} -apiVersion: {{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} networking.k8s.io/v1beta1 {{- else }} extensions/v1beta1 {{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} kind: Ingress metadata: name: {{ include "clickhouse.fullname" . }}-tabix diff --git a/clickhouse/templates/prometheus-rule.yaml b/charts/clickhouse/templates/prometheus-rule.yaml similarity index 100% rename from clickhouse/templates/prometheus-rule.yaml rename to charts/clickhouse/templates/prometheus-rule.yaml diff --git a/clickhouse/templates/serviceaccount-clickhouse-replica.yaml b/charts/clickhouse/templates/serviceaccount-clickhouse-replica.yaml similarity index 100% rename from clickhouse/templates/serviceaccount-clickhouse-replica.yaml rename to charts/clickhouse/templates/serviceaccount-clickhouse-replica.yaml diff --git a/clickhouse/templates/serviceaccount-clickhouse-tabix.yaml b/charts/clickhouse/templates/serviceaccount-clickhouse-tabix.yaml similarity index 100% rename from clickhouse/templates/serviceaccount-clickhouse-tabix.yaml rename to charts/clickhouse/templates/serviceaccount-clickhouse-tabix.yaml diff --git a/clickhouse/templates/serviceaccount-clickhouse.yaml b/charts/clickhouse/templates/serviceaccount-clickhouse.yaml similarity index 100% rename from clickhouse/templates/serviceaccount-clickhouse.yaml rename to charts/clickhouse/templates/serviceaccount-clickhouse.yaml diff --git a/clickhouse/templates/servicemonitor-clickhouse-replica.yaml b/charts/clickhouse/templates/servicemonitor-clickhouse-replica.yaml similarity index 65% rename from clickhouse/templates/servicemonitor-clickhouse-replica.yaml rename to charts/clickhouse/templates/servicemonitor-clickhouse-replica.yaml index 642b44966..f7fa907af 100755 --- a/clickhouse/templates/servicemonitor-clickhouse-replica.yaml +++ b/charts/clickhouse/templates/servicemonitor-clickhouse-replica.yaml @@ -1,3 +1,4 @@ +{{- if .Values.clickhouse.configmap.remote_servers.replica.backup.enabled }} {{- if .Values.clickhouse.metrics.serviceMonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor @@ -14,13 +15,26 @@ metadata: {{- toYaml .Values.clickhouse.metrics.serviceMonitor.selector | nindent 4 }} {{- end }} spec: + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} selector: matchLabels: app.kubernetes.io/name: {{ include "clickhouse.name" . 
}}-replica-metrics endpoints: - port: metrics + path: /metrics {{- if .Values.clickhouse.metrics.serviceMonitor.interval }} interval: {{ .Values.clickhouse.metrics.serviceMonitor.interval }} {{- end }} +{{- if .Values.clickhouse.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ toYaml .Values.clickhouse.metrics.serviceMonitor.metricRelabelings | indent 6 }} +{{- end }} +{{- if .Values.clickhouse.metrics.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.clickhouse.metrics.serviceMonitor.relabelings | nindent 6 }} +{{- end }} --- {{- end }} +{{- end }} diff --git a/clickhouse/templates/servicemonitor-clickhouse.yaml b/charts/clickhouse/templates/servicemonitor-clickhouse.yaml similarity index 67% rename from clickhouse/templates/servicemonitor-clickhouse.yaml rename to charts/clickhouse/templates/servicemonitor-clickhouse.yaml index ab5d94c2c..bb9aa3056 100755 --- a/clickhouse/templates/servicemonitor-clickhouse.yaml +++ b/charts/clickhouse/templates/servicemonitor-clickhouse.yaml @@ -14,13 +14,25 @@ metadata: {{- toYaml .Values.clickhouse.metrics.serviceMonitor.selector | nindent 4 }} {{- end }} spec: + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} selector: matchLabels: app.kubernetes.io/name: {{ include "clickhouse.name" . }}-metrics endpoints: - port: metrics + path: /metrics {{- if .Values.clickhouse.metrics.serviceMonitor.interval }} interval: {{ .Values.clickhouse.metrics.serviceMonitor.interval }} - {{- end }} + {{- end }} +{{- if .Values.clickhouse.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ toYaml .Values.clickhouse.metrics.serviceMonitor.metricRelabelings | indent 6 }} +{{- end }} +{{- if .Values.clickhouse.metrics.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.clickhouse.metrics.serviceMonitor.relabelings | nindent 6 }} +{{- end }} --- {{- end}} diff --git a/clickhouse/templates/statefulset-clickhouse-replica.yaml b/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml similarity index 95% rename from clickhouse/templates/statefulset-clickhouse-replica.yaml rename to charts/clickhouse/templates/statefulset-clickhouse-replica.yaml index c0f7ec69d..e7e519a29 100755 --- a/clickhouse/templates/statefulset-clickhouse-replica.yaml +++ b/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml @@ -65,22 +65,9 @@ spec: - name: {{ . | quote }} {{- end }} {{- end }} - initContainers: - - name: init - image: {{ .Values.clickhouse.init.image }}:{{ .Values.clickhouse.init.imageVersion }} - imagePullPolicy: {{ .Values.clickhouse.init.imagePullPolicy }} - args: - - /bin/sh - - -c - - | - mkdir -p /etc/clickhouse-server/metrica.d - {{- if .Values.clickhouse.init.resources }} - resources: -{{ toYaml .Values.clickhouse.init.resources | indent 10 }} - {{- end }} containers: - name: {{ include "clickhouse.fullname" . }}-replica - image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }} + image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion | default .Chart.AppVersion }} imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }} command: - /bin/bash @@ -128,6 +115,9 @@ spec: mountPath: /etc/clickhouse-server/metrica.d - name: {{ include "clickhouse.fullname" . }}-users mountPath: /etc/clickhouse-server/users.d +{{- if .Values.clickhouse.volumeMounts }} +{{ toYaml .Values.clickhouse.volumeMounts | indent 8 }} +{{- end }} {{- with .Values.clickhouse.securityContext }} securityContext: {{- toYaml . 
| nindent 10 }} @@ -141,6 +131,10 @@ spec: containerPort: {{ .Values.clickhouse.metrics.image.port }} protocol: TCP {{- if .Values.clickhouse.metrics.resources }} +{{- if .Values.clickhouse.metrics.volumeMounts }} + volumeMounts: +{{ toYaml .Values.clickhouse.metrics.volumeMounts | indent 8 }} +{{- end }} resources: {{- toYaml .Values.clickhouse.metrics.resources | nindent 8 }} {{- end }} {{- end }} @@ -183,6 +177,9 @@ spec: - key: users.xml path: users.xml {{- end }} +{{- if .Values.clickhouse.volumes }} +{{ toYaml .Values.clickhouse.volumes | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-replica {{- end }} diff --git a/clickhouse/templates/statefulset-clickhouse.yaml b/charts/clickhouse/templates/statefulset-clickhouse.yaml similarity index 88% rename from clickhouse/templates/statefulset-clickhouse.yaml rename to charts/clickhouse/templates/statefulset-clickhouse.yaml index 9bc69c64b..69fd994bd 100755 --- a/clickhouse/templates/statefulset-clickhouse.yaml +++ b/charts/clickhouse/templates/statefulset-clickhouse.yaml @@ -46,6 +46,9 @@ spec: {{- end }} {{- end }} spec: + {{- with .Values.clickhouse.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} {{- with .Values.clickhouse.podSecurityContext }} securityContext: {{- toYaml . | nindent 8 }} @@ -64,22 +67,9 @@ spec: - name: {{ . | quote }} {{- end }} {{- end }} - initContainers: - - name: init - image: {{ .Values.clickhouse.init.image }}:{{ .Values.clickhouse.init.imageVersion }} - imagePullPolicy: {{ .Values.clickhouse.init.imagePullPolicy }} - args: - - /bin/sh - - -c - - | - mkdir -p /etc/clickhouse-server/metrica.d - {{- if .Values.clickhouse.init.resources }} - resources: -{{ toYaml .Values.clickhouse.init.resources | indent 10 }} - {{- end }} containers: - name: {{ include "clickhouse.fullname" . }} - image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }} + image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion | default .Chart.AppVersion }} imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }} command: - /bin/bash @@ -92,6 +82,16 @@ spec: containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }} - name: inter-http-port containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }} + {{- if .Values.clickhouse.startupProbe.enabled }} + startupProbe: + tcpSocket: + port: {{ .Values.clickhouse.tcp_port }} + initialDelaySeconds: {{ .Values.clickhouse.startupProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.clickhouse.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.clickhouse.startupProbe.timeoutSeconds }} + failureThreshold: {{ .Values.clickhouse.startupProbe.failureThreshold }} + successThreshold: {{ .Values.clickhouse.startupProbe.successThreshold }} + {{- end }} {{- if .Values.clickhouse.livenessProbe.enabled }} livenessProbe: tcpSocket: @@ -127,6 +127,9 @@ spec: mountPath: /etc/clickhouse-server/metrica.d - name: {{ include "clickhouse.fullname" . }}-users mountPath: /etc/clickhouse-server/users.d +{{- if .Values.clickhouse.volumeMounts }} +{{ toYaml .Values.clickhouse.volumeMounts | indent 8 }} +{{- end }} {{- with .Values.clickhouse.securityContext }} securityContext: {{- toYaml . 
| nindent 10 }} @@ -139,8 +142,12 @@ spec: - name: metrics containerPort: {{ .Values.clickhouse.metrics.image.port }} protocol: TCP +{{- if .Values.clickhouse.metrics.volumeMounts }} + volumeMounts: +{{ toYaml .Values.clickhouse.metrics.volumeMounts | indent 8 }} +{{- end }} {{- if .Values.clickhouse.metrics.resources }} - resources: {{- toYaml .Values.clickhouse.metrics.resources | nindent 8 }} + resources: {{- toYaml .Values.clickhouse.metrics.resources | nindent 10 }} {{- end }} {{- end }} {{- if .Values.clickhouse.nodeSelector }} @@ -169,6 +176,10 @@ spec: items: - key: config.xml path: config.xml + {{- if .Values.clickhouse.configmap.configOverride }} + - key: override.xml + path: override.xml + {{- end }} - name: {{ include "clickhouse.fullname" . }}-metrica configMap: name: {{ include "clickhouse.fullname" . }}-metrica @@ -182,6 +193,9 @@ spec: - key: users.xml path: users.xml {{- end }} +{{- if .Values.clickhouse.volumes }} +{{ toYaml .Values.clickhouse.volumes | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }} {{- end }} diff --git a/clickhouse/templates/svc-clickhouse-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-headless.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse-headless.yaml rename to charts/clickhouse/templates/svc-clickhouse-headless.yaml diff --git a/clickhouse/templates/svc-clickhouse-metrics.yaml b/charts/clickhouse/templates/svc-clickhouse-metrics.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse-metrics.yaml rename to charts/clickhouse/templates/svc-clickhouse-metrics.yaml diff --git a/clickhouse/templates/svc-clickhouse-replica-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse-replica-headless.yaml rename to charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml diff --git a/clickhouse/templates/svc-clickhouse-replica-metrics.yaml b/charts/clickhouse/templates/svc-clickhouse-replica-metrics.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse-replica-metrics.yaml rename to charts/clickhouse/templates/svc-clickhouse-replica-metrics.yaml diff --git a/clickhouse/templates/svc-clickhouse-replica.yaml b/charts/clickhouse/templates/svc-clickhouse-replica.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse-replica.yaml rename to charts/clickhouse/templates/svc-clickhouse-replica.yaml diff --git a/clickhouse/templates/svc-clickhouse.yaml b/charts/clickhouse/templates/svc-clickhouse.yaml similarity index 100% rename from clickhouse/templates/svc-clickhouse.yaml rename to charts/clickhouse/templates/svc-clickhouse.yaml diff --git a/clickhouse/templates/svc-tabix.yaml b/charts/clickhouse/templates/svc-tabix.yaml similarity index 100% rename from clickhouse/templates/svc-tabix.yaml rename to charts/clickhouse/templates/svc-tabix.yaml diff --git a/clickhouse/values.yaml b/charts/clickhouse/values.yaml similarity index 89% rename from clickhouse/values.yaml rename to charts/clickhouse/values.yaml index a77baccd2..4be3fb1d7 100755 --- a/clickhouse/values.yaml +++ b/charts/clickhouse/values.yaml @@ -56,6 +56,10 @@ clickhouse: # Security Context securityContext: {} + ## Additional Volumes + # volumes: [] + # volumeMounts: [] + ## Prometheus Exporter / Metrics ## metrics: @@ -70,6 +74,10 @@ clickhouse: ## Metrics exporter resource requests and limits # resources: {} + ## Additional Volumes + # 
volumes: [] + # volumeMounts: [] + ## Metrics exporter pod Annotation and Labels podAnnotations: prometheus.io/scrape: "true" @@ -110,6 +118,9 @@ clickhouse: ## Default value: /var/lib/clickhouse path: "/var/lib/clickhouse" ## + ## The host to listen on + listen_host: "0.0.0.0" + ## ## The port for connecting to the server over HTTP http_port: "8123" ## @@ -122,37 +133,37 @@ clickhouse: ## The instance number of Clickhouse replicas: "3" ## Clickhouse image configuration. - image: "yandex/clickhouse-server" - imageVersion: "19.14" + image: "clickhouse/clickhouse-server" + imageVersion: ~ imagePullPolicy: "IfNotPresent" + priorityClassName: ~ + ## The resource limits and requests used by clickhouse resources: {} - init: - image: "busybox" - imageVersion: "1.31.0" - imagePullPolicy: "IfNotPresent" - # imagePullSecrets: - ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - resources: {} + startupProbe: + enabled: true + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 60 + successThreshold: 1 livenessProbe: enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" + initialDelaySeconds: 0 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes readinessProbe: enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" + initialDelaySeconds: 0 + periodSeconds: 30 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 ## volumeClaimTemplates is a list of claims that pods are allowed to reference. ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. @@ -361,6 +372,28 @@ clickhouse: result_rows: "0" read_rows: "0" execution_time: "0" + ## Allows to configure specified MergeTree tables settings in config.xml + ## More info: https://clickhouse.com/docs/en/operations/settings/merge-tree-settings + merge_tree: + enabled: false + # If the number of active parts in a single partition exceeds the parts_to_delay_insert value, an INSERT artificially slows down. + parts_to_delay_insert: 150 + # If the number of inactive parts in a single partition more than the inactive_parts_to_throw_insert value, INSERT is interrupted with the "Too many inactive parts (N). Parts cleaning are processing significantly slower than inserts" exception. + parts_to_throw_insert: 300 + # The maximum number of threads that read parts when ClickHouse starts. + max_part_loading_threads: auto + # If the number of broken parts in a single partition exceeds the max_suspicious_broken_parts value, automatic deletion is denied. + max_suspicious_broken_parts: 100 + ## + ## Allows to override default Clickhouse config via override.xml + ## E.g. + ## configOverride: + ## + ## + ## event_date + INTERVAL 1 DAY DELETE + ## + ## + configOverride: "" ## ## Web interface for ClickHouse in the Tabix project. 
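For reference, a minimal values sketch showing how the `merge_tree` block and `configOverride` introduced above might be used together. The nesting assumes both keys live under `clickhouse.configmap` (the statefulset template later in this diff reads `.Values.clickhouse.configmap.configOverride`), and the override body is purely illustrative; any valid ClickHouse server XML could go there.

```yaml
clickhouse:
  configmap:
    # Structured MergeTree limits rendered into config.xml when enabled.
    merge_tree:
      enabled: true
      parts_to_delay_insert: 150
      parts_to_throw_insert: 300
      max_part_loading_threads: auto
      max_suspicious_broken_parts: 100
    # Free-form override, shipped as override.xml only when non-empty
    # (illustrative content; assumes a ClickHouse version that accepts
    # <clickhouse> as the config root element).
    configOverride: |-
      <clickhouse>
        <merge_tree>
          <ttl_only_drop_parts>1</ttl_only_drop_parts>
        </merge_tree>
      </clickhouse>
```

The structured block covers the most common MergeTree limits, while `configOverride` remains the escape hatch for settings the chart does not model explicitly.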
@@ -460,3 +493,7 @@ tabix: ## Additional Labels podLabels: + + ## Additional Volumes + # volumes: [] + # volumeMounts: [] diff --git a/sentry-kubernetes/.helmignore b/charts/sentry-kubernetes/.helmignore similarity index 100% rename from sentry-kubernetes/.helmignore rename to charts/sentry-kubernetes/.helmignore diff --git a/charts/sentry-kubernetes/CHANGELOG.md b/charts/sentry-kubernetes/CHANGELOG.md new file mode 100644 index 000000000..50476539e --- /dev/null +++ b/charts/sentry-kubernetes/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog + +## [0.4.0](https://github.com/sentry-kubernetes/charts/compare/sentry-kubernetes-v0.3.4...sentry-kubernetes-v0.4.0) (2024-09-26) + + +### Features + +* switch to go version of sentry-kubernetes with first minimal version of the chart ([#1395](https://github.com/sentry-kubernetes/charts/issues/1395)) ([e643ff6](https://github.com/sentry-kubernetes/charts/commit/e643ff6c742f3b8f88a88fdfec21c769b2c0fbb3)) + +## [0.3.4](https://github.com/sentry-kubernetes/charts/compare/sentry-kubernetes-v0.3.3...sentry-kubernetes-v0.3.4) (2024-04-12) + + +### Bug Fixes + +* make ingress, rbac compatible with latest k8s versions ([#114](https://github.com/sentry-kubernetes/charts/issues/114)) ([8d2f319](https://github.com/sentry-kubernetes/charts/commit/8d2f3196fe797a301ba6ebb21b793f3030d70962)) + +## 0.3.3 (2024-04-12) + + +### Bug Fixes + +* make ingress, rbac compatible with latest k8s versions ([#114](https://github.com/sentry-kubernetes/charts/issues/114)) ([8d2f319](https://github.com/sentry-kubernetes/charts/commit/8d2f3196fe797a301ba6ebb21b793f3030d70962)) diff --git a/charts/sentry-kubernetes/Chart.yaml b/charts/sentry-kubernetes/Chart.yaml new file mode 100644 index 000000000..bb19a8e26 --- /dev/null +++ b/charts/sentry-kubernetes/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v2 +name: sentry-kubernetes +description: A Helm chart for sentry-kubernetes + (https://github.com/getsentry/sentry-kubernetes) +type: application +version: 0.4.0 +appVersion: latest +home: https://github.com/getsentry/sentry-kubernetes +icon: https://sentry-brand.storage.googleapis.com/sentry-glyph-white.png +keywords: + - sentry + - report kubernetes events +sources: + - https://github.com/getsentry/sentry-kubernetes + - https://github.com/sentry-kubernetes/charts +maintainers: + - name: sentry-kubernetes diff --git a/charts/sentry-kubernetes/README.md b/charts/sentry-kubernetes/README.md new file mode 100644 index 000000000..4467ae1c3 --- /dev/null +++ b/charts/sentry-kubernetes/README.md @@ -0,0 +1,98 @@ +# sentry-kubernetes + +[sentry-kubernetes](https://github.com/getsentry/sentry-kubernetes) is a utility that pushes Kubernetes events to [Sentry](https://sentry.io). 
+ +# Installation: + +```console +$ helm install sentry/sentry-kubernetes --name my-release --set sentry.dsn= +``` + +## Configuration + +The following table lists the configurable parameters of the sentry-kubernetes chart and their default values: + +| Parameter | Description | Default | +| ----------------------- | --------------------------------------------------------------------------------------------------------------------------- | ----------------------------- | +| `sentry.dsn` | Sentry DSN | Empty | +| `existingSecret` | Existing secret to read DSN from | Empty | +| `sentry.environment` | Sentry environment | Empty | +| `sentry.release` | Sentry release version | Empty | +| `sentry.logLevel` | Sentry log level (trace, debug, info, warn, error, disabled) | `info` | +| `sentry.watchNamespaces`| Comma-separated list of namespaces to watch (set to `__all__` to watch all namespaces) | `default` | +| `sentry.watchHistorical`| Set to `1` to report all existing (old) events, `0` to only report new events | `0` | +| `sentry.clusterConfigType` | Cluster configuration type (`auto`, `in-cluster`, `out-cluster`) | `auto` | +| `sentry.kubeconfigPath` | Filesystem path to the kubeconfig used to connect to the cluster (used if `clusterConfigType` is `out-cluster`) | Empty | +| `sentry.monitorCronjobs` | Set to `1` to enable Sentry Crons integration for CronJob objects | `0` | +| `sentry.customDsns` | Set to `1` to enable custom DSN specified in annotations with the key `k8s.sentry.io/dsn` | `0` | +| `image.repository` | Container image name | `getsentry/sentry-kubernetes` | +| `image.tag` | Container image tag | `latest` | +| `rbac.create` | If `true`, create and use RBAC resources | `true` | +| `serviceAccount.name` | Service account to be used. If not set and serviceAccount.create is `true`, a name is generated using the fullname template | Empty | +| `serviceAccount.create` | If true, create a new service account | `true` | +| `priorityClassName` | pod priorityClassName | Empty | +| `resources` | Resource requests and limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `podAnnotations` | Annotations to add to the pod | `{}` | +| `podLabels` | Additional labels to add to the pod | `{}` | +| `rbac.custom_rules` | List of custom RBAC rules to extend default permissions. Each rule can specify `apiGroups`, `resources`, and `verbs`. | `[]` | +| `sentry.appendEnv` | List of custom environment variables to append. Each item can specify a `name` and either a `value` or a `valueFrom` reference. | `[]` | + +## Usage + +After installing the chart, you can configure various aspects of the sentry-kubernetes integration by modifying the `values.yaml` file or using `--set` flags during installation. 
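One path the configuration table mentions but the example below does not show is `existingSecret`. A minimal sketch, assuming a Secret named `my-sentry-dsn` (an example name) was created beforehand with the DSN stored under the `sentry.dsn` key that the deployment template reads, and that the chart's `sentry-kubernetes.secretName` helper resolves to `existingSecret` when it is set:

```yaml
# values.yaml sketch: read the DSN from a pre-created Secret instead of
# setting sentry.dsn directly. The Secret name is an example and must hold
# the DSN under the data key "sentry.dsn".
existingSecret: my-sentry-dsn

sentry:
  environment: production
  logLevel: info
  # Watch every namespace instead of only "default".
  watchNamespaces: "__all__"
```

Note that `--name` in the install command above is Helm 2 syntax; with Helm 3 the release name is the first positional argument, e.g. `helm install my-release sentry/sentry-kubernetes`.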
+ +### Example `values.yaml` Configuration + +Here's an example `values.yaml` that sets up sentry-kubernetes with custom configurations (remove unused values for default values): + +```yaml + +sentry: + dsn: + environment: production + release: "1.0.0" + logLevel: info + watchNamespaces: "default,production" + watchHistorical: "1" + clusterConfigType: auto + kubeconfigPath: "/path/to/kubeconfig" + monitorCronjobs: "1" + customDsns: "1" + appendEnv: + - name: SENTRY_NEW_ENV_1 + value: "newvalues" + - name: SENTRY_NEW_ENV_2 + value: "newvalues" + + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Will replace the default rules + custom_rules: + - verbs: + - get + - list + - watch + apiGroups: + - 'apps' + - 'batch' + - '' + resources: + - events + - jobs + - deployments + - replicasets + - cronjobs + - pods + +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 250m + memory: 256Mi + diff --git a/sentry-kubernetes/templates/NOTES.txt b/charts/sentry-kubernetes/templates/NOTES.txt similarity index 100% rename from sentry-kubernetes/templates/NOTES.txt rename to charts/sentry-kubernetes/templates/NOTES.txt diff --git a/sentry-kubernetes/templates/_helpers.tpl b/charts/sentry-kubernetes/templates/_helpers.tpl similarity index 100% rename from sentry-kubernetes/templates/_helpers.tpl rename to charts/sentry-kubernetes/templates/_helpers.tpl diff --git a/sentry-kubernetes/templates/clusterrole.yaml b/charts/sentry-kubernetes/templates/clusterrole.yaml similarity index 70% rename from sentry-kubernetes/templates/clusterrole.yaml rename to charts/sentry-kubernetes/templates/clusterrole.yaml index 2afc81ecd..2e6a12eff 100644 --- a/sentry-kubernetes/templates/clusterrole.yaml +++ b/charts/sentry-kubernetes/templates/clusterrole.yaml @@ -5,6 +5,10 @@ metadata: labels: {{ include "sentry-kubernetes.labels" . | indent 4 }} name: {{ template "sentry-kubernetes.fullname" . }} rules: + # Custom rules inclusion + {{ if .Values.rbac.custom_rules }} + {{- toYaml .Values.rbac.custom_rules | nindent 4 }} + {{ else }} - apiGroups: - "" resources: @@ -13,4 +17,5 @@ rules: - get - list - watch + {{ end }} {{- end -}} \ No newline at end of file diff --git a/sentry-kubernetes/templates/clusterrolebinding.yaml b/charts/sentry-kubernetes/templates/clusterrolebinding.yaml similarity index 100% rename from sentry-kubernetes/templates/clusterrolebinding.yaml rename to charts/sentry-kubernetes/templates/clusterrolebinding.yaml diff --git a/charts/sentry-kubernetes/templates/deployment.yaml b/charts/sentry-kubernetes/templates/deployment.yaml new file mode 100644 index 000000000..27833751a --- /dev/null +++ b/charts/sentry-kubernetes/templates/deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {{ include "sentry-kubernetes.labels" . | indent 4 }} + name: {{ template "sentry-kubernetes.fullname" . }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: {{ template "sentry-kubernetes.name" . }} + template: + metadata: + annotations: + checksum/secrets: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry-kubernetes.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: "{{ .Values.priorityClassName }}" + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + {{ if .Values.sentry.dsn }} + - name: SENTRY_DSN + valueFrom: + secretKeyRef: + name: {{ template "sentry-kubernetes.secretName" . }} + key: sentry.dsn + {{ end }} + {{ if .Values.sentry.environment }} + - name: SENTRY_ENVIRONMENT + value: {{ .Values.sentry.environment | quote }} + {{ end }} + {{ if .Values.sentry.release }} + - name: SENTRY_RELEASE + value: {{ .Values.sentry.release | quote}} + {{ end }} + {{ if .Values.sentry.logLevel }} + - name: SENTRY_K8S_LOG_LEVEL + value: {{ .Values.sentry.logLevel | quote}} + {{ end }} + {{ if .Values.sentry.watchNamespaces }} + - name: SENTRY_K8S_WATCH_NAMESPACES + value: {{ .Values.sentry.watchNamespaces | quote}} + {{ end }} + {{ if .Values.sentry.watchHistorical }} + - name: SENTRY_K8S_WATCH_HISTORICAL + value: {{ .Values.sentry.watchHistorical | quote}} + {{ end }} + {{ if .Values.sentry.clusterConfigType }} + - name: SENTRY_K8S_CLUSTER_CONFIG_TYPE + value: {{ .Values.sentry.clusterConfigType | quote}} + {{ end }} + {{ if .Values.sentry.kubeconfigPath }} + - name: SENTRY_K8S_KUBECONFIG_PATH + value: {{ .Values.sentry.kubeconfigPath | quote}} + {{ end }} + {{ if .Values.sentry.monitorCronjobs }} + - name: SENTRY_K8S_MONITOR_CRONJOBS + value: {{ .Values.sentry.monitorCronjobs | quote}} + {{ end }} + {{ if .Values.sentry.customDsns }} + - name: SENTRY_K8S_CUSTOM_DSNS + value: {{ .Values.sentry.customDsns | quote}} + {{ end }} + {{ if .Values.sentry.appendEnv }} + {{- toYaml .Values.sentry.appendEnv | nindent 10 }} + {{ end }} + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- if .Values.nodeSelector }} + nodeSelector: + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "sentry-kubernetes.serviceAccountName" . }} diff --git a/sentry-kubernetes/templates/secret.yaml b/charts/sentry-kubernetes/templates/secret.yaml similarity index 100% rename from sentry-kubernetes/templates/secret.yaml rename to charts/sentry-kubernetes/templates/secret.yaml diff --git a/sentry-kubernetes/templates/serviceaccount.yaml b/charts/sentry-kubernetes/templates/serviceaccount.yaml similarity index 100% rename from sentry-kubernetes/templates/serviceaccount.yaml rename to charts/sentry-kubernetes/templates/serviceaccount.yaml diff --git a/charts/sentry-kubernetes/values.yaml b/charts/sentry-kubernetes/values.yaml new file mode 100644 index 000000000..b775e47f4 --- /dev/null +++ b/charts/sentry-kubernetes/values.yaml @@ -0,0 +1,69 @@ +# Default values for sentry-kubernetes. + +sentry: + dsn: + # environment: production + # release: "1.0.0" + # logLevel: info + # watchNamespaces: "default,production" + # watchHistorical: "1" + # clusterConfigType: auto + # kubeconfigPath: "/path/to/kubeconfig" + # monitorCronjobs: "1" + # customDsns: "1" + # This can be use to add custom env var to the pod if not yet supported by the chart. 
+ # appendEnv: + # - name: SENTRY_NEW_ENV_1 + # value: "newvalues" + # - name: SENTRY_NEW_ENV_2 + # value: "newvalues" + +# Sentry DSN config using an existing secret: +# existingSecret: +image: + repository: ghcr.io/getsentry/sentry-kubernetes + # The tag should be hard-coded in the charts once getsentry starts releasing versions. + tag: latest + pullPolicy: Always +resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # if you need more specific cluster_roles for security reasons, custom_rules will be used in their place + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Example of custom rules necessary for catching cronjobs and other issues. + # custom_rules: + # - verbs: + # - get + # - list + # - watch + # apiGroups: + # - 'apps' + # - 'batch' + # - '' + # resources: + # - events + # - jobs + # - deployments + # - replicasets + # - cronjobs + # - pods + +# Set priorityClassName in deployment +# priorityClassName: "" + +podLabels: {} +podAnnotations: {} diff --git a/sentry/.gitignore b/charts/sentry/.gitignore similarity index 100% rename from sentry/.gitignore rename to charts/sentry/.gitignore diff --git a/sentry/.helmignore b/charts/sentry/.helmignore similarity index 100% rename from sentry/.helmignore rename to charts/sentry/.helmignore diff --git a/charts/sentry/CHANGELOG.md b/charts/sentry/CHANGELOG.md new file mode 100644 index 000000000..4e91b6b84 --- /dev/null +++ b/charts/sentry/CHANGELOG.md @@ -0,0 +1,787 @@ +# Changelog + +## [26.10.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.9.1...sentry-v26.10.0) (2025-01-03) + + +### Features + +* update Sentry chart to appVersion 24.10.0 ([#1652](https://github.com/sentry-kubernetes/charts/issues/1652)) ([72f61ff](https://github.com/sentry-kubernetes/charts/commit/72f61ff7ae1a26d11865cec083861041d94e029d)) + +## [26.9.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.9.0...sentry-v26.9.1) (2024-12-23) + + +### Bug Fixes + +* add Kafka topics from upstream projects (Sentry and Snuba) for version 24.9.0 ([#1647](https://github.com/sentry-kubernetes/charts/issues/1647)) ([cac64cf](https://github.com/sentry-kubernetes/charts/commit/cac64cff98fe9b11a29c83e5f562086ad5f4e00a)) + +## [26.9.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.8.1...sentry-v26.9.0) (2024-12-16) + + +### Features + +* add variable for SAMPLED_DEFAULT_RATE ([#1634](https://github.com/sentry-kubernetes/charts/issues/1634)) ([7910f9d](https://github.com/sentry-kubernetes/charts/commit/7910f9d0030b9c09206e01762e284003d9e61639)) + +## [26.8.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.8.0...sentry-v26.8.1) (2024-12-11) + + +### Bug Fixes + +* Add missing ingest for feedback events ([#1630](https://github.com/sentry-kubernetes/charts/issues/1630)) ([3882cae](https://github.com/sentry-kubernetes/charts/commit/3882cae9b5cde0591f87b9021e09ddbad27ca659)) +* missing flags required for Queues and Queries instrumentation ([#1622](https://github.com/sentry-kubernetes/charts/issues/1622)) ([a9b6b61](https://github.com/sentry-kubernetes/charts/commit/a9b6b61c842c5a92f0e011d3ae74c489bb83602c)) +* missing profilingFunctionsConsumer.sidecars ([#1627](https://github.com/sentry-kubernetes/charts/issues/1627))
([bcbd6ed](https://github.com/sentry-kubernetes/charts/commit/bcbd6ed01e76d741ab500adf1aa6038f0b36f631)) + +## [26.8.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.7.0...sentry-v26.8.0) (2024-12-04) + + +### Features + +* add configuration for noStrictOffsetReset ([#1620](https://github.com/sentry-kubernetes/charts/issues/1620)) ([234552e](https://github.com/sentry-kubernetes/charts/commit/234552e186a5b9ed814595ce83bc744034c93821)) +* Add missing sidecar and global parameter for deployments & hooks ([#1230](https://github.com/sentry-kubernetes/charts/issues/1230)) ([ac8143f](https://github.com/sentry-kubernetes/charts/commit/ac8143fd1607c0493ea5c60940be3e649f6d613b)) + +## [26.7.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.6.0...sentry-v26.7.0) (2024-11-28) + + +### Features + +* pgbouncer for postgresql ([#1607](https://github.com/sentry-kubernetes/charts/issues/1607)) ([07157bd](https://github.com/sentry-kubernetes/charts/commit/07157bd61bff026cd80d912d16c140c9168d90d3)) + +## [26.6.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.5.0...sentry-v26.6.0) (2024-11-25) + + +### Features + +* disable Sentry anonymous usage statistics ([#1608](https://github.com/sentry-kubernetes/charts/issues/1608)) ([b679d97](https://github.com/sentry-kubernetes/charts/commit/b679d97e21e787857a7d1dc8fd7c84ceb759e083)) + +## [26.5.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.4.0...sentry-v26.5.0) (2024-11-05) + + +### Features + +* recovery support multi hosts and ports of external kafka cluster ([#1588](https://github.com/sentry-kubernetes/charts/issues/1588)) ([889bd0d](https://github.com/sentry-kubernetes/charts/commit/889bd0d47235cb1ab5a7b52439f5b8df61026a03)) + +## [26.4.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.3.0...sentry-v26.4.0) (2024-11-03) + + +### Features + +* add maxBatchTimeMs, maxPollIntervalMs for ingestConsumerAttachments ([#1591](https://github.com/sentry-kubernetes/charts/issues/1591)) ([72af218](https://github.com/sentry-kubernetes/charts/commit/72af2189d2249cc29de1442179ce2258da958e44)) + +## [26.3.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.2.0...sentry-v26.3.0) (2024-10-26) + + +### Features + +* Introduce global tolerations across all components ([#1580](https://github.com/sentry-kubernetes/charts/issues/1580)) ([7b48399](https://github.com/sentry-kubernetes/charts/commit/7b48399efe73cbb582b4df34068d0104ae3d969c)) + +## [26.2.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.1.0...sentry-v26.2.0) (2024-10-25) + + +### Features + +* allow users to specify Kafka topic name prefix in values.yaml ([#1544](https://github.com/sentry-kubernetes/charts/issues/1544)) ([5693406](https://github.com/sentry-kubernetes/charts/commit/569340626ce1587d48040a939b80ab74874fd022)) +* **rabbitmq:** updated configuration to support Prometheus ([#1578](https://github.com/sentry-kubernetes/charts/issues/1578)) ([35f779a](https://github.com/sentry-kubernetes/charts/commit/35f779a0f18b2b9108470efc86403fa3c0eefae2)) + +## [26.1.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v26.0.0...sentry-v26.1.0) (2024-10-25) + + +### Features + +* add global nodeSelector fallback for all deployments ([#1576](https://github.com/sentry-kubernetes/charts/issues/1576)) ([d6eec42](https://github.com/sentry-kubernetes/charts/commit/d6eec42b2c31f42a473d1241721ff3d64111400f)) + +## [26.0.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.20.0...sentry-v26.0.0) 
(2024-10-24) + + +### ⚠ BREAKING CHANGES + +Make sure to upgrade to chart version 25.20.0 (Sentry 24.8.0) before upgrading to 26.x.x + +**Note:** In version [sentry-v25.19.0](https://github.com/sentry-kubernetes/charts/releases/tag/sentry-v25.19.0) ([commit](https://github.com/sentry-kubernetes/charts/commit/f5a12e04ee5ffa28f1d62bf6c7cb5c733b30c2b9)), SASL authentication functionality for Kafka was added, which broke backward compatibility when using an external Kafka cluster. The single-host external Kafka setup works correctly. The [issue #1584](https://github.com/sentry-kubernetes/charts/issues/1584) was fixed in version [sentry-v26.5.0](https://github.com/sentry-kubernetes/charts/releases/tag/sentry-v26.5.0) ([commit](https://github.com/sentry-kubernetes/charts/commit/889bd0d47235cb1ab5a7b52439f5b8df61026a03)). In this case, for a sequential upgrade, a viable workaround could be to use [sentry-v26.5.0](https://github.com/sentry-kubernetes/charts/releases/tag/sentry-v26.5.0) with `appVersion: 24.8.0` initially ([Chart.yaml](https://github.com/sentry-kubernetes/charts/blob/sentry-v26.5.0/charts/sentry/Chart.yaml#L6)), and then use [sentry-v26.5.0](https://github.com/sentry-kubernetes/charts/releases/tag/sentry-v26.5.0) as is ([details](https://github.com/sentry-kubernetes/charts/pull/1588#issuecomment-2459117235)). + +### Features + +* add maxTasksPerChild option to Sentry worker deployments ([#1572](https://github.com/sentry-kubernetes/charts/issues/1572)) ([bc32900](https://github.com/sentry-kubernetes/charts/commit/bc329004f46f4af7ecf4a99f07e74e28dbee436e)) +* update sentry appVersion to 24.9.0 ([#1571](https://github.com/sentry-kubernetes/charts/issues/1571)) ([2a3a030](https://github.com/sentry-kubernetes/charts/commit/2a3a030ba3c61c6792712c4f637fe64d42a47fe2)) + +## [25.20.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.19.0...sentry-v25.20.0) (2024-10-23) + + +### Features + +* update sentry appVersion to 24.8.0 ([#1569](https://github.com/sentry-kubernetes/charts/issues/1569)) ([cb731e0](https://github.com/sentry-kubernetes/charts/commit/cb731e0cba028907fff29ed9e1525e544694ec32)) + +## [25.19.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.18.0...sentry-v25.19.0) (2024-10-22) + + +### Features + +* **clickhouse:** remove explicit imageVersion, inherit from chart ([#1561](https://github.com/sentry-kubernetes/charts/issues/1561)) ([4d003fd](https://github.com/sentry-kubernetes/charts/commit/4d003fdc350f1427d413285b94bf27fd13635239)) +* **sentry:** add sasl auth for kafka and manage settings of connections ([#1557](https://github.com/sentry-kubernetes/charts/issues/1557)) ([f5a12e0](https://github.com/sentry-kubernetes/charts/commit/f5a12e04ee5ffa28f1d62bf6c7cb5c733b30c2b9)) + + +### Bug Fixes + +* **snuba:** Add missing --no-strict-offset-reset for replacer ([#1559](https://github.com/sentry-kubernetes/charts/issues/1559)) ([0c415e7](https://github.com/sentry-kubernetes/charts/commit/0c415e704fb2f2cbb984d3e0d5e3b08895834436)) +* Use correct syntax for envFrom in web and worker ([#1563](https://github.com/sentry-kubernetes/charts/issues/1563)) ([b834c0e](https://github.com/sentry-kubernetes/charts/commit/b834c0e4651633ca88e4c1839d60c0c69cf52087)) + +## [25.18.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.17.1...sentry-v25.18.0) (2024-10-16) + + +### Features + +* **clickhouse:** update ClickHouse chart to 3.12.0 ([#1556](https://github.com/sentry-kubernetes/charts/issues/1556))
([07e73c1](https://github.com/sentry-kubernetes/charts/commit/07e73c1846c242f1babaa1ed47271588c9ec2daf)) + +## [25.17.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.17.0...sentry-v25.17.1) (2024-10-15) + + +### Bug Fixes + +* external redis functionality for relay ([#1548](https://github.com/sentry-kubernetes/charts/issues/1548)) ([6e71fc1](https://github.com/sentry-kubernetes/charts/commit/6e71fc169622c2e7e4934bfefd613f613a6c77d2)) + +## [25.17.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.16.0...sentry-v25.17.0) (2024-10-14) + + +### Features + +* add logLevel and logFormat options for worker events and transactions ([#1542](https://github.com/sentry-kubernetes/charts/issues/1542)) ([bfbdd4d](https://github.com/sentry-kubernetes/charts/commit/bfbdd4d95bf15b18a72ed3d5af2baa363e98d6b6)) + +## [25.16.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.15.1...sentry-v25.16.0) (2024-10-14) + + +### Features + +* **snuba:** add events_analytics_platform to settings ([#1540](https://github.com/sentry-kubernetes/charts/issues/1540)) ([b035b10](https://github.com/sentry-kubernetes/charts/commit/b035b10fb96d7081abcab8cf03a5f63e814a4871)) + +## [25.15.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.15.0...sentry-v25.15.1) (2024-10-14) + + +### Bug Fixes + +* conditionally set auto-offset-reset for snuba subscription consumers ([#1538](https://github.com/sentry-kubernetes/charts/issues/1538)) ([db26b85](https://github.com/sentry-kubernetes/charts/commit/db26b853246e8f213d25f8c5041893e54a556630)) + +## [25.15.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.14.0...sentry-v25.15.0) (2024-10-13) + + +### Features + +* **sentry:** Add missing --no-strict-offset-reset and --auto-offset-reset for consumers ([#1535](https://github.com/sentry-kubernetes/charts/issues/1535)) ([8e0eea0](https://github.com/sentry-kubernetes/charts/commit/8e0eea0e5a3805c93d19ea93240d634953461cea)) + +## [25.14.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.13.4...sentry-v25.14.0) (2024-10-13) + + +### Features + +* offset-reset in ds ([#1533](https://github.com/sentry-kubernetes/charts/issues/1533)) ([0e3ef2d](https://github.com/sentry-kubernetes/charts/commit/0e3ef2db47c552fc80d07442263764a33c11c0d3)) + +## [25.13.4](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.13.3...sentry-v25.13.4) (2024-10-13) + + +### Bug Fixes + +* symbolicator storage class ([#1530](https://github.com/sentry-kubernetes/charts/issues/1530)) ([26cbaab](https://github.com/sentry-kubernetes/charts/commit/26cbaab28dcb0c95ac10723ed62b453c678b9787)) + +## [25.13.3](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.13.2...sentry-v25.13.3) (2024-10-12) + + +### Bug Fixes + +* update geoip job hooks and volume handling ([#1529](https://github.com/sentry-kubernetes/charts/issues/1529)) ([886eb5f](https://github.com/sentry-kubernetes/charts/commit/886eb5fe8110bfb1a973740ca3a1a2e3e776c003)) + +## [25.13.2](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.13.1...sentry-v25.13.2) (2024-10-08) + + +### Bug Fixes + +* correct storageClass handling for geodata persistence ([#1524](https://github.com/sentry-kubernetes/charts/issues/1524)) ([b2f568d](https://github.com/sentry-kubernetes/charts/commit/b2f568d926771208256d47f03a2f39806ca94fe3)) + +## [25.13.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.13.0...sentry-v25.13.1) (2024-10-08) + + +### Bug Fixes + +* correct argument order of consumers 
([#1522](https://github.com/sentry-kubernetes/charts/issues/1522)) ([6236a74](https://github.com/sentry-kubernetes/charts/commit/6236a74e70a78525a6030ade5cd3fc29b424fe59)) + +## [25.13.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.12.0...sentry-v25.13.0) (2024-10-07) + + +### Features + +* add geoip support to sentry deployment ([#1516](https://github.com/sentry-kubernetes/charts/issues/1516)) ([4f2429b](https://github.com/sentry-kubernetes/charts/commit/4f2429b746fe13002c21abf233338a293acff1a0)) + +## [25.12.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.11.1...sentry-v25.12.0) (2024-10-07) + + +### Features + +* add existingSecretEnv support for web and worker deployments ([#1509](https://github.com/sentry-kubernetes/charts/issues/1509)) ([b170ac3](https://github.com/sentry-kubernetes/charts/commit/b170ac33a64e41a36bfeb416e05801ec9ae1365d)) +* allow customization of kafka configuration ([#1514](https://github.com/sentry-kubernetes/charts/issues/1514)) ([5f4009b](https://github.com/sentry-kubernetes/charts/commit/5f4009b97898bea66749436b792e4a9815df4be8)) + + +### Bug Fixes + +* user-create-job hook does not create user ([5f4009b](https://github.com/sentry-kubernetes/charts/commit/5f4009b97898bea66749436b792e4a9815df4be8)) + +## [25.11.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.11.0...sentry-v25.11.1) (2024-10-01) + + +### Bug Fixes + +* reintroduced "Extend Redis functionality" ([#1492](https://github.com/sentry-kubernetes/charts/issues/1492)) broke S3 existing secret (from commit 0b7a7b4c) ([#1499](https://github.com/sentry-kubernetes/charts/issues/1499)) ([3eb75ef](https://github.com/sentry-kubernetes/charts/commit/3eb75ef861c68279975d2baa846bdf9b678474f3)) + +## [25.11.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.10.1...sentry-v25.11.0) (2024-09-30) + + +### Features + +* reintroduce "Extend Redis functionality" ([#1492](https://github.com/sentry-kubernetes/charts/issues/1492)) ([0b7a7b4](https://github.com/sentry-kubernetes/charts/commit/0b7a7b4c874bf4d4a460c88bb259cab0e025f7ee)) + +## [25.10.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.10.0...sentry-v25.10.1) (2024-09-30) + + +### Bug Fixes + +* add topic partition counts in snuba config for correct ([#1489](https://github.com/sentry-kubernetes/charts/issues/1489)) ([2b44fb2](https://github.com/sentry-kubernetes/charts/commit/2b44fb2a449410a64aa4628e06fdd4e1cb1ae6aa)) +* configuring kafka to use zookeeper uses only brokers, and service name in db-check is wrong ([#1494](https://github.com/sentry-kubernetes/charts/issues/1494)) ([34d4975](https://github.com/sentry-kubernetes/charts/commit/34d49752a9869372a69ce6add1011be1155ec254)) + +## [25.10.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.9.0...sentry-v25.10.0) (2024-09-25) + + +### Features + +* update kafka for fix jmx-exporter scrape path ([#1477](https://github.com/sentry-kubernetes/charts/issues/1477)) ([a1c6250](https://github.com/sentry-kubernetes/charts/commit/a1c6250f70245f6514ddbe8e15741250bd6de1a2)) + +## [25.9.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.8.1...sentry-v25.9.0) (2024-09-18) + + +### Features + +* add logLevel option to ingestConsumerAttachments ([#1468](https://github.com/sentry-kubernetes/charts/issues/1468)) ([8005f0f](https://github.com/sentry-kubernetes/charts/commit/8005f0fcebf9856a3a29a99f596452a2c481a58c)) + +## [25.8.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.8.0...sentry-v25.8.1) 
(2024-09-17) + + +### Bug Fixes + +* correct nginx.conf and ingress settings for sentry 24.7.1 and ([#1466](https://github.com/sentry-kubernetes/charts/issues/1466)) ([cfb90ef](https://github.com/sentry-kubernetes/charts/commit/cfb90efd05b7b1b03cf191df4324e2092f50e4dc)) +* correct order of arguments of sentry consumers ([#1463](https://github.com/sentry-kubernetes/charts/issues/1463)) ([#1464](https://github.com/sentry-kubernetes/charts/issues/1464)) ([2861efa](https://github.com/sentry-kubernetes/charts/commit/2861efa7192b8d8bc02835ef4ade16a21b2729f1)) + +## [25.8.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.7.0...sentry-v25.8.0) (2024-09-16) + + +### Features + +* add logging and worker settings to Sentry web deployment ([#1459](https://github.com/sentry-kubernetes/charts/issues/1459)) ([f0427e2](https://github.com/sentry-kubernetes/charts/commit/f0427e219773382eee4580bc4170d221f5150eee)) + +## [25.7.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.6.0...sentry-v25.7.0) (2024-09-14) + + +### Features + +* **symbolicator:** implement deployment and statefulset selection ([#1453](https://github.com/sentry-kubernetes/charts/issues/1453)) ([112c1b5](https://github.com/sentry-kubernetes/charts/commit/112c1b50456273163f6692d16787c4d04fe87cda)) + +## [25.6.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.5.1...sentry-v25.6.0) (2024-09-14) + + +### Features + +* updated sentry to 24.7.1 ([#1454](https://github.com/sentry-kubernetes/charts/issues/1454)) ([7874e56](https://github.com/sentry-kubernetes/charts/commit/7874e569217e8469c5ce40087ecd656309a01bba)) + +## [25.5.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.5.0...sentry-v25.5.1) (2024-09-12) + + +### Bug Fixes + +* correct podLabels type from list to map in values.yaml ([#1448](https://github.com/sentry-kubernetes/charts/issues/1448)) ([0c34ecc](https://github.com/sentry-kubernetes/charts/commit/0c34ecca3874c4ff1162c76457993bbe29238b96)) +* invalid parameter in deployments ([#1446](https://github.com/sentry-kubernetes/charts/issues/1446)) ([dbafa66](https://github.com/sentry-kubernetes/charts/commit/dbafa66025fd9ecb3eb4b07a5df53f97221e77da)) + +## [25.5.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.4.0...sentry-v25.5.0) (2024-09-11) + + +### Features + +* enhance nginx config to handle disabled sentry relay ([#1430](https://github.com/sentry-kubernetes/charts/issues/1430)) ([4395dba](https://github.com/sentry-kubernetes/charts/commit/4395dba949ca41375bcf0c24435344406cc2bbb7)) + +## [25.4.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.3.0...sentry-v25.4.0) (2024-09-10) + + +### Features + +* add cache, logging, and kafka configuration options to sentry relay ([#1438](https://github.com/sentry-kubernetes/charts/issues/1438)) ([4a84c9f](https://github.com/sentry-kubernetes/charts/commit/4a84c9f5168969e044c0303ca81b60ce743303fd)) +* add excludequeues option to sentry worker deployment ([#1441](https://github.com/sentry-kubernetes/charts/issues/1441)) ([78e80fb](https://github.com/sentry-kubernetes/charts/commit/78e80fb35677b1174e9d6d5dcbc37f58a32b86ac)) + +## [25.3.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.2.2...sentry-v25.3.0) (2024-09-05) + + +### Features + +* enhance logging options and add missing configuration parameters ([#1419](https://github.com/sentry-kubernetes/charts/issues/1419)) ([c666226](https://github.com/sentry-kubernetes/charts/commit/c666226346114998ff3c04a005d494e79bd7e13e)) + +## 
[25.2.2](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.2.1...sentry-v25.2.2) (2024-09-04) + + +### Bug Fixes + +* remove invalid --max-batch-size and --processes parameters from some consumers ([#1416](https://github.com/sentry-kubernetes/charts/issues/1416)) ([e42dc12](https://github.com/sentry-kubernetes/charts/commit/e42dc12e9bddee1e4d42db7173901ccde0cd3371)) + +## [25.2.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.2.0...sentry-v25.2.1) (2024-09-03) + + +### Bug Fixes + +* del --max-batch-time-ms and enable maxBatchTimeMs in values ([#1412](https://github.com/sentry-kubernetes/charts/issues/1412)) ([086b477](https://github.com/sentry-kubernetes/charts/commit/086b47720b8fe0ee15fd65eafc5446dccc903366)) +* discord template typos ([#1408](https://github.com/sentry-kubernetes/charts/issues/1408)) ([044cc25](https://github.com/sentry-kubernetes/charts/commit/044cc254873911ae668e1c6bd1a34ac0883a1db8)) + +## [25.2.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.1.0...sentry-v25.2.0) (2024-09-02) + + +### Features + +* add logLevel, maxPollIntervalMs, inputBlockSize, maxBatchTimeMs ([#1403](https://github.com/sentry-kubernetes/charts/issues/1403)) ([78de49b](https://github.com/sentry-kubernetes/charts/commit/78de49b0f94633cf098aff320a79d7a48443b9a5)) + +## [25.1.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.0.1...sentry-v25.1.0) (2024-08-26) + + +### Features + +* add noStrictOffsetReset for ingest-consumer-attachments ([#1398](https://github.com/sentry-kubernetes/charts/issues/1398)) ([599294c](https://github.com/sentry-kubernetes/charts/commit/599294c33b9e5dfd076e581386a614f60fca38ef)) + +## [25.0.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v25.0.0...sentry-v25.0.1) (2024-08-23) + + +### Bug Fixes + +* clickhouse replicas 1 ([d789562](https://github.com/sentry-kubernetes/charts/commit/d789562cbde4371b0057272976a981f66229ca50)) + +## [25.0.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v24.0.1...sentry-v25.0.0) (2024-08-23) + + +### ⚠ BREAKING CHANGES + +* change default values again + +### Bug Fixes + +* change default values again ([a282b7e](https://github.com/sentry-kubernetes/charts/commit/a282b7e718c37c7d5d25aef19b6372ae00180ab0)) + +## [24.0.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v24.0.0...sentry-v24.0.1) (2024-08-22) + + +### Bug Fixes + +* revert ClickHouse replicas number ([#1392](https://github.com/sentry-kubernetes/charts/issues/1392)) ([ad6fc29](https://github.com/sentry-kubernetes/charts/commit/ad6fc293e627f78f15b960b8d8cbc0d606cd194f)) + +## [24.0.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.12.1...sentry-v24.0.0) (2024-08-19) + + +### ⚠ BREAKING CHANGES + +* deployment default values ([#1379](https://github.com/sentry-kubernetes/charts/issues/1379)) + +### Features + +* add optional relabeling configs to serviceMonitor object ([#1390](https://github.com/sentry-kubernetes/charts/issues/1390)) ([4f6e440](https://github.com/sentry-kubernetes/charts/commit/4f6e440c5c69ab728a2e9ac9a56b55ce274c5dc1)) + + +### Bug Fixes + +* deployment default values ([#1379](https://github.com/sentry-kubernetes/charts/issues/1379)) ([72376fd](https://github.com/sentry-kubernetes/charts/commit/72376fd0aeb9d7fdb6b30a275ae59429bb88da12)) + +## [23.12.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.12.0...sentry-v23.12.1) (2024-07-19) + + +### Bug Fixes + +* update memcached chart to 7.4.8 
([#1352](https://github.com/sentry-kubernetes/charts/issues/1352)) ([a39ae5b](https://github.com/sentry-kubernetes/charts/commit/a39ae5b5252b0535f76ee1dbaccf723dbc1bd6fb)) + +## [23.12.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.11.0...sentry-v23.12.0) (2024-07-11) + + +### Features + +* add parameters for web workers TTL ([#1355](https://github.com/sentry-kubernetes/charts/issues/1355)) ([a1b218f](https://github.com/sentry-kubernetes/charts/commit/a1b218f69a8ea20a987e11a94dbf052d5a05d3a8)) + + +### Bug Fixes + +* remove 'profiling-global-suspect-functions' as it is not supported on self hosted ([#1358](https://github.com/sentry-kubernetes/charts/issues/1358)) ([25004f6](https://github.com/sentry-kubernetes/charts/commit/25004f67e4cba551bb78d5c42af80d2e631c50de)) + +## [23.11.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.10.0...sentry-v23.11.0) (2024-06-24) + + +### Features + +* add multiprocess to postProcessForwardTransactions ([#1334](https://github.com/sentry-kubernetes/charts/issues/1334)) ([9de6968](https://github.com/sentry-kubernetes/charts/commit/9de696813a5e407f4ddf3657d19519500088e7d3)) + +## [23.10.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.9.1...sentry-v23.10.0) (2024-06-21) + + +### Features + +* add insights feature flags ([#1329](https://github.com/sentry-kubernetes/charts/issues/1329)) ([6cccdbd](https://github.com/sentry-kubernetes/charts/commit/6cccdbd1a8703c6f0a0d417654358e11e7275bce)) + +## [23.9.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.9.0...sentry-v23.9.1) (2024-06-17) + + +### Bug Fixes + +* template fails when existingSecretKeys is undefined ([#1323](https://github.com/sentry-kubernetes/charts/issues/1323)) ([4808c6f](https://github.com/sentry-kubernetes/charts/commit/4808c6ff76f53820c9ec8a25fd77ac42f0395d66)) + +## [23.9.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.8.0...sentry-v23.9.0) (2024-06-17) + + +### Features + +* Configure external postgres with values from secret ([#1279](https://github.com/sentry-kubernetes/charts/issues/1279)) ([adfb64d](https://github.com/sentry-kubernetes/charts/commit/adfb64da1dcd6ba109b64fe2e7496e88d65b38a9)) + +## [23.8.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.7.0...sentry-v23.8.0) (2024-06-16) + + +### Features + +* discord integration ([#1318](https://github.com/sentry-kubernetes/charts/issues/1318)) ([d480620](https://github.com/sentry-kubernetes/charts/commit/d480620b3d60d983b239a5a59f063b05a4234ecc)) + +## [23.7.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.6.0...sentry-v23.7.0) (2024-06-11) + + +### Features + +* **deps:** update kafka helm to v29 ([#1285](https://github.com/sentry-kubernetes/charts/issues/1285)) ([5b24013](https://github.com/sentry-kubernetes/charts/commit/5b240133bd5f40202e9a86b9744eb32ed512da97)) +* **deps:** update nginx docker tag to v18 ([#1301](https://github.com/sentry-kubernetes/charts/issues/1301)) ([161aed6](https://github.com/sentry-kubernetes/charts/commit/161aed65c60672972dd21c242a830f33a7d837ea)) + +## [23.6.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.5.2...sentry-v23.6.0) (2024-06-10) + + +### Features + +* add optional LogLevel parameter to sentry cleanup job and custom location snippet to nginx conf ([cd38e67](https://github.com/sentry-kubernetes/charts/commit/cd38e67121b04d5e4b060aaa97ae8378c837846e)) + + +### Bug Fixes + +* fix custom features ([#1309](https://github.com/sentry-kubernetes/charts/issues/1309)) 
([7490ec3](https://github.com/sentry-kubernetes/charts/commit/7490ec3bbf1efeebf780b8f3aec3aa70d177d4e4)) + +## [23.5.2](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.5.1...sentry-v23.5.2) (2024-06-08) + + +### Bug Fixes + +* **snuba:** add profile_chunks to the storage sets ([#1307](https://github.com/sentry-kubernetes/charts/issues/1307)) ([df812f7](https://github.com/sentry-kubernetes/charts/commit/df812f7cf4ba28006a59c0fa49a527feac50a184)) + +## [23.5.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.5.0...sentry-v23.5.1) (2024-06-07) + + +### Bug Fixes + +* **deployment-relay:** fix relay init container volume mounts ([#861](https://github.com/sentry-kubernetes/charts/issues/861)) ([72314d5](https://github.com/sentry-kubernetes/charts/commit/72314d5a5dcc53a7562e0066a54b62ac4f9eb3e0)) + +## [23.5.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.4.1...sentry-v23.5.0) (2024-06-07) + + +### Features + +* 24.5.1 update ([6e628ad](https://github.com/sentry-kubernetes/charts/commit/6e628adc200525ebe57e9977328d8dd8b5eea471)) + +## [23.4.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.4.0...sentry-v23.4.1) (2024-06-06) + + +### Bug Fixes + +* relay topic ([1ae5f66](https://github.com/sentry-kubernetes/charts/commit/1ae5f66a24260c96ae4711f1f880220f33150148)) + +## [23.4.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.3.0...sentry-v23.4.0) (2024-06-06) + + +### Features + +* add-custom-features to configmap ([#1297](https://github.com/sentry-kubernetes/charts/issues/1297)) ([300aea0](https://github.com/sentry-kubernetes/charts/commit/300aea0dfc6293892e450d359daa63ae3619ace5)) + +## [23.3.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.2.0...sentry-v23.3.0) (2024-06-04) + + +### Features + +* topologySpreadConstraint ([#1291](https://github.com/sentry-kubernetes/charts/issues/1291)) ([bc0d4e6](https://github.com/sentry-kubernetes/charts/commit/bc0d4e64987c1ea5e4d3b1386ce45ea94c3dd15b)) + +## [23.2.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.1.0...sentry-v23.2.0) (2024-06-03) + + +### Features + +* allow to configure extra manifest through values ([#1278](https://github.com/sentry-kubernetes/charts/issues/1278)) ([3fec182](https://github.com/sentry-kubernetes/charts/commit/3fec18254de4675077f57f7783403317fc1bdad7)) +* dependency update before push chart ([#1283](https://github.com/sentry-kubernetes/charts/issues/1283)) ([2c2b6c2](https://github.com/sentry-kubernetes/charts/commit/2c2b6c2d966c137a7c4251eb4c81b186f886c63e)) +* supports ipv6 ([#1292](https://github.com/sentry-kubernetes/charts/issues/1292)) ([b920e3f](https://github.com/sentry-kubernetes/charts/commit/b920e3f2159f8c3e124f09cc48b29ba5aae5aedb)) + + +### Bug Fixes + +* ipv6 ([b904c79](https://github.com/sentry-kubernetes/charts/commit/b904c7935c9d51903017c1e14f74bf122cfaddde)) + +## [23.1.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.0.3...sentry-v23.1.0) (2024-05-23) + + +### Features + +* bump sentry to 24.5.0 ([#1270](https://github.com/sentry-kubernetes/charts/issues/1270)) ([7d53050](https://github.com/sentry-kubernetes/charts/commit/7d53050f8c9bda2b2fca686685e44823706dc263)) + + +### Bug Fixes + +* **worker:** workerTransactions should be disabled by default ([#1275](https://github.com/sentry-kubernetes/charts/issues/1275)) ([6090619](https://github.com/sentry-kubernetes/charts/commit/6090619d326b420f6c177114ec55a5ef84f3a075)) + +## 
[23.0.3](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.0.2...sentry-v23.0.3) (2024-05-22) + + +### Bug Fixes + +* typo in deployment-snuba-subscription-consumer-metrics.yaml [#1271](https://github.com/sentry-kubernetes/charts/issues/1271) ([d667d0c](https://github.com/sentry-kubernetes/charts/commit/d667d0cd18e36af06e6ba3050a850265df540e1d)) + +## [23.0.2](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.0.1...sentry-v23.0.2) (2024-05-16) + + +### Bug Fixes + +* worker hpa ([#1263](https://github.com/sentry-kubernetes/charts/issues/1263)) ([0b55646](https://github.com/sentry-kubernetes/charts/commit/0b55646e696f6be42755b58308f3127532a60d70)) + +## [23.0.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v23.0.0...sentry-v23.0.1) (2024-05-15) + + +### Bug Fixes + +* fix worker deployments ([#1261](https://github.com/sentry-kubernetes/charts/issues/1261)) ([eb3e7af](https://github.com/sentry-kubernetes/charts/commit/eb3e7af7f0e74fee19c10a08d1dcd193bd8de429)) + +## [23.0.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.5.1...sentry-v23.0.0) (2024-05-14) + + +### ⚠ BREAKING CHANGES + +* ingest consumers and workers separation ([#1245](https://github.com/sentry-kubernetes/charts/issues/1245)) + +### Features + +* ingest consumers and workers separation ([#1245](https://github.com/sentry-kubernetes/charts/issues/1245)) ([5969544](https://github.com/sentry-kubernetes/charts/commit/596954497af0acef2cce4014056ca756c5eb3592)) + + +### Bug Fixes + +* fix order snuba-outcomes-billing-consumer args ([#1257](https://github.com/sentry-kubernetes/charts/issues/1257)) ([0645404](https://github.com/sentry-kubernetes/charts/commit/06454040e45fd3d13236d011802a1e6436da5f8a)) + +## [22.5.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.5.0...sentry-v22.5.1) (2024-05-14) + + +### Bug Fixes + +* fix snuba-outcomes-billing-consumer args ([#1254](https://github.com/sentry-kubernetes/charts/issues/1254)) ([ac821d6](https://github.com/sentry-kubernetes/charts/commit/ac821d61f94bcedd399b970f4c58e605c1d04602)) + +## [22.5.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.4.0...sentry-v22.5.0) (2024-05-14) + + +### Features + +* bump sentry to 24.4.2 ([#1248](https://github.com/sentry-kubernetes/charts/issues/1248)) ([c4ea3fb](https://github.com/sentry-kubernetes/charts/commit/c4ea3fbf8b646de66251f37521f744ac84228a9b)) + +## [22.4.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.3.0...sentry-v22.4.0) (2024-05-08) + + +### Features + +* checksum only for configmap contents ([#1228](https://github.com/sentry-kubernetes/charts/issues/1228)) ([97829b0](https://github.com/sentry-kubernetes/charts/commit/97829b0e0ebf705ec3083d3d01e52b4d09200946)) + + +### Bug Fixes + +* actualize Sentry consumer additional options usage ([#1244](https://github.com/sentry-kubernetes/charts/issues/1244)) ([e24d459](https://github.com/sentry-kubernetes/charts/commit/e24d4596feeb6b1fc7fe9da806d1bac2c43bcfc6)) + +## [22.3.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.2.1...sentry-v22.3.0) (2024-04-29) + + +### Features + +* Add liveness to consumers ([#1240](https://github.com/sentry-kubernetes/charts/issues/1240)) ([60aaa3d](https://github.com/sentry-kubernetes/charts/commit/60aaa3d5f485320dc3bca1161b786cb20df34b73)) + +## [22.2.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.2.0...sentry-v22.2.1) (2024-04-24) + + +### Bug Fixes + +* issues with nginx configuration for metrics 
([#1232](https://github.com/sentry-kubernetes/charts/issues/1232)) ([b227a6b](https://github.com/sentry-kubernetes/charts/commit/b227a6b801d244dba9baf7180eeaaaef96dfccc3)) +* sentry metrics deployment annotations ([#1239](https://github.com/sentry-kubernetes/charts/issues/1239)) ([59c6245](https://github.com/sentry-kubernetes/charts/commit/59c6245f0b79b6eafff14e301cd1378cbb432bc7)) + +## [22.2.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.1.1...sentry-v22.2.0) (2024-04-17) + + +### Features + +* added flags on individual components ([#1188](https://github.com/sentry-kubernetes/charts/issues/1188)) ([fb4d6f1](https://github.com/sentry-kubernetes/charts/commit/fb4d6f14b50c545a798b3a27012f28013d36e2d5)) +* update kafka topic provisioning config ([#1134](https://github.com/sentry-kubernetes/charts/issues/1134)) ([cd508ff](https://github.com/sentry-kubernetes/charts/commit/cd508ff4e02b44189eae30d993deb3fb368fd5ee)) + + +### Bug Fixes + +* kafka.listeners.interBroker typo ([#1222](https://github.com/sentry-kubernetes/charts/issues/1222)) ([63e7062](https://github.com/sentry-kubernetes/charts/commit/63e70626a6bb0ea4caeac8dfd80b216ffcfe9c28)) + +## [22.1.1](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.1.0...sentry-v22.1.1) (2024-04-15) + + +### Bug Fixes + +* When set to controller only, port 9092 is not listened on ([#1218](https://github.com/sentry-kubernetes/charts/issues/1218)) ([d5cbba6](https://github.com/sentry-kubernetes/charts/commit/d5cbba6b65da9f77f78299ddc9b07c24fd7b3cef)) + +## [22.1.0](https://github.com/sentry-kubernetes/charts/compare/sentry-v22.0.2...sentry-v22.1.0) (2024-04-12) + + +### Features + +* 1.0.0 ([9c5b70c](https://github.com/sentry-kubernetes/charts/commit/9c5b70c6396fd9974001e403cc11f92ccb61a011)) +* 11.0.0 ([9d333e6](https://github.com/sentry-kubernetes/charts/commit/9d333e6f259dad0444b349d839cdde0187f64cc9)) +* 11.2.0 ([0569872](https://github.com/sentry-kubernetes/charts/commit/05698720bc8a5eedcbaa847f766ac6a669360ec5)) +* 21.4.0 release ([#356](https://github.com/sentry-kubernetes/charts/issues/356)) ([84c56ac](https://github.com/sentry-kubernetes/charts/commit/84c56ac5417c94a15ec53374b63fe56ab5e9b9a2)) +* 9.0.0 ([d659dcb](https://github.com/sentry-kubernetes/charts/commit/d659dcbbea17eacf20653a6ca386ed233bd690f2)) +* ability to use existingSecret for psql ([#148](https://github.com/sentry-kubernetes/charts/issues/148)) ([8c02d7a](https://github.com/sentry-kubernetes/charts/commit/8c02d7ae67c5fc0d161e739e9968b6d272acc259)) +* add `existingSecret` param for Github ([#863](https://github.com/sentry-kubernetes/charts/issues/863)) ([8d43592](https://github.com/sentry-kubernetes/charts/commit/8d43592fe3f3a22e3189bd84995e5cecccd088be)) +* add podLabels to Sentry hooks ([#659](https://github.com/sentry-kubernetes/charts/issues/659)) ([50b8f1e](https://github.com/sentry-kubernetes/charts/commit/50b8f1e8879e22dcf7d10035b64c8ffa2b7e41ae)) +* add pvc to clickhouse ([#50](https://github.com/sentry-kubernetes/charts/issues/50)) ([197930f](https://github.com/sentry-kubernetes/charts/commit/197930f1a29bbf038351fbc72d401297140fc857)) +* add sentry cleanup cronjob ([#178](https://github.com/sentry-kubernetes/charts/issues/178)) ([ae42bc4](https://github.com/sentry-kubernetes/charts/commit/ae42bc4b4783f7515cf4e8620576568a6cb6ff5c)) +* add service account names for cronjobs ([#681](https://github.com/sentry-kubernetes/charts/issues/681)) ([0bc212d](https://github.com/sentry-kubernetes/charts/commit/0bc212daf3a999019d035deaed979f157f69d8ad)) +* 
add share process namespace to hook jobs ([#688](https://github.com/sentry-kubernetes/charts/issues/688)) ([b81316d](https://github.com/sentry-kubernetes/charts/commit/b81316d4b524e19b6f2968c7552f76c47ef33368)) +* add support for configuring email by env variables ([#87](https://github.com/sentry-kubernetes/charts/issues/87)) ([5e91477](https://github.com/sentry-kubernetes/charts/commit/5e91477cb61b8edb3b54734f899dd9560288417b)) +* add support of GCS without secret, using GKE ServiceAccount ([#264](https://github.com/sentry-kubernetes/charts/issues/264)) ([e42656e](https://github.com/sentry-kubernetes/charts/commit/e42656ee3f5d88b9b7ba07ceba15d02c82fb544c)) +* Add tolerations to user-create hook ([#615](https://github.com/sentry-kubernetes/charts/issues/615)) ([f347039](https://github.com/sentry-kubernetes/charts/commit/f347039cc4ff0f9a9e9237d9c6785d39bc46280c)) +* added (partial) support for Symbolicator deployment ([#213](https://github.com/sentry-kubernetes/charts/issues/213)) ([098fdef](https://github.com/sentry-kubernetes/charts/commit/098fdef2af7b1f7b53c596cb6127d70c1d49bcea)) +* added activeDeadlineSeconds to all jobs ([#683](https://github.com/sentry-kubernetes/charts/issues/683)) ([2418ba8](https://github.com/sentry-kubernetes/charts/commit/2418ba8c38a85991c9c9eb5a281bfb3d5d72a020)) +* added cpu hpa for worker - remove replica if hpa ([#74](https://github.com/sentry-kubernetes/charts/issues/74)) ([5a14b8b](https://github.com/sentry-kubernetes/charts/commit/5a14b8b59c786eaa352c521dea00feb765cc2dc1)) +* added different types of snuba consumers ([#83](https://github.com/sentry-kubernetes/charts/issues/83)) ([951cf38](https://github.com/sentry-kubernetes/charts/commit/951cf38c08a548e3db92d7848b6d1e6205d40b66)) +* added env var config for init jobs ([#124](https://github.com/sentry-kubernetes/charts/issues/124)) ([5cec614](https://github.com/sentry-kubernetes/charts/commit/5cec61417e243ce49fe509f6bc62847add0975d0)) +* added hpa for the web deployment ([#36](https://github.com/sentry-kubernetes/charts/issues/36)) ([b89ca49](https://github.com/sentry-kubernetes/charts/commit/b89ca49bf38af686bc69ff5f3ad0fb3392a2dbb2)) +* added post process forwarder ([#9](https://github.com/sentry-kubernetes/charts/issues/9)) ([686613a](https://github.com/sentry-kubernetes/charts/commit/686613a31572d2bd54d5153d4d44c5ba8bc77ac2)) +* added redis worker ([#10](https://github.com/sentry-kubernetes/charts/issues/10)) ([9e1cc7a](https://github.com/sentry-kubernetes/charts/commit/9e1cc7a046615856b50f17e232a9901bdc6204a9)) +* allow custom mode for sentry-relay ([#197](https://github.com/sentry-kubernetes/charts/issues/197)) ([a78b5f1](https://github.com/sentry-kubernetes/charts/commit/a78b5f1f061fad910cd679bb9eca083f04607de6)) +* allow some snuba deployments to be pushed as hooks ([#198](https://github.com/sentry-kubernetes/charts/issues/198)) ([9bda4ee](https://github.com/sentry-kubernetes/charts/commit/9bda4ee401664382f54932adfd7135820bc45d1d)) +* allow to configure running relay as a hook ([#183](https://github.com/sentry-kubernetes/charts/issues/183)) ([652a838](https://github.com/sentry-kubernetes/charts/commit/652a8386a4f84a0fa8319b7afda3d3c1784c3d06)) +* allow to specify Recreate strategy type for sentry-web ([#216](https://github.com/sentry-kubernetes/charts/issues/216)) ([0179c79](https://github.com/sentry-kubernetes/charts/commit/0179c79fe490e1f44a3e54f6bcc8ff0f37a78d92)) +* allows revisionHistoryLimit ([#339](https://github.com/sentry-kubernetes/charts/issues/339)) 
([65545c8](https://github.com/sentry-kubernetes/charts/commit/65545c87c98c3af930897f08f4d095f67beb3027)) +* bump sentry dependencies ([#1099](https://github.com/sentry-kubernetes/charts/issues/1099)) ([dccd183](https://github.com/sentry-kubernetes/charts/commit/dccd183760ee92aab2592333d8dc1a23d7411466)) +* configure Slack based on current documentation ([#189](https://github.com/sentry-kubernetes/charts/issues/189)) ([5c4447c](https://github.com/sentry-kubernetes/charts/commit/5c4447cd5f2055ac7356face9e19fa552adb5e77)) +* december release ([#245](https://github.com/sentry-kubernetes/charts/issues/245)) ([ec588a9](https://github.com/sentry-kubernetes/charts/commit/ec588a94520555e512b1b931eb5799e6d08293c9)) +* distributed tables v2 ([#588](https://github.com/sentry-kubernetes/charts/issues/588)) ([cfe7d73](https://github.com/sentry-kubernetes/charts/commit/cfe7d736278feeeb72189efb841a6099685ed1dd)) +* history limits for cronjobs ([#682](https://github.com/sentry-kubernetes/charts/issues/682)) ([f13da86](https://github.com/sentry-kubernetes/charts/commit/f13da86b541aa8645f1e7a72e54ea3df5ad6f7b2)) +* hpa for snuba ([#42](https://github.com/sentry-kubernetes/charts/issues/42)) ([36a6e10](https://github.com/sentry-kubernetes/charts/commit/36a6e10f87e2b124b2a6527901ac49454a6a29e2)) +* ingress ([#19](https://github.com/sentry-kubernetes/charts/issues/19)) ([a5fad21](https://github.com/sentry-kubernetes/charts/commit/a5fad21db773e664f2b51df0522f411b9be30698)) +* initial beta 0.5.0 ([51866df](https://github.com/sentry-kubernetes/charts/commit/51866df56c518d28c74937c6b5119cd683d667f0)) +* kafka fix db check ([a148a57](https://github.com/sentry-kubernetes/charts/commit/a148a57d7480e23adc912fe8700914792bea9807)) +* kafka ha ([#35](https://github.com/sentry-kubernetes/charts/issues/35)) ([3b581f0](https://github.com/sentry-kubernetes/charts/commit/3b581f0b33d5d7d297c8bf110c70b7ae4b0796d2)) +* **kafka:** enable kraft ([#1179](https://github.com/sentry-kubernetes/charts/issues/1179)) ([b01b26a](https://github.com/sentry-kubernetes/charts/commit/b01b26a15247daad77828d67b1cfa15a7c9cc95c)) +* lint ([#1](https://github.com/sentry-kubernetes/charts/issues/1)) ([680d142](https://github.com/sentry-kubernetes/charts/commit/680d142670ae1972a27d473837b0884dc9f536f9)) +* org subdomain disabled per default ([#248](https://github.com/sentry-kubernetes/charts/issues/248)) ([0819e79](https://github.com/sentry-kubernetes/charts/commit/0819e79bf71d2af2dc23a9347ee242945757a4d2)) +* pass entire symbolicator config as yaml ([#335](https://github.com/sentry-kubernetes/charts/issues/335)) ([20d23b5](https://github.com/sentry-kubernetes/charts/commit/20d23b5369dd9ebca7c8fb7cd0f63c8012ef45b3)) +* postgresql - and fix some issues ([#5](https://github.com/sentry-kubernetes/charts/issues/5)) ([70fa92d](https://github.com/sentry-kubernetes/charts/commit/70fa92dde31c7488f9303acfbd79813760fcebf7)) +* profiling support ([#938](https://github.com/sentry-kubernetes/charts/issues/938)) ([b58f8a3](https://github.com/sentry-kubernetes/charts/commit/b58f8a34bbfab27ab0ff10bddcd8c23033b230fc)) +* removal of hook resources can be configured ([#223](https://github.com/sentry-kubernetes/charts/issues/223)) ([32232da](https://github.com/sentry-kubernetes/charts/commit/32232da29ca7431084aea8048ef84da0ac1794f6)) +* secret key generation ([#672](https://github.com/sentry-kubernetes/charts/issues/672)) ([559afc9](https://github.com/sentry-kubernetes/charts/commit/559afc90bfc7e2fdfd8b2d471ade5293f56c94d7)) +* sentry 20.10.1 
([#211](https://github.com/sentry-kubernetes/charts/issues/211)) ([cb9c844](https://github.com/sentry-kubernetes/charts/commit/cb9c844592ac678fa1c86faafcf520a040f7d814)) +* sentry 20.7.2 ([#126](https://github.com/sentry-kubernetes/charts/issues/126)) ([43197bc](https://github.com/sentry-kubernetes/charts/commit/43197bcefc51efc92eb51bac6efc4198e5b5c0f7)) +* sentry 20.9.0 ([#182](https://github.com/sentry-kubernetes/charts/issues/182)) ([c7934ae](https://github.com/sentry-kubernetes/charts/commit/c7934aec90eb1231d6ad21d052ae50dca3c8bf21)) +* sentry 22.10 & split post-process-forwarder ([#766](https://github.com/sentry-kubernetes/charts/issues/766)) ([ca611ef](https://github.com/sentry-kubernetes/charts/commit/ca611efde92be5deb7756ae05837e6ff1402f839)) +* sentry january ([#291](https://github.com/sentry-kubernetes/charts/issues/291)) ([be0ba2a](https://github.com/sentry-kubernetes/charts/commit/be0ba2aa9baebe24bec119038e83fa6ffc56d75b)) +* sentry june release ([#910](https://github.com/sentry-kubernetes/charts/issues/910)) ([96eb327](https://github.com/sentry-kubernetes/charts/commit/96eb3278775032e594bbf893f4fd4ef6d958a3de)) +* sentry march release ([#333](https://github.com/sentry-kubernetes/charts/issues/333)) ([7408226](https://github.com/sentry-kubernetes/charts/commit/74082260202b8af97822d03b79f43cca61da9ca0)) +* sentry may 2022 ([#623](https://github.com/sentry-kubernetes/charts/issues/623)) ([2a4cb0c](https://github.com/sentry-kubernetes/charts/commit/2a4cb0c4afe16720475f05405aac08e718acdead)) +* **sentry:** adding statsd backend + prometheus-operator support ([#85](https://github.com/sentry-kubernetes/charts/issues/85)) ([7daee19](https://github.com/sentry-kubernetes/charts/commit/7daee1999c61934f937c751855d2746a78425456)) +* **sentry:** bump clickhouse image to 19.17 ([#143](https://github.com/sentry-kubernetes/charts/issues/143)) ([3b80f57](https://github.com/sentry-kubernetes/charts/commit/3b80f578d8647ed1a549590c04376ed76cc4473f)) +* **sentry:** enable snuba transactions consumer ([#142](https://github.com/sentry-kubernetes/charts/issues/142)) ([0203628](https://github.com/sentry-kubernetes/charts/commit/0203628f4c9d6e255ce163ceaa7997b89365c162)) +* **sentry:** use AppVersion as the default image tag ([#141](https://github.com/sentry-kubernetes/charts/issues/141)) ([a0e79cf](https://github.com/sentry-kubernetes/charts/commit/a0e79cf8c834c79c1018c02452274a6f43d5ef73)) +* separate config for external services ([#66](https://github.com/sentry-kubernetes/charts/issues/66)) ([5e80eba](https://github.com/sentry-kubernetes/charts/commit/5e80ebab63d8c78868edd63810789b47468b2986)) +* smtp support use-ssl options ([#494](https://github.com/sentry-kubernetes/charts/issues/494)) ([bf8fdd5](https://github.com/sentry-kubernetes/charts/commit/bf8fdd596861a22bff74299f9d23b60644d7a08f)) +* support for sentry relay & sentry 20.7.2 ([#144](https://github.com/sentry-kubernetes/charts/issues/144)) ([7ae3651](https://github.com/sentry-kubernetes/charts/commit/7ae365187da883efd891c351dc75c2e566112718)) +* support partitions parameter for consumer events ([#566](https://github.com/sentry-kubernetes/charts/issues/566)) ([26128fe](https://github.com/sentry-kubernetes/charts/commit/26128fe808c33008c1660fda1271cef4381bf1fa)) +* supports different values between redis&rabbitmq ([#16](https://github.com/sentry-kubernetes/charts/issues/16)) ([da70618](https://github.com/sentry-kubernetes/charts/commit/da7061889e42d66ce1085338f635fcbf9ed4c19d)) +* supports github apps - sso 
([#17](https://github.com/sentry-kubernetes/charts/issues/17)) ([90829bd](https://github.com/sentry-kubernetes/charts/commit/90829bd1bff956ae1d6815b1837985931b623d10)) +* supports persistent workers ([#15](https://github.com/sentry-kubernetes/charts/issues/15)) ([ca63322](https://github.com/sentry-kubernetes/charts/commit/ca6332289753f333cfc3a0ce39942f7092eef01f)) +* update clickhouse version ([#332](https://github.com/sentry-kubernetes/charts/issues/332)) ([3a6b012](https://github.com/sentry-kubernetes/charts/commit/3a6b0123d564ea10d65845123c6cec61659ea9f2)) +* update rabbitmq-ha to allow setting affinity ([#110](https://github.com/sentry-kubernetes/charts/issues/110)) ([fe269f2](https://github.com/sentry-kubernetes/charts/commit/fe269f253d46f763e080c084b92fa4f8da296964)) +* update Sentry & Snuba ([#80](https://github.com/sentry-kubernetes/charts/issues/80)) ([3f28140](https://github.com/sentry-kubernetes/charts/commit/3f281400fde3f06062a823ee06ea5474cfa261a3)) +* update sentry to 20.7.1 ([#122](https://github.com/sentry-kubernetes/charts/issues/122)) ([e1014b8](https://github.com/sentry-kubernetes/charts/commit/e1014b8b49626818cf80329e445f48b2f68390a9)) +* update sentry to february release ([#306](https://github.com/sentry-kubernetes/charts/issues/306)) ([e8d13dc](https://github.com/sentry-kubernetes/charts/commit/e8d13dc8afac5c878d85cf96f318a7fa316243be)) +* update sentry&snuba ([#72](https://github.com/sentry-kubernetes/charts/issues/72)) ([3cae6b9](https://github.com/sentry-kubernetes/charts/commit/3cae6b9ae6f956ef9de8138265f3e18c95638bf5)) +* update sentry&snuba images ([#53](https://github.com/sentry-kubernetes/charts/issues/53)) ([0427a1f](https://github.com/sentry-kubernetes/charts/commit/0427a1fc098c72a1b8b3a7af8a376247790f899b)) +* updated clickhouse chart version ([6bdcea8](https://github.com/sentry-kubernetes/charts/commit/6bdcea8690d207b29e610f28363841cf0b0912c9)) +* updated clickhouse to 1.4.0 ([17aa7ef](https://github.com/sentry-kubernetes/charts/commit/17aa7ef5a442986393ee473b9bcb439c15c866e9)) +* updated kafka helm chart and supports kafka without zookeeper ([#888](https://github.com/sentry-kubernetes/charts/issues/888)) ([d8fab37](https://github.com/sentry-kubernetes/charts/commit/d8fab37ef108dea173e6e7412fd030bd977af754)) +* updated tags to 20.8.0 ([#167](https://github.com/sentry-kubernetes/charts/issues/167)) ([91c7d41](https://github.com/sentry-kubernetes/charts/commit/91c7d41e14cb57d0951d7d88eafa721ccf430bb5)) +* updating Sentry config.yaml to support actual yaml. 
([#186](https://github.com/sentry-kubernetes/charts/issues/186)) ([945e4c0](https://github.com/sentry-kubernetes/charts/commit/945e4c0e44facbeae402fbb5f32a7ce05dd53dfe)) +* upgrade kafka for more stability ([#246](https://github.com/sentry-kubernetes/charts/issues/246)) ([898fbcd](https://github.com/sentry-kubernetes/charts/commit/898fbcd72f13e5d139456d56d3a87f6a0f6e8833)) +* upgrade sentry to 21.5.1 ([#389](https://github.com/sentry-kubernetes/charts/issues/389)) ([656d112](https://github.com/sentry-kubernetes/charts/commit/656d11265224a122a34eb5b043115af018f3c610)) +* upgrade sentry to 21.6.1 ([#416](https://github.com/sentry-kubernetes/charts/issues/416)) ([2234ab9](https://github.com/sentry-kubernetes/charts/commit/2234ab90fdd66a76813d78c541b6331b79839d96)) +* upgrade Sentry version to 21.3.1 ([#350](https://github.com/sentry-kubernetes/charts/issues/350)) ([13f4537](https://github.com/sentry-kubernetes/charts/commit/13f4537e4df3d67b4be82234309a580d2c4c9af4)) + + +### Bug Fixes + +* 419 using correct block indicator ([#420](https://github.com/sentry-kubernetes/charts/issues/420)) ([d4ff2b7](https://github.com/sentry-kubernetes/charts/commit/d4ff2b7476c91fb8f1120a5465203d6b8200eeca)) +* add dbcheck job that ensures that clickhouse and kafka are up before proceeding ([#267](https://github.com/sentry-kubernetes/charts/issues/267)) ([8bdd076](https://github.com/sentry-kubernetes/charts/commit/8bdd076d1b45bfcfc393b68726f5b55b597a6756)) +* add legacyApp field for slack ([#125](https://github.com/sentry-kubernetes/charts/issues/125)) ([dfe8a12](https://github.com/sentry-kubernetes/charts/commit/dfe8a1262989a44cbcdb0f79d11435dc13478740)) +* add noStrictOffsetReset with ingestConsumer ([#1186](https://github.com/sentry-kubernetes/charts/issues/1186)) ([f4d2f74](https://github.com/sentry-kubernetes/charts/commit/f4d2f74431cb3d041b248729b75bd474ec6ef455)) +* add performance-view in configmap-sentry.yaml ([#133](https://github.com/sentry-kubernetes/charts/issues/133)) ([72c41d2](https://github.com/sentry-kubernetes/charts/commit/72c41d2f70d70a66b9b8d1741eed098c19c03a0a)) +* allow Redis only and fix Redis password use in BROKER_URL ([#24](https://github.com/sentry-kubernetes/charts/issues/24)) ([20c4ca1](https://github.com/sentry-kubernetes/charts/commit/20c4ca1baac88cefb8bcd7cf76de15013003807c)) +* Apply appropriate batch apiVersion based on kubernetes version ([#700](https://github.com/sentry-kubernetes/charts/issues/700)) ([4128412](https://github.com/sentry-kubernetes/charts/commit/412841202e136f64266139132ebc96037a43d9ca)) +* charts path ([2d7bcc9](https://github.com/sentry-kubernetes/charts/commit/2d7bcc98abbc367c62643bd0cb1d02bce52f893a)) +* clickhouse database init "Bad get: has UInt64" because of '-' character in cluster name ([#204](https://github.com/sentry-kubernetes/charts/issues/204)) ([9f93287](https://github.com/sentry-kubernetes/charts/commit/9f932872ce44f8b4d3db5e7555cf3637be1a6d9d)) +* dbCheck image can be pulled from custom repository ([#358](https://github.com/sentry-kubernetes/charts/issues/358)) ([#359](https://github.com/sentry-kubernetes/charts/issues/359)) ([6145595](https://github.com/sentry-kubernetes/charts/commit/61455956b1db50fc5b2af47a673d9759c86308db)) +* distant clickhouse image ([5da5d86](https://github.com/sentry-kubernetes/charts/commit/5da5d866261db73d38437bdbbdf20057bd61b1df)) +* django.security.csrf issue ([#155](https://github.com/sentry-kubernetes/charts/issues/155)) 
([a680856](https://github.com/sentry-kubernetes/charts/commit/a680856c0208f4df626e05163f754442f59fca10)) +* do not complete the user-create on error ([#58](https://github.com/sentry-kubernetes/charts/issues/58)) ([7b925fe](https://github.com/sentry-kubernetes/charts/commit/7b925feb8b0d28a2a47276ae666462d9107a4a9d)) +* empty email values were crashing the pods ([#12](https://github.com/sentry-kubernetes/charts/issues/12)) ([5b6101f](https://github.com/sentry-kubernetes/charts/commit/5b6101fb0860ec0a8fff2dca86d1affc9b5d9656)) +* erroneous formatting in relay configmap ([#203](https://github.com/sentry-kubernetes/charts/issues/203)) ([a42d41f](https://github.com/sentry-kubernetes/charts/commit/a42d41fe502c8a8e2394330cbf1c03eddc9d8d20)), closes [#202](https://github.com/sentry-kubernetes/charts/issues/202) +* external db ([#34](https://github.com/sentry-kubernetes/charts/issues/34)) ([87bfc2c](https://github.com/sentry-kubernetes/charts/commit/87bfc2c7d2e2713ac0e5e39d6a7fca89ef32ba9a)) +* external postgres host ([#41](https://github.com/sentry-kubernetes/charts/issues/41)) ([bc6f5ab](https://github.com/sentry-kubernetes/charts/commit/bc6f5ab62ec1fb43d6b63254f6e6493344365acd)) +* Fix wrong service account for ingest-profiles deployment ([#966](https://github.com/sentry-kubernetes/charts/issues/966)) ([cead469](https://github.com/sentry-kubernetes/charts/commit/cead469eb418e8ae110f60c173ab0bb9f74e4cc3)) +* force UTC on clickhouse ([545052d](https://github.com/sentry-kubernetes/charts/commit/545052d90bb26559293e185f3436a671fa4ec2d4)) +* gcs filestore ([#18](https://github.com/sentry-kubernetes/charts/issues/18)) ([5249630](https://github.com/sentry-kubernetes/charts/commit/524963031fb29932992d166cb3ae06237c6cc19e)) +* github secret extra quotation mark([#415](https://github.com/sentry-kubernetes/charts/issues/415)) ([44cea73](https://github.com/sentry-kubernetes/charts/commit/44cea735054165be3e613dc142e9eb6f4136b032)) +* Healthcheck for Relay backend was missing so when using .Values.ingress.regexPathStyle: gke will throw an error that template is missing ([#612](https://github.com/sentry-kubernetes/charts/issues/612)) ([9da34de](https://github.com/sentry-kubernetes/charts/commit/9da34de847716a74fd7e6f347c5bc723f7f00bc5)) +* hide content-disposition header on /static for Safari ([#1051](https://github.com/sentry-kubernetes/charts/issues/1051)) ([a688d20](https://github.com/sentry-kubernetes/charts/commit/a688d20687f4c24cf9dd9dd4d54dc108b6dba2c7)) +* hpa names to make them unique ([#102](https://github.com/sentry-kubernetes/charts/issues/102)) ([ab9126a](https://github.com/sentry-kubernetes/charts/commit/ab9126a20e74b816e8473e08e4dcd6eacca4224b)) +* image pull policy ([#184](https://github.com/sentry-kubernetes/charts/issues/184)) ([0175798](https://github.com/sentry-kubernetes/charts/commit/0175798cf9982daef6f28771ce2dc6f4c688ff8d)) +* imagePullPolicy ingest consumer ([b036dc1](https://github.com/sentry-kubernetes/charts/commit/b036dc1493d47e0888bd7d89e0ab8f5a26ff2a06)) +* kafka env if disabled ([#43](https://github.com/sentry-kubernetes/charts/issues/43)) ([df96f9e](https://github.com/sentry-kubernetes/charts/commit/df96f9e8c84089e4c853651be4defad58ad40494)) +* make ingress, rbac compatible with latest k8s versions ([#114](https://github.com/sentry-kubernetes/charts/issues/114)) ([8d2f319](https://github.com/sentry-kubernetes/charts/commit/8d2f3196fe797a301ba6ebb21b793f3030d70962)) +* make the relay paths in the ingress compatible with traefik 
([#274](https://github.com/sentry-kubernetes/charts/issues/274)) ([1707630](https://github.com/sentry-kubernetes/charts/commit/17076301717c4beafacf0febb64bb474b3897f6e)) +* Make the snuba api liveness and readiness timeouts configurable ([#409](https://github.com/sentry-kubernetes/charts/issues/409)) ([9625fbe](https://github.com/sentry-kubernetes/charts/commit/9625fbe471aa751722e0daace15faa4d3a6e4fd9)) +* **memcache:** do not set SERVER_MAX_VALUE_LENGTH with latest django versions ([#1131](https://github.com/sentry-kubernetes/charts/issues/1131)) ([1307658](https://github.com/sentry-kubernetes/charts/commit/1307658414ad84668b658c7411e952d487944589)) +* Missing config for snuba migration ([#948](https://github.com/sentry-kubernetes/charts/issues/948)) ([becf6b5](https://github.com/sentry-kubernetes/charts/commit/becf6b5bcb7774e63c5c250ca0a99a4da289667f)) +* missing profiling config ([#1038](https://github.com/sentry-kubernetes/charts/issues/1038)) ([5d21ac2](https://github.com/sentry-kubernetes/charts/commit/5d21ac2b358664d63560faf99d9a7108f4287c5a)) +* new deployments should be sent as hook as well ([#229](https://github.com/sentry-kubernetes/charts/issues/229)) ([29cb2c9](https://github.com/sentry-kubernetes/charts/commit/29cb2c911620cbb179b34a7d6fc76c7e882e8c69)) +* new dist tables ([#282](https://github.com/sentry-kubernetes/charts/issues/282)) ([6dd45b7](https://github.com/sentry-kubernetes/charts/commit/6dd45b7def24e291126eabfe883a033a9706dd87)) +* nginx configmap name ([#185](https://github.com/sentry-kubernetes/charts/issues/185)) ([3e86a9a](https://github.com/sentry-kubernetes/charts/commit/3e86a9a3f09ce942782ab745009f9265f6d6574c)) +* nginx static path not pass proxy ([#1060](https://github.com/sentry-kubernetes/charts/issues/1060)) ([ae07c35](https://github.com/sentry-kubernetes/charts/commit/ae07c35e642def515eacdaf0bd95d507fb3e8249)) +* no events found ([6b85730](https://github.com/sentry-kubernetes/charts/commit/6b8573087a6363223335d8a5569e0811cb961858)) +* pass postgresql password to user-create job ([#55](https://github.com/sentry-kubernetes/charts/issues/55)) ([4778949](https://github.com/sentry-kubernetes/charts/commit/4778949104db6192dfa09e83f43206337e754b34)) +* pass postgress password to db-init ([#52](https://github.com/sentry-kubernetes/charts/issues/52)) ([73b5df2](https://github.com/sentry-kubernetes/charts/commit/73b5df227d1e996c24c8f301038d06f23e022862)) +* persistent volume discrepancy ([#30](https://github.com/sentry-kubernetes/charts/issues/30)) ([3f70746](https://github.com/sentry-kubernetes/charts/commit/3f7074668300c0c80a866eda56982ef8c7886025)) +* postgresql existing secret path ([#1004](https://github.com/sentry-kubernetes/charts/issues/1004)) ([68614e1](https://github.com/sentry-kubernetes/charts/commit/68614e1691e1d3a0f3ad6d80ca675e395fc439f7)) +* rabbitmq connection ([#57](https://github.com/sentry-kubernetes/charts/issues/57)) ([0b18d52](https://github.com/sentry-kubernetes/charts/commit/0b18d52563e1938555395d994004d37b5b024102)) +* rabbitmq to support new bitnami chart ([#320](https://github.com/sentry-kubernetes/charts/issues/320)) ([2936e90](https://github.com/sentry-kubernetes/charts/commit/2936e9075cc82c7aa2ad2fcdc2c11df8911d4e91)) +* relay connection to sentry if port not 9000 ([#181](https://github.com/sentry-kubernetes/charts/issues/181)) ([44aaabf](https://github.com/sentry-kubernetes/charts/commit/44aaabf4b925a65f77ab2e2945f6ec82c36afe34)) +* relay deployment ([#179](https://github.com/sentry-kubernetes/charts/issues/179)) 
([d8e34f5](https://github.com/sentry-kubernetes/charts/commit/d8e34f569e01cb04e4c17d5a03313c95b2cbf642)) +* replay session ([#1054](https://github.com/sentry-kubernetes/charts/issues/1054)) ([78c93e5](https://github.com/sentry-kubernetes/charts/commit/78c93e5996f7bc26ea4148e0aa8cb6c4b0b3aafb)) +* reverted 20.7.2 (not working properly without relay) ([#128](https://github.com/sentry-kubernetes/charts/issues/128)) ([a61af24](https://github.com/sentry-kubernetes/charts/commit/a61af243e3adb85a2c89e6b0755dae250830a1d4)) +* run Snuba in Distributed Dataset Mode ([#70](https://github.com/sentry-kubernetes/charts/issues/70)) ([7188166](https://github.com/sentry-kubernetes/charts/commit/7188166710aaaab6013c087c6091738731bb7ecf)) +* sentry secret deleted ([#748](https://github.com/sentry-kubernetes/charts/issues/748)) ([7d90dda](https://github.com/sentry-kubernetes/charts/commit/7d90ddaa49c3b629049620d08b582bedbcd89b79)) +* sentry version ([81bbe82](https://github.com/sentry-kubernetes/charts/commit/81bbe825b9bf52495b1eb6ded1d5d5dd1dfd04ff)) +* sentry web options ([#406](https://github.com/sentry-kubernetes/charts/issues/406)) ([73d4213](https://github.com/sentry-kubernetes/charts/commit/73d4213d255746c1a05eb5b7bd23801489e416da)) +* Set the right service account for ingest occurrences deployment ([#988](https://github.com/sentry-kubernetes/charts/issues/988)) ([a7d8390](https://github.com/sentry-kubernetes/charts/commit/a7d8390d46537dd904d2a9745358a894e304a6df)) +* snuba - custom redis ([#23](https://github.com/sentry-kubernetes/charts/issues/23)) ([04ad7fa](https://github.com/sentry-kubernetes/charts/commit/04ad7fa1db3e52da3aeeb8751f17b9d7ff023e0a)) +* snuba connection ([#13](https://github.com/sentry-kubernetes/charts/issues/13)) ([9bf1ffb](https://github.com/sentry-kubernetes/charts/commit/9bf1ffbddd4f8bc917d7602ad11b1c00443038b8)) +* space in relay config ([4c281c4](https://github.com/sentry-kubernetes/charts/commit/4c281c400566d3d35e6dd50582cdbffe293c9390)) +* special characters in email and password ([#165](https://github.com/sentry-kubernetes/charts/issues/165)) ([72df379](https://github.com/sentry-kubernetes/charts/commit/72df379355382d620fa3d28bf9f859d7938542cf)) +* symbolicator usage ([#173](https://github.com/sentry-kubernetes/charts/issues/173)) ([4e457e3](https://github.com/sentry-kubernetes/charts/commit/4e457e3b228a41602a30ad5b4920bce3e673b474)) +* templating error when disabling hooks ([#76](https://github.com/sentry-kubernetes/charts/issues/76)) ([f0a8e3f](https://github.com/sentry-kubernetes/charts/commit/f0a8e3f47a16895f0a1e11c6955cb5816454ebe2)) +* the default clickhouse installation is distributed, only turn on single node when clickhouse is disabled ([#624](https://github.com/sentry-kubernetes/charts/issues/624)) ([d923eab](https://github.com/sentry-kubernetes/charts/commit/d923eab060512957e91217f9029cf5d3286fc2e6)) +* turn off clickhouse backups to fix default installation ([#292](https://github.com/sentry-kubernetes/charts/issues/292)) ([45cc76d](https://github.com/sentry-kubernetes/charts/commit/45cc76da1c03ffe4c9e46f4be2c8d5b5c41f907d)) +* update clickhouse chart to the latest version ([#103](https://github.com/sentry-kubernetes/charts/issues/103)) ([9b64422](https://github.com/sentry-kubernetes/charts/commit/9b6442221627531bf4ce2a60b559475f7d4070ba)) +* updated gke BackendConfig with new values added on [#432](https://github.com/sentry-kubernetes/charts/issues/432) pull request ([#445](https://github.com/sentry-kubernetes/charts/issues/445)) 
([cedbd69](https://github.com/sentry-kubernetes/charts/commit/cedbd69290c1b52c39f19da5088e34ebf276a6d3)) +* worker should work with both redis & rabbitmq ([#48](https://github.com/sentry-kubernetes/charts/issues/48)) ([f4ab657](https://github.com/sentry-kubernetes/charts/commit/f4ab65782ac5603ae6df8df6d49220a9461f423a)) +* **worker:** fix liveness probe changes ([#1139](https://github.com/sentry-kubernetes/charts/issues/1139)) ([1042c3d](https://github.com/sentry-kubernetes/charts/commit/1042c3dcc583ca09b69dd0ce23c1869ea2ac6ba2)) +* zookeeper enabled per default ([ca18057](https://github.com/sentry-kubernetes/charts/commit/ca18057666d14159d968d9d0a10ee5a6b406f723)) + +## 22.0.2 (2024-04-12) + +Automated releases diff --git a/charts/sentry/Chart.lock b/charts/sentry/Chart.lock new file mode 100644 index 000000000..a98dd633c --- /dev/null +++ b/charts/sentry/Chart.lock @@ -0,0 +1,27 @@ +dependencies: +- name: memcached + repository: oci://registry-1.docker.io/bitnamicharts + version: 7.5.2 +- name: redis + repository: oci://registry-1.docker.io/bitnamicharts + version: 17.11.3 +- name: kafka + repository: oci://registry-1.docker.io/bitnamicharts + version: 29.3.14 +- name: clickhouse + repository: https://sentry-kubernetes.github.io/charts + version: 3.13.0 +- name: zookeeper + repository: oci://registry-1.docker.io/bitnamicharts + version: 11.4.11 +- name: rabbitmq + repository: oci://registry-1.docker.io/bitnamicharts + version: 11.16.2 +- name: postgresql + repository: oci://registry-1.docker.io/bitnamicharts + version: 12.5.1 +- name: nginx + repository: oci://registry-1.docker.io/bitnamicharts + version: 18.2.5 +digest: sha256:df4a8d1128f8b3319c8c10a5a8202a691fe95fb28f92b193dcafcdbf0c6636eb +generated: "2024-11-11T00:18:02.524127944Z" diff --git a/sentry/Chart.yaml b/charts/sentry/Chart.yaml similarity index 51% rename from sentry/Chart.yaml rename to charts/sentry/Chart.yaml index 1c1ecdb49..27528a5f9 100644 --- a/sentry/Chart.yaml +++ b/charts/sentry/Chart.yaml @@ -2,41 +2,41 @@ apiVersion: v2 name: sentry description: A Helm chart for Kubernetes type: application -version: 15.0.0 -appVersion: 22.6.0 +version: 26.10.0 +appVersion: 24.10.0 dependencies: - name: memcached - repository: https://charts.bitnami.com/bitnami - version: 6.1.5 + repository: oci://registry-1.docker.io/bitnamicharts + version: 7.5.2 condition: sourcemaps.enabled - name: redis - repository: https://charts.bitnami.com/bitnami - version: 16.12.1 + repository: oci://registry-1.docker.io/bitnamicharts + version: 17.11.3 condition: redis.enabled - name: kafka - repository: https://charts.bitnami.com/bitnami - version: 16.3.2 + repository: oci://registry-1.docker.io/bitnamicharts + version: 29.3.14 condition: kafka.enabled - name: clickhouse repository: https://sentry-kubernetes.github.io/charts - version: 3.1.2 + version: 3.13.0 condition: clickhouse.enabled - name: zookeeper - repository: https://charts.bitnami.com/bitnami - version: 9.0.0 + repository: oci://registry-1.docker.io/bitnamicharts + version: 11.4.11 condition: zookeeper.enabled - name: rabbitmq - repository: https://charts.bitnami.com/bitnami - version: 8.32.2 + repository: oci://registry-1.docker.io/bitnamicharts + version: 11.16.2 alias: rabbitmq condition: rabbitmq.enabled - name: postgresql - repository: https://charts.bitnami.com/bitnami - version: 10.16.2 + repository: oci://registry-1.docker.io/bitnamicharts + version: 12.5.1 condition: postgresql.enabled - name: nginx - repository: https://charts.bitnami.com/bitnami - version: 12.0.4 + 
repository: oci://registry-1.docker.io/bitnamicharts + version: 18.2.5 condition: nginx.enabled maintainers: - name: sentry-kubernetes diff --git a/charts/sentry/README.md b/charts/sentry/README.md new file mode 100644 index 000000000..c4e4da69a --- /dev/null +++ b/charts/sentry/README.md @@ -0,0 +1,1325 @@ +# Install + +## Add repo + +``` +helm repo add sentry https://sentry-kubernetes.github.io/charts +``` + +## Without overrides + +``` +helm install sentry sentry/sentry --wait --timeout=1000s +``` + +## With your own values file + +``` +helm install sentry sentry/sentry -f values.yaml --wait --timeout=1000s +``` + +# Upgrade + +Read the upgrade guide before upgrading to major versions of the chart. +[Upgrade Guide](docs/UPGRADE.md) + +## Configuration + +The following table lists the configurable parameters of the Sentry chart and their default values. + +Note: this table is incomplete; refer to the chart's values.yaml for any parameter that is not listed here. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| asHook | bool | `true` | | +| auth.register | bool | `true` | | +| clickhouse.clickhouse.configmap.remote_servers.internal_replication | bool | `true` | | +| clickhouse.clickhouse.configmap.remote_servers.replica.backup.enabled | bool | `false` | | +| clickhouse.clickhouse.configmap.users.enabled | bool | `false` | | +| clickhouse.clickhouse.configmap.users.user[0].config.networks[0] | string | `"::/0"` | | +| clickhouse.clickhouse.configmap.users.user[0].config.password | string | `""` | | +| clickhouse.clickhouse.configmap.users.user[0].config.profile | string | `"default"` | | +| clickhouse.clickhouse.configmap.users.user[0].config.quota | string | `"default"` | | +| clickhouse.clickhouse.configmap.users.user[0].name | string | `"default"` | | +| clickhouse.clickhouse.configmap.zookeeper_servers.config[0].hostTemplate | string | `"{{ .Release.Name }}-zookeeper-clickhouse"` | | +| clickhouse.clickhouse.configmap.zookeeper_servers.config[0].index | string | `"clickhouse"` | | +| clickhouse.clickhouse.configmap.zookeeper_servers.config[0].port | string | `"2181"` | | +| clickhouse.clickhouse.configmap.zookeeper_servers.enabled | bool | `true` | | +| clickhouse.clickhouse.imageVersion | string | `"21.8.13.6"` | | +| clickhouse.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes[0] | string | `"ReadWriteOnce"` | | +| clickhouse.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled | bool | `true` | | +| clickhouse.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage | string | `"30Gi"` | | +| clickhouse.clickhouse.persistentVolumeClaim.enabled | bool | `true` | | +| clickhouse.clickhouse.replicas | string | `"1"` | | +| clickhouse.enabled | bool | `true` | | +| config.configYml | object | `{}` | | +| config.relay | string | `"# No YAML relay config given\n"` | | +| config.sentryConfPy | string | `"# No Python Extension Config Given\n"` | | +| config.snubaSettingsPy | string | `"# No Python Extension Config Given\n"` | | +| config.web.httpKeepalive | int | `15` | | +| config.web.maxRequests | int | `100000` | | +| config.web.maxRequestsDelta | int | `500` | | +| config.web.maxWorkerLifetime | int | `86400` | | +| discord | object | `{}` | | +| externalClickhouse.database | string | `"default"` | | +| externalClickhouse.host | string | `"clickhouse"` | | +| externalClickhouse.httpPort | int | `8123` | | +| externalClickhouse.password | string | `""` | | +| externalClickhouse.singleNode | bool | `true` | | +| externalClickhouse.tcpPort 
| int | `9000` | | +| externalClickhouse.username | string | `"default"` | | +| externalKafka.cluster | list | `[]` | Multiple hosts and ports of external Kafka | +| externalKafka.host | string | `"kafka-confluent"` | Hostname or IP address of external Kafka | +| externalKafka.port | int | `9092` | Port for external Kafka | +| externalKafka.compression.type | string | `""` | Compression type for Kafka messages ('gzip', 'snappy', 'lz4', 'zstd') | +| externalKafka.message.max.bytes | int | `50000000` | Maximum message size for Kafka | +| externalKafka.sasl.mechanism | string | `"None"` | SASL mechanism for Kafka (PLAIN, SCRAM-SHA-256, SCRAM-SHA-512) | +| externalKafka.sasl.username | string | `"None"` | SASL username for Kafka | +| externalKafka.sasl.password | string | `"None"` | SASL password for Kafka | +| externalKafka.security.protocol | string | `"plaintext"` | Security protocol for Kafka (PLAINTEXT, SASL_PLAINTEXT, SASL_SSL, SSL) | +| externalPostgresql.connMaxAge | int | `0` | | +| externalPostgresql.database | string | `"sentry"` | | +| externalPostgresql.existingSecretKeys | object | `{}` | | +| externalPostgresql.port | int | `5432` | | +| externalPostgresql.username | string | `"postgres"` | | +| externalRedis.port | int | `6379` | | +| extraManifests | list | `[]` | | +| filestore.backend | string | `"filesystem"` | | +| filestore.filesystem.path | string | `"/var/lib/sentry/files"` | | +| filestore.filesystem.persistence.accessMode | string | `"ReadWriteOnce"` | | +| filestore.filesystem.persistence.enabled | bool | `true` | | +| filestore.filesystem.persistence.existingClaim | string | `""` | | +| filestore.filesystem.persistence.persistentWorkers | bool | `false` | | +| filestore.filesystem.persistence.size | string | `"10Gi"` | | +| filestore.gcs | object | `{}` | | +| filestore.s3 | object | `{}` | | +| geodata.accountID | string | `""` | | +| geodata.editionIDs | string | `""` | | +| geodata.licenseKey | string | `""` | | +| geodata.mountPath | string | `""` | | +| geodata.path | string | `""` | | +| geodata.persistence.size | string | `"1Gi"` | | +| geodata.volumeName | string | `""` | | +| github | object | `{}` | | +| google | object | `{}` | | +| hooks.activeDeadlineSeconds | int | `600` | | +| hooks.dbCheck.affinity | object | `{}` | | +| hooks.dbCheck.containerSecurityContext | object | `{}` | | +| hooks.dbCheck.enabled | bool | `true` | | +| hooks.dbCheck.env | list | `[]` | | +| hooks.dbCheck.image.imagePullSecrets | list | `[]` | | +| hooks.dbCheck.nodeSelector | object | `{}` | | +| hooks.dbCheck.podAnnotations | object | `{}` | | +| hooks.dbCheck.resources.limits.memory | string | `"64Mi"` | | +| hooks.dbCheck.resources.requests.cpu | string | `"100m"` | | +| hooks.dbCheck.resources.requests.memory | string | `"64Mi"` | | +| hooks.dbCheck.securityContext | object | `{}` | | +| hooks.dbInit.affinity | object | `{}` | | +| hooks.dbInit.enabled | bool | `true` | | +| hooks.dbInit.env | list | `[]` | | +| hooks.dbInit.nodeSelector | object | `{}` | | +| hooks.dbInit.podAnnotations | object | `{}` | | +| hooks.dbInit.resources.limits.memory | string | `"2048Mi"` | | +| hooks.dbInit.resources.requests.cpu | string | `"300m"` | | +| hooks.dbInit.resources.requests.memory | string | `"2048Mi"` | | +| hooks.dbInit.sidecars | list | `[]` | | +| hooks.dbInit.volumes | list | `[]` | | +| hooks.enabled | bool | `true` | | +| hooks.preUpgrade | bool | `false` | | +| hooks.removeOnSuccess | bool | `true` | | +| hooks.shareProcessNamespace | bool | `false` | | +| hooks.snubaInit.affinity 
| object | `{}` | | +| hooks.snubaInit.enabled | bool | `true` | | +| hooks.snubaInit.kafka.enabled | bool | `true` | | +| hooks.snubaInit.nodeSelector | object | `{}` | | +| hooks.snubaInit.podAnnotations | object | `{}` | | +| hooks.snubaInit.resources.limits.cpu | string | `"2000m"` | | +| hooks.snubaInit.resources.limits.memory | string | `"1Gi"` | | +| hooks.snubaInit.resources.requests.cpu | string | `"700m"` | | +| hooks.snubaInit.resources.requests.memory | string | `"1Gi"` | | +| hooks.snubaMigrate.enabled | bool | `true` | | +| images.relay.imagePullSecrets | list | `[]` | | +| images.sentry.imagePullSecrets | list | `[]` | | +| images.snuba.imagePullSecrets | list | `[]` | | +| images.symbolicator.imagePullSecrets | list | `[]` | | +| images.vroom.imagePullSecrets | list | `[]` | | +| ingress.alb.httpRedirect | bool | `false` | | +| ingress.enabled | bool | `true` | | +| ingress.regexPathStyle | string | `"nginx"` | | +| ipv6 | bool | `false` | | +| kafka.controller.replicaCount | int | `3` | | +| kafka.enabled | bool | `true` | | +| kafka.kraft.enabled | bool | `true` | | +| kafka.listeners.client.protocol | string | `"PLAINTEXT"` | Security protocol for the Kafka client listener (PLAINTEXT, SASL_PLAINTEXT, SASL_SSL, SSL) | +| kafka.listeners.controller.protocol | string | `"PLAINTEXT"` | | +| kafka.listeners.external.protocol | string | `"PLAINTEXT"` | | +| kafka.listeners.interbroker.protocol | string | `"PLAINTEXT"` | | +| kafka.provisioning.enabled | bool | `true` | | +| kafka.provisioning.topics[0].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[0].name | string | `"events"` | | +| kafka.provisioning.topics[10].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[10].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[10].name | string | `"snuba-sessions-commit-log"` | | +| kafka.provisioning.topics[11].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[11].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[11].name | string | `"snuba-metrics-commit-log"` | | +| kafka.provisioning.topics[12].name | string | `"scheduled-subscriptions-events"` | | +| kafka.provisioning.topics[13].name | string | `"scheduled-subscriptions-transactions"` | | +| kafka.provisioning.topics[14].name | string | `"scheduled-subscriptions-sessions"` | | +| kafka.provisioning.topics[15].name | string | `"scheduled-subscriptions-metrics"` | | +| kafka.provisioning.topics[16].name | string | `"scheduled-subscriptions-generic-metrics-sets"` | | +| kafka.provisioning.topics[17].name | string | `"scheduled-subscriptions-generic-metrics-distributions"` | | +| kafka.provisioning.topics[18].name | string | `"scheduled-subscriptions-generic-metrics-counters"` | | +| kafka.provisioning.topics[19].name | string | `"events-subscription-results"` | | +| kafka.provisioning.topics[1].name | string | `"event-replacements"` | | +| kafka.provisioning.topics[20].name | string | `"transactions-subscription-results"` | | +| kafka.provisioning.topics[21].name | string | `"sessions-subscription-results"` | | +| kafka.provisioning.topics[22].name | string | `"metrics-subscription-results"` | | +| kafka.provisioning.topics[23].name | string | `"generic-metrics-subscription-results"` | | +| kafka.provisioning.topics[24].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[24].name | string | 
`"snuba-queries"` | | +| kafka.provisioning.topics[25].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[25].name | string | `"processed-profiles"` | | +| kafka.provisioning.topics[26].name | string | `"profiles-call-tree"` | | +| kafka.provisioning.topics[27].config."max.message.bytes" | string | `"15000000"` | | +| kafka.provisioning.topics[27].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[27].name | string | `"ingest-replay-events"` | | +| kafka.provisioning.topics[28].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[28].name | string | `"snuba-generic-metrics"` | | +| kafka.provisioning.topics[29].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[29].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[29].name | string | `"snuba-generic-metrics-sets-commit-log"` | | +| kafka.provisioning.topics[2].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[2].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[2].name | string | `"snuba-commit-log"` | | +| kafka.provisioning.topics[30].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[30].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[30].name | string | `"snuba-generic-metrics-distributions-commit-log"` | | +| kafka.provisioning.topics[31].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[31].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[31].name | string | `"snuba-generic-metrics-counters-commit-log"` | | +| kafka.provisioning.topics[32].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[32].name | string | `"generic-events"` | | +| kafka.provisioning.topics[33].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[33].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[33].name | string | `"snuba-generic-events-commit-log"` | | +| kafka.provisioning.topics[34].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[34].name | string | `"group-attributes"` | | +| kafka.provisioning.topics[35].name | string | `"snuba-attribution"` | | +| kafka.provisioning.topics[36].name | string | `"snuba-dead-letter-metrics"` | | +| kafka.provisioning.topics[37].name | string | `"snuba-dead-letter-sessions"` | | +| kafka.provisioning.topics[38].name | string | `"snuba-dead-letter-generic-metrics"` | | +| kafka.provisioning.topics[39].name | string | `"snuba-dead-letter-replays"` | | +| kafka.provisioning.topics[3].name | string | `"cdc"` | | +| kafka.provisioning.topics[40].name | string | `"snuba-dead-letter-generic-events"` | | +| kafka.provisioning.topics[41].name | string | `"snuba-dead-letter-querylog"` | | +| kafka.provisioning.topics[42].name | string | `"snuba-dead-letter-group-attributes"` | | +| kafka.provisioning.topics[43].name | string | `"ingest-attachments"` | | +| kafka.provisioning.topics[44].name | string | `"ingest-transactions"` | | +| kafka.provisioning.topics[45].name | string | `"ingest-events"` | | +| kafka.provisioning.topics[46].name | string | `"ingest-replay-recordings"` | | +| kafka.provisioning.topics[47].name | string | `"ingest-metrics"` | | +| 
kafka.provisioning.topics[48].name | string | `"ingest-performance-metrics"` | | +| kafka.provisioning.topics[49].name | string | `"ingest-monitors"` | | +| kafka.provisioning.topics[4].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[4].name | string | `"transactions"` | | +| kafka.provisioning.topics[50].name | string | `"profiles"` | | +| kafka.provisioning.topics[51].name | string | `"ingest-occurrences"` | | +| kafka.provisioning.topics[52].name | string | `"snuba-spans"` | | +| kafka.provisioning.topics[53].name | string | `"shared-resources-usage"` | | +| kafka.provisioning.topics[54].name | string | `"snuba-metrics-summaries"` | | +| kafka.provisioning.topics[5].config."cleanup.policy" | string | `"compact,delete"` | | +| kafka.provisioning.topics[5].config."min.compaction.lag.ms" | string | `"3600000"` | | +| kafka.provisioning.topics[5].name | string | `"snuba-transactions-commit-log"` | | +| kafka.provisioning.topics[6].config."message.timestamp.type" | string | `"LogAppendTime"` | | +| kafka.provisioning.topics[6].name | string | `"snuba-metrics"` | | +| kafka.provisioning.topics[7].name | string | `"outcomes"` | | +| kafka.provisioning.topics[8].name | string | `"outcomes-billing"` | | +| kafka.provisioning.topics[9].name | string | `"ingest-sessions"` | | +| kafka.sasl.client.users | list | `[]` | List of usernames for client communications when SASL is enabled, first user will be used if enabled | +| kafka.sasl.client.passwords | list | `[]` | List of passwords for client communications when SASL is enabled, must match the number of client.users, first password will be used if enabled | +| kafka.sasl.enabledMechanisms | string | `"PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured | +| kafka.zookeeper.enabled | bool | `false` | | +| mail.backend | string | `"dummy"` | | +| mail.from | string | `""` | | +| mail.host | string | `""` | | +| mail.password | string | `""` | | +| mail.port | int | `25` | | +| mail.useSsl | bool | `false` | | +| mail.useTls | bool | `false` | | +| mail.username | string | `""` | | +| memcached.args[0] | string | `"memcached"` | | +| memcached.args[1] | string | `"-u memcached"` | | +| memcached.args[2] | string | `"-p 11211"` | | +| memcached.args[3] | string | `"-v"` | | +| memcached.args[4] | string | `"-m $(MEMCACHED_MEMORY_LIMIT)"` | | +| memcached.args[5] | string | `"-I $(MEMCACHED_MAX_ITEM_SIZE)"` | | +| memcached.extraEnvVarsCM | string | `"sentry-memcached"` | | +| memcached.maxItemSize | string | `"26214400"` | | +| memcached.memoryLimit | string | `"2048"` | | +| metrics.affinity | object | `{}` | | +| metrics.containerSecurityContext | object | `{}` | | +| metrics.enabled | bool | `false` | | +| metrics.image.pullPolicy | string | `"IfNotPresent"` | | +| metrics.image.repository | string | `"prom/statsd-exporter"` | | +| metrics.image.tag | string | `"v0.17.0"` | | +| metrics.livenessProbe.enabled | bool | `true` | | +| metrics.livenessProbe.failureThreshold | int | `3` | | +| metrics.livenessProbe.initialDelaySeconds | int | `30` | | +| metrics.livenessProbe.periodSeconds | int | `5` | | +| metrics.livenessProbe.successThreshold | int | `1` | | +| metrics.livenessProbe.timeoutSeconds | int | `2` | | +| metrics.nodeSelector | object | `{}` | | +| metrics.podAnnotations | object | `{}` | | +| metrics.readinessProbe.enabled | bool | `true` | | +| metrics.readinessProbe.failureThreshold | int | `3` | | +| 
metrics.readinessProbe.initialDelaySeconds | int | `30` | | +| metrics.readinessProbe.periodSeconds | int | `5` | | +| metrics.readinessProbe.successThreshold | int | `1` | | +| metrics.readinessProbe.timeoutSeconds | int | `2` | | +| metrics.resources | object | `{}` | | +| metrics.securityContext | object | `{}` | | +| metrics.service.labels | object | `{}` | | +| metrics.service.type | string | `"ClusterIP"` | | +| metrics.serviceMonitor.additionalLabels | object | `{}` | | +| metrics.serviceMonitor.enabled | bool | `false` | | +| metrics.serviceMonitor.metricRelabelings | list | `[]` | | +| metrics.serviceMonitor.namespace | string | `""` | | +| metrics.serviceMonitor.namespaceSelector | object | `{}` | | +| metrics.serviceMonitor.relabelings | list | `[]` | | +| metrics.serviceMonitor.scrapeInterval | string | `"30s"` | | +| metrics.tolerations | list | `[]` | | +| nginx.containerPort | int | `8080` | | +| nginx.customReadinessProbe.failureThreshold | int | `3` | | +| nginx.customReadinessProbe.initialDelaySeconds | int | `5` | | +| nginx.customReadinessProbe.periodSeconds | int | `5` | | +| nginx.customReadinessProbe.successThreshold | int | `1` | | +| nginx.customReadinessProbe.tcpSocket.port | string | `"http"` | | +| nginx.customReadinessProbe.timeoutSeconds | int | `3` | | +| nginx.enabled | bool | `true` | | +| nginx.existingServerBlockConfigmap | string | `"{{ template \"sentry.fullname\" . }}"` | | +| nginx.extraLocationSnippet | bool | `false` | | +| nginx.metrics.serviceMonitor | object | `{}` | | +| nginx.replicaCount | int | `1` | | +| nginx.resources | object | `{}` | | +| nginx.service.ports.http | int | `80` | | +| nginx.service.type | string | `"ClusterIP"` | | +| openai | object | `{}` | | +| postgresql.auth.database | string | `"sentry"` | | +| postgresql.connMaxAge | int | `0` | | +| postgresql.enabled | bool | `true` | | +| postgresql.nameOverride | string | `"sentry-postgresql"` | | +| postgresql.replication.applicationName | string | `"sentry"` | | +| postgresql.replication.enabled | bool | `false` | | +| postgresql.replication.numSynchronousReplicas | int | `1` | | +| postgresql.replication.readReplicas | int | `2` | | +| postgresql.replication.synchronousCommit | string | `"on"` | | +| prefix | string | `nil` | | +| rabbitmq.auth.erlangCookie | string | `"pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA"` | | +| rabbitmq.auth.password | string | `"guest"` | | +| rabbitmq.auth.username | string | `"guest"` | | +| rabbitmq.clustering.forceBoot | bool | `true` | | +| rabbitmq.clustering.rebalance | bool | `true` | | +| rabbitmq.enabled | bool | `true` | | +| rabbitmq.extraConfiguration | string | `"load_definitions = /app/load_definition.json\n"` | | +| rabbitmq.extraSecrets.load-definition."load_definition.json" | string | `"{\n \"users\": [\n {\n \"name\": \"{{ .Values.auth.username }}\",\n \"password\": \"{{ .Values.auth.password }}\",\n \"tags\": \"administrator\"\n }\n ],\n \"permissions\": [{\n \"user\": \"{{ .Values.auth.username }}\",\n \"vhost\": \"/\",\n \"configure\": \".*\",\n \"write\": \".*\",\n \"read\": \".*\"\n }],\n \"policies\": [\n {\n \"name\": \"ha-all\",\n \"pattern\": \".*\",\n \"vhost\": \"/\",\n \"definition\": {\n \"ha-mode\": \"all\",\n \"ha-sync-mode\": \"automatic\",\n \"ha-sync-batch-size\": 1\n }\n }\n ],\n \"vhosts\": [\n {\n \"name\": \"/\"\n }\n ]\n}\n"` | | +| rabbitmq.loadDefinition.enabled | bool | `true` | | +| rabbitmq.loadDefinition.existingSecret | string | `"load-definition"` | | +| rabbitmq.memoryHighWatermark | object | `{}` | | +| 
rabbitmq.nameOverride | string | `""` | | +| rabbitmq.pdb.create | bool | `true` | | +| rabbitmq.persistence.enabled | bool | `true` | | +| rabbitmq.replicaCount | int | `1` | | +| rabbitmq.resources | object | `{}` | | +| rabbitmq.vhost | string | `"/"` | | +| redis.auth.enabled | bool | `false` | | +| redis.auth.sentinel | bool | `false` | | +| redis.enabled | bool | `true` | | +| redis.master.persistence.enabled | bool | `true` | | +| redis.nameOverride | string | `"sentry-redis"` | | +| redis.replica.replicaCount | int | `1` | | +| relay.affinity | object | `{}` | | +| relay.autoscaling.enabled | bool | `false` | | +| relay.autoscaling.maxReplicas | int | `5` | | +| relay.autoscaling.minReplicas | int | `2` | | +| relay.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| relay.containerSecurityContext | object | `{}` | | +| relay.customResponseHeaders | list | `[]` | | +| relay.enabled | bool | `true` | | +| relay.env | list | `[]` | | +| relay.init.resources | object | `{}` | | +| relay.mode | string | `"managed"` | | +| relay.nodeSelector | object | `{}` | | +| relay.probeFailureThreshold | int | `5` | | +| relay.probeInitialDelaySeconds | int | `10` | | +| relay.probePeriodSeconds | int | `10` | | +| relay.probeSuccessThreshold | int | `1` | | +| relay.probeTimeoutSeconds | int | `2` | | +| relay.processing.kafkaConfig.messageMaxBytes | int | `50000000` | | +| relay.replicas | int | `1` | | +| relay.resources | object | `{}` | | +| relay.securityContext | object | `{}` | | +| relay.securityPolicy | string | `""` | | +| relay.service.annotations | object | `{}` | | +| relay.sidecars | list | `[]` | | +| relay.topologySpreadConstraints | list | `[]` | | +| relay.volumeMounts | list | `[]` | | +| relay.volumes | list | `[]` | | +| revisionHistoryLimit | int | `10` | | +| sentry.billingMetricsConsumer.affinity | object | `{}` | | +| sentry.billingMetricsConsumer.autoscaling.enabled | bool | `false` | | +| sentry.billingMetricsConsumer.autoscaling.maxReplicas | int | `3` | | +| sentry.billingMetricsConsumer.autoscaling.minReplicas | int | `1` | | +| sentry.billingMetricsConsumer.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.billingMetricsConsumer.containerSecurityContext | object | `{}` | | +| sentry.billingMetricsConsumer.enabled | bool | `true` | | +| sentry.billingMetricsConsumer.env | list | `[]` | | +| sentry.billingMetricsConsumer.livenessProbe.enabled | bool | `true` | | +| sentry.billingMetricsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.billingMetricsConsumer.livenessProbe.periodSeconds | int | `320` | | +| sentry.billingMetricsConsumer.nodeSelector | object | `{}` | | +| sentry.billingMetricsConsumer.replicas | int | `1` | | +| sentry.billingMetricsConsumer.resources | object | `{}` | | +| sentry.billingMetricsConsumer.securityContext | object | `{}` | | +| sentry.billingMetricsConsumer.sidecars | list | `[]` | | +| sentry.billingMetricsConsumer.topologySpreadConstraints | list | `[]` | | +| sentry.billingMetricsConsumer.volumes | list | `[]` | | +| sentry.cleanup.activeDeadlineSeconds | int | `100` | | +| sentry.cleanup.concurrency | int | `1` | | +| sentry.cleanup.concurrencyPolicy | string | `"Allow"` | | +| sentry.cleanup.days | int | `90` | | +| sentry.cleanup.enabled | bool | `true` | | +| sentry.cleanup.failedJobsHistoryLimit | int | `5` | | +| sentry.cleanup.logLevel | string | `""` | | +| sentry.cleanup.schedule | string | `"0 0 * * *"` | | +| sentry.cleanup.serviceAccount | object | `{}` | | +| 
sentry.cleanup.sidecars | list | `[]` | | +| sentry.cleanup.successfulJobsHistoryLimit | int | `5` | | +| sentry.cleanup.volumes | list | `[]` | | +| sentry.cron.affinity | object | `{}` | | +| sentry.cron.enabled | bool | `true` | | +| sentry.cron.env | list | `[]` | | +| sentry.cron.nodeSelector | object | `{}` | | +| sentry.cron.replicas | int | `1` | | +| sentry.cron.resources | object | `{}` | | +| sentry.cron.sidecars | list | `[]` | | +| sentry.cron.topologySpreadConstraints | list | `[]` | | +| sentry.cron.volumes | list | `[]` | | +| sentry.features.enableFeedback | bool | `false` | | +| sentry.features.enableProfiling | bool | `false` | | +| sentry.features.enableSessionReplay | bool | `true` | | +| sentry.features.enableSpan | bool | `false` | | +| sentry.features.orgSubdomains | bool | `false` | | +| sentry.features.vstsLimitedScopes | bool | `true` | | +| sentry.genericMetricsConsumer.affinity | object | `{}` | | +| sentry.genericMetricsConsumer.autoscaling.enabled | bool | `false` | | +| sentry.genericMetricsConsumer.autoscaling.maxReplicas | int | `3` | | +| sentry.genericMetricsConsumer.autoscaling.minReplicas | int | `1` | | +| sentry.genericMetricsConsumer.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.genericMetricsConsumer.containerSecurityContext | object | `{}` | | +| sentry.genericMetricsConsumer.enabled | bool | `true` | | +| sentry.genericMetricsConsumer.env | list | `[]` | | +| sentry.genericMetricsConsumer.livenessProbe.enabled | bool | `true` | | +| sentry.genericMetricsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.genericMetricsConsumer.livenessProbe.periodSeconds | int | `320` | | +| sentry.genericMetricsConsumer.nodeSelector | object | `{}` | | +| sentry.genericMetricsConsumer.replicas | int | `1` | | +| sentry.genericMetricsConsumer.resources | object | `{}` | | +| sentry.genericMetricsConsumer.securityContext | object | `{}` | | +| sentry.genericMetricsConsumer.sidecars | list | `[]` | | +| sentry.genericMetricsConsumer.topologySpreadConstraints | list | `[]` | | +| sentry.genericMetricsConsumer.volumes | list | `[]` | | +| sentry.ingestConsumerAttachments.affinity | object | `{}` | | +| sentry.ingestConsumerAttachments.autoscaling.enabled | bool | `false` | | +| sentry.ingestConsumerAttachments.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestConsumerAttachments.autoscaling.minReplicas | int | `1` | | +| sentry.ingestConsumerAttachments.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestConsumerAttachments.containerSecurityContext | object | `{}` | | +| sentry.ingestConsumerAttachments.enabled | bool | `true` | | +| sentry.ingestConsumerAttachments.env | list | `[]` | | +| sentry.ingestConsumerAttachments.livenessProbe.enabled | bool | `true` | | +| sentry.ingestConsumerAttachments.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestConsumerAttachments.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestConsumerAttachments.nodeSelector | object | `{}` | | +| sentry.ingestConsumerAttachments.replicas | int | `1` | | +| sentry.ingestConsumerAttachments.resources | object | `{}` | | +| sentry.ingestConsumerAttachments.securityContext | object | `{}` | | +| sentry.ingestConsumerAttachments.sidecars | list | `[]` | | +| sentry.ingestConsumerAttachments.topologySpreadConstraints | list | `[]` | | +| sentry.ingestConsumerAttachments.volumes | list | `[]` | | +| sentry.ingestConsumerEvents.affinity | object | `{}` | | +| 
sentry.ingestConsumerEvents.autoscaling.enabled | bool | `false` | | +| sentry.ingestConsumerEvents.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestConsumerEvents.autoscaling.minReplicas | int | `1` | | +| sentry.ingestConsumerEvents.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestConsumerEvents.containerSecurityContext | object | `{}` | | +| sentry.ingestConsumerEvents.enabled | bool | `true` | | +| sentry.ingestConsumerEvents.env | list | `[]` | | +| sentry.ingestConsumerEvents.livenessProbe.enabled | bool | `true` | | +| sentry.ingestConsumerEvents.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestConsumerEvents.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestConsumerEvents.nodeSelector | object | `{}` | | +| sentry.ingestConsumerEvents.replicas | int | `1` | | +| sentry.ingestConsumerEvents.resources | object | `{}` | | +| sentry.ingestConsumerEvents.securityContext | object | `{}` | | +| sentry.ingestConsumerEvents.sidecars | list | `[]` | | +| sentry.ingestConsumerEvents.topologySpreadConstraints | list | `[]` | | +| sentry.ingestConsumerEvents.volumes | list | `[]` | | +| sentry.ingestConsumerTransactions.affinity | object | `{}` | | +| sentry.ingestConsumerTransactions.autoscaling.enabled | bool | `false` | | +| sentry.ingestConsumerTransactions.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestConsumerTransactions.autoscaling.minReplicas | int | `1` | | +| sentry.ingestConsumerTransactions.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestConsumerTransactions.containerSecurityContext | object | `{}` | | +| sentry.ingestConsumerTransactions.enabled | bool | `true` | | +| sentry.ingestConsumerTransactions.env | list | `[]` | | +| sentry.ingestConsumerTransactions.livenessProbe.enabled | bool | `true` | | +| sentry.ingestConsumerTransactions.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestConsumerTransactions.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestConsumerTransactions.nodeSelector | object | `{}` | | +| sentry.ingestConsumerTransactions.replicas | int | `1` | | +| sentry.ingestConsumerTransactions.resources | object | `{}` | | +| sentry.ingestConsumerTransactions.securityContext | object | `{}` | | +| sentry.ingestConsumerTransactions.sidecars | list | `[]` | | +| sentry.ingestConsumerTransactions.topologySpreadConstraints | list | `[]` | | +| sentry.ingestConsumerTransactions.volumes | list | `[]` | | +| sentry.ingestFeedback.affinity | object | `{}` | | +| sentry.ingestFeedback.autoscaling.enabled | bool | `false` | | +| sentry.ingestFeedback.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestFeedback.autoscaling.minReplicas | int | `1` | | +| sentry.ingestFeedback.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestFeedback.containerSecurityContext | object | `{}` | | +| sentry.ingestFeedback.enabled | bool | `true` | | +| sentry.ingestFeedback.env | list | `[]` | | +| sentry.ingestFeedback.livenessProbe.enabled | bool | `true` | | +| sentry.ingestFeedback.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestFeedback.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestFeedback.nodeSelector | object | `{}` | | +| sentry.ingestFeedback.replicas | int | `1` | | +| sentry.ingestFeedback.resources | object | `{}` | | +| sentry.ingestFeedback.securityContext | object | `{}` | | +| sentry.ingestFeedback.sidecars | list | `[]` | | +| sentry.ingestFeedback.topologySpreadConstraints | list | `[]` | | +| 
sentry.ingestFeedback.volumes | list | `[]` | | +| sentry.ingestMonitors.affinity | object | `{}` | | +| sentry.ingestMonitors.autoscaling.enabled | bool | `false` | | +| sentry.ingestMonitors.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestMonitors.autoscaling.minReplicas | int | `1` | | +| sentry.ingestMonitors.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestMonitors.containerSecurityContext | object | `{}` | | +| sentry.ingestMonitors.enabled | bool | `true` | | +| sentry.ingestMonitors.env | list | `[]` | | +| sentry.ingestMonitors.livenessProbe.enabled | bool | `true` | | +| sentry.ingestMonitors.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestMonitors.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestMonitors.nodeSelector | object | `{}` | | +| sentry.ingestMonitors.replicas | int | `1` | | +| sentry.ingestMonitors.resources | object | `{}` | | +| sentry.ingestMonitors.securityContext | object | `{}` | | +| sentry.ingestMonitors.sidecars | list | `[]` | | +| sentry.ingestMonitors.topologySpreadConstraints | list | `[]` | | +| sentry.ingestMonitors.volumes | list | `[]` | | +| sentry.ingestOccurrences.affinity | object | `{}` | | +| sentry.ingestOccurrences.autoscaling.enabled | bool | `false` | | +| sentry.ingestOccurrences.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestOccurrences.autoscaling.minReplicas | int | `1` | | +| sentry.ingestOccurrences.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestOccurrences.containerSecurityContext | object | `{}` | | +| sentry.ingestOccurrences.enabled | bool | `true` | | +| sentry.ingestOccurrences.env | list | `[]` | | +| sentry.ingestOccurrences.livenessProbe.enabled | bool | `true` | | +| sentry.ingestOccurrences.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestOccurrences.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestOccurrences.nodeSelector | object | `{}` | | +| sentry.ingestOccurrences.replicas | int | `1` | | +| sentry.ingestOccurrences.resources | object | `{}` | | +| sentry.ingestOccurrences.securityContext | object | `{}` | | +| sentry.ingestOccurrences.sidecars | list | `[]` | | +| sentry.ingestOccurrences.topologySpreadConstraints | list | `[]` | | +| sentry.ingestOccurrences.volumes | list | `[]` | | +| sentry.ingestProfiles.affinity | object | `{}` | | +| sentry.ingestProfiles.autoscaling.enabled | bool | `false` | | +| sentry.ingestProfiles.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestProfiles.autoscaling.minReplicas | int | `1` | | +| sentry.ingestProfiles.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestProfiles.containerSecurityContext | object | `{}` | | +| sentry.ingestProfiles.env | list | `[]` | | +| sentry.ingestProfiles.livenessProbe.enabled | bool | `true` | | +| sentry.ingestProfiles.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestProfiles.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestProfiles.nodeSelector | object | `{}` | | +| sentry.ingestProfiles.replicas | int | `1` | | +| sentry.ingestProfiles.resources | object | `{}` | | +| sentry.ingestProfiles.securityContext | object | `{}` | | +| sentry.ingestProfiles.sidecars | list | `[]` | | +| sentry.ingestProfiles.topologySpreadConstraints | list | `[]` | | +| sentry.ingestProfiles.volumes | list | `[]` | | +| sentry.ingestReplayRecordings.affinity | object | `{}` | | +| sentry.ingestReplayRecordings.autoscaling.enabled | bool | `false` | | +| 
sentry.ingestReplayRecordings.autoscaling.maxReplicas | int | `3` | | +| sentry.ingestReplayRecordings.autoscaling.minReplicas | int | `1` | | +| sentry.ingestReplayRecordings.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.ingestReplayRecordings.containerSecurityContext | object | `{}` | | +| sentry.ingestReplayRecordings.enabled | bool | `true` | | +| sentry.ingestReplayRecordings.env | list | `[]` | | +| sentry.ingestReplayRecordings.livenessProbe.enabled | bool | `true` | | +| sentry.ingestReplayRecordings.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.ingestReplayRecordings.livenessProbe.periodSeconds | int | `320` | | +| sentry.ingestReplayRecordings.nodeSelector | object | `{}` | | +| sentry.ingestReplayRecordings.replicas | int | `1` | | +| sentry.ingestReplayRecordings.resources | object | `{}` | | +| sentry.ingestReplayRecordings.securityContext | object | `{}` | | +| sentry.ingestReplayRecordings.sidecars | list | `[]` | | +| sentry.ingestReplayRecordings.topologySpreadConstraints | list | `[]` | | +| sentry.ingestReplayRecordings.volumes | list | `[]` | | +| sentry.kafka.message.max.bytes | int | `50000000` | Maximum message size for Kafka | +| sentry.kafka.compression.type | string | `""` | Compression type for Kafka messages | +| sentry.kafka.socket.timeout.ms | int | `1000` | Socket timeout for Kafka connections | +| sentry.metricsConsumer.affinity | object | `{}` | | +| sentry.metricsConsumer.autoscaling.enabled | bool | `false` | | +| sentry.metricsConsumer.autoscaling.maxReplicas | int | `3` | | +| sentry.metricsConsumer.autoscaling.minReplicas | int | `1` | | +| sentry.metricsConsumer.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.metricsConsumer.containerSecurityContext | object | `{}` | | +| sentry.metricsConsumer.enabled | bool | `true` | | +| sentry.metricsConsumer.env | list | `[]` | | +| sentry.metricsConsumer.livenessProbe.enabled | bool | `true` | | +| sentry.metricsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.metricsConsumer.livenessProbe.periodSeconds | int | `320` | | +| sentry.metricsConsumer.nodeSelector | object | `{}` | | +| sentry.metricsConsumer.replicas | int | `1` | | +| sentry.metricsConsumer.resources | object | `{}` | | +| sentry.metricsConsumer.securityContext | object | `{}` | | +| sentry.metricsConsumer.sidecars | list | `[]` | | +| sentry.metricsConsumer.topologySpreadConstraints | list | `[]` | | +| sentry.metricsConsumer.volumes | list | `[]` | | +| sentry.postProcessForwardErrors.affinity | object | `{}` | | +| sentry.postProcessForwardErrors.containerSecurityContext | object | `{}` | | +| sentry.postProcessForwardErrors.enabled | bool | `true` | | +| sentry.postProcessForwardErrors.env | list | `[]` | | +| sentry.postProcessForwardErrors.livenessProbe.enabled | bool | `true` | | +| sentry.postProcessForwardErrors.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.postProcessForwardErrors.livenessProbe.periodSeconds | int | `320` | | +| sentry.postProcessForwardErrors.nodeSelector | object | `{}` | | +| sentry.postProcessForwardErrors.replicas | int | `1` | | +| sentry.postProcessForwardErrors.resources | object | `{}` | | +| sentry.postProcessForwardErrors.securityContext | object | `{}` | | +| sentry.postProcessForwardErrors.sidecars | list | `[]` | | +| sentry.postProcessForwardErrors.topologySpreadConstraints | list | `[]` | | +| sentry.postProcessForwardErrors.volumes | list | `[]` | | +| sentry.postProcessForwardIssuePlatform.affinity | object | 
`{}` | | +| sentry.postProcessForwardIssuePlatform.containerSecurityContext | object | `{}` | | +| sentry.postProcessForwardIssuePlatform.enabled | bool | `true` | | +| sentry.postProcessForwardIssuePlatform.env | list | `[]` | | +| sentry.postProcessForwardIssuePlatform.livenessProbe.enabled | bool | `true` | | +| sentry.postProcessForwardIssuePlatform.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.postProcessForwardIssuePlatform.livenessProbe.periodSeconds | int | `320` | | +| sentry.postProcessForwardIssuePlatform.nodeSelector | object | `{}` | | +| sentry.postProcessForwardIssuePlatform.replicas | int | `1` | | +| sentry.postProcessForwardIssuePlatform.resources | object | `{}` | | +| sentry.postProcessForwardIssuePlatform.securityContext | object | `{}` | | +| sentry.postProcessForwardIssuePlatform.sidecars | list | `[]` | | +| sentry.postProcessForwardIssuePlatform.topologySpreadConstraints | list | `[]` | | +| sentry.postProcessForwardIssuePlatform.volumes | list | `[]` | | +| sentry.postProcessForwardTransactions.affinity | object | `{}` | | +| sentry.postProcessForwardTransactions.containerSecurityContext | object | `{}` | | +| sentry.postProcessForwardTransactions.enabled | bool | `true` | | +| sentry.postProcessForwardTransactions.env | list | `[]` | | +| sentry.postProcessForwardTransactions.livenessProbe.enabled | bool | `true` | | +| sentry.postProcessForwardTransactions.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.postProcessForwardTransactions.livenessProbe.periodSeconds | int | `320` | | +| sentry.postProcessForwardTransactions.nodeSelector | object | `{}` | | +| sentry.postProcessForwardTransactions.replicas | int | `1` | | +| sentry.postProcessForwardTransactions.resources | object | `{}` | | +| sentry.postProcessForwardTransactions.securityContext | object | `{}` | | +| sentry.postProcessForwardTransactions.sidecars | list | `[]` | | +| sentry.postProcessForwardTransactions.topologySpreadConstraints | list | `[]` | | +| sentry.postProcessForwardTransactions.volumes | list | `[]` | | +| sentry.singleOrganization | bool | `true` | | +| sentry.subscriptionConsumerEvents.affinity | object | `{}` | | +| sentry.subscriptionConsumerEvents.containerSecurityContext | object | `{}` | | +| sentry.subscriptionConsumerEvents.enabled | bool | `true` | | +| sentry.subscriptionConsumerEvents.env | list | `[]` | | +| sentry.subscriptionConsumerEvents.livenessProbe.enabled | bool | `true` | | +| sentry.subscriptionConsumerEvents.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.subscriptionConsumerEvents.livenessProbe.periodSeconds | int | `320` | | +| sentry.subscriptionConsumerEvents.nodeSelector | object | `{}` | | +| sentry.subscriptionConsumerEvents.replicas | int | `1` | | +| sentry.subscriptionConsumerEvents.resources | object | `{}` | | +| sentry.subscriptionConsumerEvents.securityContext | object | `{}` | | +| sentry.subscriptionConsumerEvents.sidecars | list | `[]` | | +| sentry.subscriptionConsumerEvents.topologySpreadConstraints | list | `[]` | | +| sentry.subscriptionConsumerEvents.volumes | list | `[]` | | +| sentry.subscriptionConsumerGenericMetrics.affinity | object | `{}` | | +| sentry.subscriptionConsumerGenericMetrics.containerSecurityContext | object | `{}` | | +| sentry.subscriptionConsumerGenericMetrics.enabled | bool | `true` | | +| sentry.subscriptionConsumerGenericMetrics.env | list | `[]` | | +| sentry.subscriptionConsumerGenericMetrics.livenessProbe.enabled | bool | `true` | | +| 
sentry.subscriptionConsumerGenericMetrics.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.subscriptionConsumerGenericMetrics.livenessProbe.periodSeconds | int | `320` | | +| sentry.subscriptionConsumerGenericMetrics.nodeSelector | object | `{}` | | +| sentry.subscriptionConsumerGenericMetrics.replicas | int | `1` | | +| sentry.subscriptionConsumerGenericMetrics.resources | object | `{}` | | +| sentry.subscriptionConsumerGenericMetrics.securityContext | object | `{}` | | +| sentry.subscriptionConsumerGenericMetrics.sidecars | list | `[]` | | +| sentry.subscriptionConsumerGenericMetrics.topologySpreadConstraints | list | `[]` | | +| sentry.subscriptionConsumerGenericMetrics.volumes | list | `[]` | | +| sentry.subscriptionConsumerMetrics.affinity | object | `{}` | | +| sentry.subscriptionConsumerMetrics.containerSecurityContext | object | `{}` | | +| sentry.subscriptionConsumerMetrics.enabled | bool | `true` | | +| sentry.subscriptionConsumerMetrics.env | list | `[]` | | +| sentry.subscriptionConsumerMetrics.livenessProbe.enabled | bool | `true` | | +| sentry.subscriptionConsumerMetrics.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.subscriptionConsumerMetrics.livenessProbe.periodSeconds | int | `320` | | +| sentry.subscriptionConsumerMetrics.nodeSelector | object | `{}` | | +| sentry.subscriptionConsumerMetrics.replicas | int | `1` | | +| sentry.subscriptionConsumerMetrics.resources | object | `{}` | | +| sentry.subscriptionConsumerMetrics.securityContext | object | `{}` | | +| sentry.subscriptionConsumerMetrics.sidecars | list | `[]` | | +| sentry.subscriptionConsumerMetrics.topologySpreadConstraints | list | `[]` | | +| sentry.subscriptionConsumerMetrics.volumes | list | `[]` | | +| sentry.subscriptionConsumerSessions.affinity | object | `{}` | | +| sentry.subscriptionConsumerSessions.containerSecurityContext | object | `{}` | | +| sentry.subscriptionConsumerSessions.env | list | `[]` | | +| sentry.subscriptionConsumerSessions.nodeSelector | object | `{}` | | +| sentry.subscriptionConsumerSessions.replicas | int | `1` | | +| sentry.subscriptionConsumerSessions.resources | object | `{}` | | +| sentry.subscriptionConsumerSessions.securityContext | object | `{}` | | +| sentry.subscriptionConsumerSessions.sidecars | list | `[]` | | +| sentry.subscriptionConsumerSessions.topologySpreadConstraints | list | `[]` | | +| sentry.subscriptionConsumerSessions.volumes | list | `[]` | | +| sentry.subscriptionConsumerTransactions.affinity | object | `{}` | | +| sentry.subscriptionConsumerTransactions.containerSecurityContext | object | `{}` | | +| sentry.subscriptionConsumerTransactions.enabled | bool | `true` | | +| sentry.subscriptionConsumerTransactions.env | list | `[]` | | +| sentry.subscriptionConsumerTransactions.livenessProbe.enabled | bool | `true` | | +| sentry.subscriptionConsumerTransactions.livenessProbe.initialDelaySeconds | int | `5` | | +| sentry.subscriptionConsumerTransactions.livenessProbe.periodSeconds | int | `320` | | +| sentry.subscriptionConsumerTransactions.nodeSelector | object | `{}` | | +| sentry.subscriptionConsumerTransactions.replicas | int | `1` | | +| sentry.subscriptionConsumerTransactions.resources | object | `{}` | | +| sentry.subscriptionConsumerTransactions.securityContext | object | `{}` | | +| sentry.subscriptionConsumerTransactions.sidecars | list | `[]` | | +| sentry.subscriptionConsumerTransactions.topologySpreadConstraints | list | `[]` | | +| sentry.subscriptionConsumerTransactions.volumes | list | `[]` | | +| sentry.web.affinity | 
object | `{}` | | +| sentry.web.autoscaling.enabled | bool | `false` | | +| sentry.web.autoscaling.maxReplicas | int | `5` | | +| sentry.web.autoscaling.minReplicas | int | `2` | | +| sentry.web.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.web.containerSecurityContext | object | `{}` | | +| sentry.web.customResponseHeaders | list | `[]` | | +| sentry.web.enabled | bool | `true` | | +| sentry.web.env | list | `[]` | | +| sentry.web.existingSecretEnv | list | `[]` | | +| sentry.web.nodeSelector | object | `{}` | | +| sentry.web.probeFailureThreshold | int | `5` | | +| sentry.web.probeInitialDelaySeconds | int | `10` | | +| sentry.web.probePeriodSeconds | int | `10` | | +| sentry.web.probeSuccessThreshold | int | `1` | | +| sentry.web.probeTimeoutSeconds | int | `2` | | +| sentry.web.replicas | int | `1` | | +| sentry.web.resources | object | `{}` | | +| sentry.web.securityContext | object | `{}` | | +| sentry.web.securityPolicy | string | `""` | | +| sentry.web.service.annotations | object | `{}` | | +| sentry.web.sidecars | list | `[]` | | +| sentry.web.strategyType | string | `"RollingUpdate"` | | +| sentry.web.topologySpreadConstraints | list | `[]` | | +| sentry.web.volumeMounts | list | `[]` | | +| sentry.web.volumes | list | `[]` | | +| sentry.worker.affinity | object | `{}` | | +| sentry.worker.autoscaling.enabled | bool | `false` | | +| sentry.worker.autoscaling.maxReplicas | int | `5` | | +| sentry.worker.autoscaling.minReplicas | int | `2` | | +| sentry.worker.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.worker.enabled | bool | `true` | | +| sentry.worker.env | list | `[]` | | +| sentry.worker.existingSecretEnv | list | `[]` | | +| sentry.worker.livenessProbe.enabled | bool | `true` | | +| sentry.worker.livenessProbe.failureThreshold | int | `3` | | +| sentry.worker.livenessProbe.periodSeconds | int | `60` | | +| sentry.worker.livenessProbe.timeoutSeconds | int | `10` | | +| sentry.worker.nodeSelector | object | `{}` | | +| sentry.worker.replicas | int | `1` | | +| sentry.worker.resources | object | `{}` | | +| sentry.worker.sidecars | list | `[]` | | +| sentry.worker.topologySpreadConstraints | list | `[]` | | +| sentry.worker.volumeMounts | list | `[]` | | +| sentry.worker.volumes | list | `[]` | | +| sentry.workerEvents.affinity | object | `{}` | | +| sentry.workerEvents.autoscaling.enabled | bool | `false` | | +| sentry.workerEvents.autoscaling.maxReplicas | int | `5` | | +| sentry.workerEvents.autoscaling.minReplicas | int | `2` | | +| sentry.workerEvents.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.workerEvents.enabled | bool | `false` | | +| sentry.workerEvents.env | list | `[]` | | +| sentry.workerEvents.livenessProbe.enabled | bool | `false` | | +| sentry.workerEvents.livenessProbe.failureThreshold | int | `3` | | +| sentry.workerEvents.livenessProbe.periodSeconds | int | `60` | | +| sentry.workerEvents.livenessProbe.timeoutSeconds | int | `10` | | +| sentry.workerEvents.nodeSelector | object | `{}` | | +| sentry.workerEvents.queues | string | `"events.save_event,post_process_errors"` | | +| sentry.workerEvents.replicas | int | `1` | | +| sentry.workerEvents.resources | object | `{}` | | +| sentry.workerEvents.sidecars | list | `[]` | | +| sentry.workerEvents.topologySpreadConstraints | list | `[]` | | +| sentry.workerEvents.volumeMounts | list | `[]` | | +| sentry.workerEvents.volumes | list | `[]` | | +| sentry.workerTransactions.affinity | object | `{}` | | +| 
sentry.workerTransactions.autoscaling.enabled | bool | `false` | | +| sentry.workerTransactions.autoscaling.maxReplicas | int | `5` | | +| sentry.workerTransactions.autoscaling.minReplicas | int | `2` | | +| sentry.workerTransactions.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| sentry.workerTransactions.enabled | bool | `false` | | +| sentry.workerTransactions.env | list | `[]` | | +| sentry.workerTransactions.livenessProbe.enabled | bool | `false` | | +| sentry.workerTransactions.livenessProbe.failureThreshold | int | `3` | | +| sentry.workerTransactions.livenessProbe.periodSeconds | int | `60` | | +| sentry.workerTransactions.livenessProbe.timeoutSeconds | int | `10` | | +| sentry.workerTransactions.nodeSelector | object | `{}` | | +| sentry.workerTransactions.queues | string | `"events.save_event_transaction,post_process_transactions"` | | +| sentry.workerTransactions.replicas | int | `1` | | +| sentry.workerTransactions.resources | object | `{}` | | +| sentry.workerTransactions.sidecars | list | `[]` | | +| sentry.workerTransactions.topologySpreadConstraints | list | `[]` | | +| sentry.workerTransactions.volumeMounts | list | `[]` | | +| sentry.workerTransactions.volumes | list | `[]` | | +| service.annotations | object | `{}` | | +| service.externalPort | int | `9000` | | +| service.name | string | `"sentry"` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | Additional Service Account annotations. | +| serviceAccount.automountServiceAccountToken | bool | `true` | Automount API credentials for a Service Account. | +| serviceAccount.enabled | bool | `false` | If `true`, a custom Service Account will be used. | +| serviceAccount.name | string | `"sentry"` | The base name of the ServiceAccount to use. Will be appended with e.g. `snuba-api` or `web` for the pods accordingly. 
| +| slack | object | `{}` | | +| snuba.api.affinity | object | `{}` | | +| snuba.api.autoscaling.enabled | bool | `false` | | +| snuba.api.autoscaling.maxReplicas | int | `5` | | +| snuba.api.autoscaling.minReplicas | int | `2` | | +| snuba.api.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| snuba.api.command | list | `[]` | | +| snuba.api.containerSecurityContext | object | `{}` | | +| snuba.api.enabled | bool | `true` | | +| snuba.api.env | list | `[]` | | +| snuba.api.liveness.timeoutSeconds | int | `2` | | +| snuba.api.nodeSelector | object | `{}` | | +| snuba.api.probeInitialDelaySeconds | int | `10` | | +| snuba.api.readiness.timeoutSeconds | int | `2` | | +| snuba.api.replicas | int | `1` | | +| snuba.api.resources | object | `{}` | | +| snuba.api.securityContext | object | `{}` | | +| snuba.api.service.annotations | object | `{}` | | +| snuba.api.sidecars | list | `[]` | | +| snuba.api.topologySpreadConstraints | list | `[]` | | +| snuba.api.volumes | list | `[]` | | +| snuba.clickhouse.maxConnections | int | `100` | | +| snuba.consumer.affinity | object | `{}` | | +| snuba.consumer.autoOffsetReset | string | `"earliest"` | | +| snuba.consumer.containerSecurityContext | object | `{}` | | +| snuba.consumer.enabled | bool | `true` | | +| snuba.consumer.env | list | `[]` | | +| snuba.consumer.livenessProbe.enabled | bool | `true` | | +| snuba.consumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.consumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.consumer.maxBatchTimeMs | int | `750` | | +| snuba.consumer.nodeSelector | object | `{}` | | +| snuba.consumer.replicas | int | `1` | | +| snuba.consumer.resources | object | `{}` | | +| snuba.consumer.securityContext | object | `{}` | | +| snuba.consumer.topologySpreadConstraints | list | `[]` | | +| snuba.dbInitJob.env | list | `[]` | | +| snuba.genericMetricsCountersConsumer.affinity | object | `{}` | | +| snuba.genericMetricsCountersConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.genericMetricsCountersConsumer.containerSecurityContext | object | `{}` | | +| snuba.genericMetricsCountersConsumer.enabled | bool | `true` | | +| snuba.genericMetricsCountersConsumer.env | list | `[]` | | +| snuba.genericMetricsCountersConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.genericMetricsCountersConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.genericMetricsCountersConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.genericMetricsCountersConsumer.maxBatchTimeMs | int | `750` | | +| snuba.genericMetricsCountersConsumer.nodeSelector | object | `{}` | | +| snuba.genericMetricsCountersConsumer.replicas | int | `1` | | +| snuba.genericMetricsCountersConsumer.resources | object | `{}` | | +| snuba.genericMetricsCountersConsumer.securityContext | object | `{}` | | +| snuba.genericMetricsCountersConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.genericMetricsDistributionConsumer.affinity | object | `{}` | | +| snuba.genericMetricsDistributionConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.genericMetricsDistributionConsumer.containerSecurityContext | object | `{}` | | +| snuba.genericMetricsDistributionConsumer.enabled | bool | `true` | | +| snuba.genericMetricsDistributionConsumer.env | list | `[]` | | +| snuba.genericMetricsDistributionConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.genericMetricsDistributionConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| 
snuba.genericMetricsDistributionConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.genericMetricsDistributionConsumer.maxBatchTimeMs | int | `750` | | +| snuba.genericMetricsDistributionConsumer.nodeSelector | object | `{}` | | +| snuba.genericMetricsDistributionConsumer.replicas | int | `1` | | +| snuba.genericMetricsDistributionConsumer.resources | object | `{}` | | +| snuba.genericMetricsDistributionConsumer.securityContext | object | `{}` | | +| snuba.genericMetricsDistributionConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.genericMetricsSetsConsumer.affinity | object | `{}` | | +| snuba.genericMetricsSetsConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.genericMetricsSetsConsumer.containerSecurityContext | object | `{}` | | +| snuba.genericMetricsSetsConsumer.enabled | bool | `true` | | +| snuba.genericMetricsSetsConsumer.env | list | `[]` | | +| snuba.genericMetricsSetsConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.genericMetricsSetsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.genericMetricsSetsConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.genericMetricsSetsConsumer.maxBatchTimeMs | int | `750` | | +| snuba.genericMetricsSetsConsumer.nodeSelector | object | `{}` | | +| snuba.genericMetricsSetsConsumer.replicas | int | `1` | | +| snuba.genericMetricsSetsConsumer.resources | object | `{}` | | +| snuba.genericMetricsSetsConsumer.securityContext | object | `{}` | | +| snuba.genericMetricsSetsConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.groupAttributesConsumer.affinity | object | `{}` | | +| snuba.groupAttributesConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.groupAttributesConsumer.containerSecurityContext | object | `{}` | | +| snuba.groupAttributesConsumer.enabled | bool | `true` | | +| snuba.groupAttributesConsumer.env | list | `[]` | | +| snuba.groupAttributesConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.groupAttributesConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.groupAttributesConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.groupAttributesConsumer.maxBatchTimeMs | int | `750` | | +| snuba.groupAttributesConsumer.nodeSelector | object | `{}` | | +| snuba.groupAttributesConsumer.replicas | int | `1` | | +| snuba.groupAttributesConsumer.resources | object | `{}` | | +| snuba.groupAttributesConsumer.securityContext | object | `{}` | | +| snuba.groupAttributesConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.issueOccurrenceConsumer.affinity | object | `{}` | | +| snuba.issueOccurrenceConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.issueOccurrenceConsumer.containerSecurityContext | object | `{}` | | +| snuba.issueOccurrenceConsumer.enabled | bool | `true` | | +| snuba.issueOccurrenceConsumer.env | list | `[]` | | +| snuba.issueOccurrenceConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.issueOccurrenceConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.issueOccurrenceConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.issueOccurrenceConsumer.maxBatchTimeMs | int | `750` | | +| snuba.issueOccurrenceConsumer.nodeSelector | object | `{}` | | +| snuba.issueOccurrenceConsumer.replicas | int | `1` | | +| snuba.issueOccurrenceConsumer.resources | object | `{}` | | +| snuba.issueOccurrenceConsumer.securityContext | object | `{}` | | +| snuba.issueOccurrenceConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.metricsConsumer.affinity | object | `{}` | | 
+| snuba.metricsConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.metricsConsumer.containerSecurityContext | object | `{}` | | +| snuba.metricsConsumer.enabled | bool | `true` | | +| snuba.metricsConsumer.env | list | `[]` | | +| snuba.metricsConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.metricsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.metricsConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.metricsConsumer.maxBatchTimeMs | int | `750` | | +| snuba.metricsConsumer.nodeSelector | object | `{}` | | +| snuba.metricsConsumer.replicas | int | `1` | | +| snuba.metricsConsumer.resources | object | `{}` | | +| snuba.metricsConsumer.securityContext | object | `{}` | | +| snuba.metricsConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.migrateJob.env | list | `[]` | | +| snuba.outcomesBillingConsumer.affinity | object | `{}` | | +| snuba.outcomesBillingConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.outcomesBillingConsumer.containerSecurityContext | object | `{}` | | +| snuba.outcomesBillingConsumer.enabled | bool | `true` | | +| snuba.outcomesBillingConsumer.env | list | `[]` | | +| snuba.outcomesBillingConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.outcomesBillingConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.outcomesBillingConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.outcomesBillingConsumer.maxBatchSize | string | `"3"` | | +| snuba.outcomesBillingConsumer.maxBatchTimeMs | int | `750` | | +| snuba.outcomesBillingConsumer.nodeSelector | object | `{}` | | +| snuba.outcomesBillingConsumer.replicas | int | `1` | | +| snuba.outcomesBillingConsumer.resources | object | `{}` | | +| snuba.outcomesBillingConsumer.securityContext | object | `{}` | | +| snuba.outcomesBillingConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.outcomesConsumer.affinity | object | `{}` | | +| snuba.outcomesConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.outcomesConsumer.containerSecurityContext | object | `{}` | | +| snuba.outcomesConsumer.enabled | bool | `true` | | +| snuba.outcomesConsumer.env | list | `[]` | | +| snuba.outcomesConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.outcomesConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.outcomesConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.outcomesConsumer.maxBatchSize | string | `"3"` | | +| snuba.outcomesConsumer.nodeSelector | object | `{}` | | +| snuba.outcomesConsumer.replicas | int | `1` | | +| snuba.outcomesConsumer.resources | object | `{}` | | +| snuba.outcomesConsumer.securityContext | object | `{}` | | +| snuba.outcomesConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.profilingFunctionsConsumer.affinity | object | `{}` | | +| snuba.profilingFunctionsConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.profilingFunctionsConsumer.containerSecurityContext | object | `{}` | | +| snuba.profilingFunctionsConsumer.env | list | `[]` | | +| snuba.profilingFunctionsConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.profilingFunctionsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.profilingFunctionsConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.profilingFunctionsConsumer.maxBatchTimeMs | int | `750` | | +| snuba.profilingFunctionsConsumer.nodeSelector | object | `{}` | | +| snuba.profilingFunctionsConsumer.replicas | int | `1` | | +| snuba.profilingFunctionsConsumer.resources | object | `{}` | | +| 
snuba.profilingFunctionsConsumer.securityContext | object | `{}` | | +| snuba.profilingFunctionsConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.profilingProfilesConsumer.affinity | object | `{}` | | +| snuba.profilingProfilesConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.profilingProfilesConsumer.containerSecurityContext | object | `{}` | | +| snuba.profilingProfilesConsumer.env | list | `[]` | | +| snuba.profilingProfilesConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.profilingProfilesConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.profilingProfilesConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.profilingProfilesConsumer.maxBatchTimeMs | int | `750` | | +| snuba.profilingProfilesConsumer.nodeSelector | object | `{}` | | +| snuba.profilingProfilesConsumer.replicas | int | `1` | | +| snuba.profilingProfilesConsumer.resources | object | `{}` | | +| snuba.profilingProfilesConsumer.securityContext | object | `{}` | | +| snuba.profilingProfilesConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.replacer.affinity | object | `{}` | | +| snuba.replacer.autoOffsetReset | string | `"earliest"` | | +| snuba.replacer.containerSecurityContext | object | `{}` | | +| snuba.replacer.enabled | bool | `true` | | +| snuba.replacer.env | list | `[]` | | +| snuba.replacer.nodeSelector | object | `{}` | | +| snuba.replacer.replicas | int | `1` | | +| snuba.replacer.resources | object | `{}` | | +| snuba.replacer.securityContext | object | `{}` | | +| snuba.replacer.topologySpreadConstraints | list | `[]` | | +| snuba.replaysConsumer.affinity | object | `{}` | | +| snuba.replaysConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.replaysConsumer.containerSecurityContext | object | `{}` | | +| snuba.replaysConsumer.enabled | bool | `true` | | +| snuba.replaysConsumer.env | list | `[]` | | +| snuba.replaysConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.replaysConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.replaysConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.replaysConsumer.maxBatchTimeMs | int | `750` | | +| snuba.replaysConsumer.nodeSelector | object | `{}` | | +| snuba.replaysConsumer.replicas | int | `1` | | +| snuba.replaysConsumer.resources | object | `{}` | | +| snuba.replaysConsumer.securityContext | object | `{}` | | +| snuba.replaysConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.rustConsumer | bool | `false` | | +| snuba.sessionsConsumer.affinity | object | `{}` | | +| snuba.sessionsConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.sessionsConsumer.containerSecurityContext | object | `{}` | | +| snuba.sessionsConsumer.env | list | `[]` | | +| snuba.sessionsConsumer.nodeSelector | object | `{}` | | +| snuba.sessionsConsumer.replicas | int | `1` | | +| snuba.sessionsConsumer.resources | object | `{}` | | +| snuba.sessionsConsumer.securityContext | object | `{}` | | +| snuba.sessionsConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.spansConsumer.affinity | object | `{}` | | +| snuba.spansConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.spansConsumer.containerSecurityContext | object | `{}` | | +| snuba.spansConsumer.enabled | bool | `true` | | +| snuba.spansConsumer.env | list | `[]` | | +| snuba.spansConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.spansConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.spansConsumer.livenessProbe.periodSeconds | int | `320` | | +| 
snuba.spansConsumer.maxBatchTimeMs | int | `750` | | +| snuba.spansConsumer.nodeSelector | object | `{}` | | +| snuba.spansConsumer.replicas | int | `1` | | +| snuba.spansConsumer.resources | object | `{}` | | +| snuba.spansConsumer.securityContext | object | `{}` | | +| snuba.spansConsumer.topologySpreadConstraints | list | `[]` | | +| snuba.subscriptionConsumerEvents.affinity | object | `{}` | | +| snuba.subscriptionConsumerEvents.autoOffsetReset | string | `"earliest"` | | +| snuba.subscriptionConsumerEvents.containerSecurityContext | object | `{}` | | +| snuba.subscriptionConsumerEvents.enabled | bool | `true` | | +| snuba.subscriptionConsumerEvents.env | list | `[]` | | +| snuba.subscriptionConsumerEvents.livenessProbe.enabled | bool | `true` | | +| snuba.subscriptionConsumerEvents.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.subscriptionConsumerEvents.livenessProbe.periodSeconds | int | `320` | | +| snuba.subscriptionConsumerEvents.nodeSelector | object | `{}` | | +| snuba.subscriptionConsumerEvents.replicas | int | `1` | | +| snuba.subscriptionConsumerEvents.resources | object | `{}` | | +| snuba.subscriptionConsumerEvents.securityContext | object | `{}` | | +| snuba.subscriptionConsumerEvents.topologySpreadConstraints | list | `[]` | | +| snuba.subscriptionConsumerMetrics.affinity | object | `{}` | | +| snuba.subscriptionConsumerMetrics.autoOffsetReset | string | `"earliest"` | | +| snuba.subscriptionConsumerMetrics.containerSecurityContext | object | `{}` | | +| snuba.subscriptionConsumerMetrics.enabled | bool | `true` | | +| snuba.subscriptionConsumerMetrics.env | list | `[]` | | +| snuba.subscriptionConsumerMetrics.livenessProbe.enabled | bool | `true` | | +| snuba.subscriptionConsumerMetrics.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.subscriptionConsumerMetrics.livenessProbe.periodSeconds | int | `320` | | +| snuba.subscriptionConsumerMetrics.nodeSelector | object | `{}` | | +| snuba.subscriptionConsumerMetrics.replicas | int | `1` | | +| snuba.subscriptionConsumerMetrics.resources | object | `{}` | | +| snuba.subscriptionConsumerMetrics.securityContext | object | `{}` | | +| snuba.subscriptionConsumerMetrics.topologySpreadConstraints | list | `[]` | | +| snuba.subscriptionConsumerSessions.affinity | object | `{}` | | +| snuba.subscriptionConsumerSessions.autoOffsetReset | string | `"earliest"` | | +| snuba.subscriptionConsumerSessions.containerSecurityContext | object | `{}` | | +| snuba.subscriptionConsumerSessions.env | list | `[]` | | +| snuba.subscriptionConsumerSessions.nodeSelector | object | `{}` | | +| snuba.subscriptionConsumerSessions.replicas | int | `1` | | +| snuba.subscriptionConsumerSessions.resources | object | `{}` | | +| snuba.subscriptionConsumerSessions.securityContext | object | `{}` | | +| snuba.subscriptionConsumerSessions.sidecars | list | `[]` | | +| snuba.subscriptionConsumerSessions.topologySpreadConstraints | list | `[]` | | +| snuba.subscriptionConsumerSessions.volumes | list | `[]` | | +| snuba.subscriptionConsumerTransactions.affinity | object | `{}` | | +| snuba.subscriptionConsumerTransactions.autoOffsetReset | string | `"earliest"` | | +| snuba.subscriptionConsumerTransactions.containerSecurityContext | object | `{}` | | +| snuba.subscriptionConsumerTransactions.enabled | bool | `true` | | +| snuba.subscriptionConsumerTransactions.env | list | `[]` | | +| snuba.subscriptionConsumerTransactions.livenessProbe.enabled | bool | `true` | | +| snuba.subscriptionConsumerTransactions.livenessProbe.initialDelaySeconds | 
int | `5` | | +| snuba.subscriptionConsumerTransactions.livenessProbe.periodSeconds | int | `320` | | +| snuba.subscriptionConsumerTransactions.nodeSelector | object | `{}` | | +| snuba.subscriptionConsumerTransactions.replicas | int | `1` | | +| snuba.subscriptionConsumerTransactions.resources | object | `{}` | | +| snuba.subscriptionConsumerTransactions.securityContext | object | `{}` | | +| snuba.subscriptionConsumerTransactions.topologySpreadConstraints | list | `[]` | | +| snuba.transactionsConsumer.affinity | object | `{}` | | +| snuba.transactionsConsumer.autoOffsetReset | string | `"earliest"` | | +| snuba.transactionsConsumer.containerSecurityContext | object | `{}` | | +| snuba.transactionsConsumer.enabled | bool | `true` | | +| snuba.transactionsConsumer.env | list | `[]` | | +| snuba.transactionsConsumer.livenessProbe.enabled | bool | `true` | | +| snuba.transactionsConsumer.livenessProbe.initialDelaySeconds | int | `5` | | +| snuba.transactionsConsumer.livenessProbe.periodSeconds | int | `320` | | +| snuba.transactionsConsumer.maxBatchTimeMs | int | `750` | | +| snuba.transactionsConsumer.nodeSelector | object | `{}` | | +| snuba.transactionsConsumer.replicas | int | `1` | | +| snuba.transactionsConsumer.resources | object | `{}` | | +| snuba.transactionsConsumer.securityContext | object | `{}` | | +| snuba.transactionsConsumer.topologySpreadConstraints | list | `[]` | | +| sourcemaps.enabled | bool | `false` | | +| symbolicator.api.affinity | object | `{}` | | +| symbolicator.api.autoscaling.enabled | bool | `false` | | +| symbolicator.api.autoscaling.maxReplicas | int | `5` | | +| symbolicator.api.autoscaling.minReplicas | int | `2` | | +| symbolicator.api.autoscaling.targetCPUUtilizationPercentage | int | `50` | | +| symbolicator.api.config | string | `"# See: https://getsentry.github.io/symbolicator/#configuration\ncache_dir: \"/data\"\nbind: \"0.0.0.0:3021\"\nlogging:\n level: \"warn\"\nmetrics:\n statsd: null\n prefix: \"symbolicator\"\nsentry_dsn: null\nconnect_to_reserved_ips: true\n# caches:\n# downloaded:\n# max_unused_for: 1w\n# retry_misses_after: 5m\n# retry_malformed_after: 5m\n# derived:\n# max_unused_for: 1w\n# retry_misses_after: 5m\n# retry_malformed_after: 5m\n# diagnostics:\n# retention: 1w"` | | +| symbolicator.api.containerSecurityContext | object | `{}` | | +| symbolicator.api.env | list | `[]` | | +| symbolicator.api.nodeSelector | object | `{}` | | +| symbolicator.api.persistence.accessModes[0] | string | `"ReadWriteOnce"` | | +| symbolicator.api.persistence.enabled | bool | `true` | | +| symbolicator.api.persistence.size | string | `"10Gi"` | | +| symbolicator.api.probeInitialDelaySeconds | int | `10` | | +| symbolicator.api.replicas | int | `1` | | +| symbolicator.api.resources | object | `{}` | | +| symbolicator.api.securityContext | object | `{}` | | +| symbolicator.api.topologySpreadConstraints | list | `[]` | | +| symbolicator.api.usedeployment | bool | `true` | | +| symbolicator.cleanup.enabled | bool | `false` | | +| symbolicator.enabled | bool | `false` | | +| system.adminEmail | string | `""` | | +| system.public | bool | `false` | | +| system.url | string | `""` | | +| user.create | bool | `true` | | +| user.email | string | `"admin@sentry.local"` | | +| user.password | string | `"aaaa"` | | +| vroom.affinity | object | `{}` | | +| vroom.autoscaling.enabled | bool | `false` | | +| vroom.autoscaling.maxReplicas | int | `5` | | +| vroom.autoscaling.minReplicas | int | `2` | | +| vroom.autoscaling.targetCPUUtilizationPercentage | int | `50` | | 
+| vroom.containerSecurityContext | object | `{}` | |
+| vroom.env | list | `[]` | |
+| vroom.nodeSelector | object | `{}` | |
+| vroom.probeFailureThreshold | int | `5` | |
+| vroom.probeInitialDelaySeconds | int | `10` | |
+| vroom.probePeriodSeconds | int | `10` | |
+| vroom.probeSuccessThreshold | int | `1` | |
+| vroom.probeTimeoutSeconds | int | `2` | |
+| vroom.replicas | int | `1` | |
+| vroom.resources | object | `{}` | |
+| vroom.securityContext | object | `{}` | |
+| vroom.service.annotations | object | `{}` | |
+| vroom.sidecars | list | `[]` | |
+| vroom.volumeMounts | list | `[]` | |
+| vroom.volumes | list | `[]` | |
+| zookeeper.enabled | bool | `true` | |
+| zookeeper.nameOverride | string | `"zookeeper-clickhouse"` | |
+| zookeeper.replicaCount | int | `1` | |
+
+## NGINX and/or Ingress
+
+By default, NGINX is enabled so that incoming requests are routed to [Sentry Relay](https://getsentry.github.io/relay/) or the Django backend, depending on the request path. When Sentry is meant to be exposed outside of the Kubernetes cluster, it is recommended to disable NGINX and let the Ingress handle this routing instead. The go-to Ingress controller, [NGINX Ingress](https://kubernetes.github.io/ingress-nginx/), is recommended, but others should work as well.
+
+## Sentry secret key
+
+If no `sentry.existingSecret` value is specified, then for your security the [`system.secret-key`](https://develop.sentry.dev/config/#general) is generated for you on the first installation and stored in a Kubernetes secret.
+
+If `sentry.existingSecret` / `sentry.existingSecretKey` values are provided, those secrets will be used.
+
+## Symbolicator and/or JavaScript source maps
+
+To get native stack traces and minidumps symbolicated with debug symbols (e.g. for iOS/Android), you need to enable Symbolicator via
+
+```yaml
+symbolicator:
+  enabled: true
+```
+
+However, you also need to share the data between sentry-worker and sentry-web. This can be done in different ways:
+
+- Using Cloud Storage like GCP GCS or AWS S3; see `filestore.backend` in `values.yaml`
+- Using a filesystem, like the following:
+
+```yaml
+filestore:
+  filesystem:
+    persistence:
+      persistentWorkers: true
+      # storageClass: 'efs-storage' # see note below
+```
+
+Note: If you need to run (or cannot avoid running) sentry-worker and sentry-web on different cluster nodes, you need to set `filestore.filesystem.persistence.accessMode: ReadWriteMany`, or you might run into problems. However, [not all volume drivers support it](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes); AWS EBS and GCP disks, for example, do not.
+In that case you would want to create and use a `StorageClass` with a supported volume driver, such as [AWS EFS](https://github.com/kubernetes-sigs/aws-efs-csi-driver) (see the sketch at the end of this section).
+
+It's also important to have `connect_to_reserved_ips: true` in the Symbolicator config file, which this chart defaults to.
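+
+For example, a minimal `filestore` sketch for the shared-filesystem setup above (assuming the default `filestore.backend: filesystem` and a hypothetical `efs-storage` StorageClass backed by a `ReadWriteMany`-capable driver such as AWS EFS) could look like:
+
+```yaml
+filestore:
+  backend: filesystem
+  filesystem:
+    persistence:
+      enabled: true # keep persistence enabled so workers and web share the same volume
+      persistentWorkers: true
+      accessMode: ReadWriteMany
+      storageClass: "efs-storage" # hypothetical EFS-backed StorageClass
+```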
+
+#### Source Maps
+
+To get JavaScript source map processing working, you need to activate sourcemaps, which in turn activates the memcached dependency:
+
+```yaml
+sourcemaps:
+  enabled: true
+```
+
+For background details, see this blog post: https://engblog.yext.com/post/sentry-js-source-maps
+
+## Geolocation
+
+[Geolocation of IP addresses](https://develop.sentry.dev/self-hosted/geolocation/) is supported if you provide a GeoIP database.
+
+Example `values.yaml`:
+
+```yaml
+relay:
+  # provide a volume for relay that contains the geoip database
+  volumes:
+    - name: geoip
+      hostPath:
+        path: /geodata
+        type: Directory
+
+sentry:
+  web:
+    # provide a volume for sentry-web that contains the geoip database
+    volumes:
+      - name: geoip
+        hostPath:
+          path: /geodata
+          type: Directory
+
+  worker:
+    # provide a volume for sentry-worker that contains the geoip database
+    volumes:
+      - name: geoip
+        hostPath:
+          path: /geodata
+          type: Directory
+
+# enable and reference the volume
+geodata:
+  volumeName: geoip
+  # mountPath of the volume containing the database
+  mountPath: /geodata
+  # path to the geoip database inside the volumemount
+  path: /geodata/GeoLite2-City.mmdb
+```
+
+Alternatively, configure `geodata` with MaxMind credentials and persistent storage (warning: the storage must support `ReadWriteMany`):
+
+```yaml
+# enable and reference the volume
+geodata:
+  accountID: "example"
+  licenseKey: "example"
+  editionIDs: "example"
+  persistence:
+    ## If defined, storageClassName:
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
+    ##   GKE, AWS & OpenStack)
+    # storageClass: ""
+    size: 1Gi
+  volumeName: "data-sentry-geoip"
+  # mountPath of the volume containing the database
+  mountPath: "/usr/share/GeoIP"
+  # path to the geoip database inside the volumemount
+  path: "/usr/share/GeoIP/GeoLite2-City.mmdb"
+```
+
+## External Kafka configuration
+
+You can either provide a single host, as is the default in `values.yaml`:
+
+```yaml
+externalKafka:
+  ## Hostname or IP address of the external Kafka
+  ##
+  host: "kafka-confluent"
+  port: 9092
+```
+
+or you can configure a cluster of Kafka instances, as below:
+
+```yaml
+externalKafka:
+  cluster:
+    ## List of hostnames or IP addresses and ports of the external Kafka brokers
+    - host: "233.5.100.28"
+      port: 9092
+    - host: "kafka-confluent-2"
+      port: 9093
+    - host: "kafka-confluent-3"
+      port: 9094
+```
+
+## External Postgres configuration
+
+You can either pass Postgres connection credentials directly in `values.yaml`:
+
+```yaml
+externalPostgresql:
+  host: postgres
+  port: 5432
+  username: postgres
+  password: postgres
+  database: sentry
+```
+
+or use an existing `Secret`, as in the example below:
+
+```yaml
+externalPostgresql:
+  existingSecret: secret-name
+  existingSecretKeys:
+    password: password
+    username: username
+    database: database
+    port: port
+    host: host
+```
+
+It is also possible to define which properties are taken from the secret and which from `values.yaml`; the example below takes only the `username` and `password` values from the secret:
+
+```yaml
+externalPostgresql:
+  existingSecret: secret-name
+  existingSecretKeys:
+    password: password
+    username: username
+  port: 8000
+  host: postgres
+  database: sentry
+```
+
+> ⚠️ `.Values.externalPostgresql.existingSecretKey` is deprecated; use `.Values.externalPostgresql.existingSecretKeys.password` instead.
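+
+Note that the chart falls back to `externalPostgresql` only when `postgresql.enabled` is set to `false` (see the `sentry.postgresql.host` helper), so a minimal sketch combining the two settings (hostname and credentials below are placeholders) could look like:
+
+```yaml
+postgresql:
+  enabled: false # disable the bundled PostgreSQL sub-chart
+
+externalPostgresql:
+  host: postgres.internal.example # hypothetical hostname
+  port: 5432
+  username: sentry
+  password: change-me # hypothetical credentials
+  database: sentry
+```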
+ +# Usage + +- [AWS + Terraform](docs/usage-aws-terraform.md) +- [DigitalOcean](docs/usage-digitalocean.md) diff --git a/sentry/docs/UPGRADE.md b/charts/sentry/docs/UPGRADE.md similarity index 100% rename from sentry/docs/UPGRADE.md rename to charts/sentry/docs/UPGRADE.md diff --git a/sentry/docs/usage-aws-terraform.md b/charts/sentry/docs/usage-aws-terraform.md similarity index 100% rename from sentry/docs/usage-aws-terraform.md rename to charts/sentry/docs/usage-aws-terraform.md diff --git a/sentry/docs/usage-digitalocean.md b/charts/sentry/docs/usage-digitalocean.md similarity index 100% rename from sentry/docs/usage-digitalocean.md rename to charts/sentry/docs/usage-digitalocean.md diff --git a/sentry/templates/NOTES.txt b/charts/sentry/templates/NOTES.txt similarity index 62% rename from sentry/templates/NOTES.txt rename to charts/sentry/templates/NOTES.txt index f99c764ad..d9cd60aba 100644 --- a/sentry/templates/NOTES.txt +++ b/charts/sentry/templates/NOTES.txt @@ -1,3 +1,9 @@ * When running upgrades, make sure to give back the `system.secretKey` value. kubectl -n {{ .Release.Namespace }} get configmap {{ template "sentry.fullname" . }}-sentry -o json | grep -m1 -Po '(?<=system.secret-key: )[^\\]*' + +{{ if not (.Values.kafka.enabled) }} +* Sentry use external kafka: + +{{ template "sentry.kafka.bootstrap_servers_string" . }} +{{ end -}} diff --git a/charts/sentry/templates/_helper.tpl b/charts/sentry/templates/_helper.tpl new file mode 100644 index 000000000..36da31e02 --- /dev/null +++ b/charts/sentry/templates/_helper.tpl @@ -0,0 +1,951 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "sentry.prefix" -}} + {{- if .Values.prefix -}} + {{.Values.prefix}}- + {{- else -}} + {{- end -}} +{{- end -}} + +{{- define "nginx.port" -}}{{ default "8080" .Values.nginx.containerPort }}{{- end -}} +{{- define "relay.port" -}}3000{{- end -}} +{{- define "relay.healthCheck.readinessRequestPath" -}}/api/relay/healthcheck/ready/{{- end -}} +{{- define "relay.healthCheck.livenessRequestPath" -}}/api/relay/healthcheck/live/{{- end -}} +{{- define "sentry.port" -}}9000{{- end -}} +{{- define "sentry.healthCheck.requestPath" -}}/_health/{{- end -}} +{{- define "relay.healthCheck.requestPath" -}}/api/relay/healthcheck/live/{{- end -}} +{{- define "snuba.port" -}}1218{{- end -}} +{{- define "symbolicator.port" -}}3021{{- end -}} +{{- define "vroom.port" -}}8085{{- end -}} + +{{- define "relay.image" -}} +{{- default "getsentry/relay" .Values.images.relay.repository -}} +: +{{- default .Chart.AppVersion .Values.images.relay.tag -}} +{{- end -}} +{{- define "sentry.image" -}} +{{- default "getsentry/sentry" .Values.images.sentry.repository -}} +: +{{- default .Chart.AppVersion .Values.images.sentry.tag -}} +{{- end -}} +{{- define "snuba.image" -}} +{{- default "getsentry/snuba" .Values.images.snuba.repository -}} +: +{{- default .Chart.AppVersion .Values.images.snuba.tag -}} +{{- end -}} + +{{- define "symbolicator.image" -}} +{{- default "getsentry/symbolicator" .Values.images.symbolicator.repository -}} +: +{{- default .Chart.AppVersion .Values.images.symbolicator.tag -}} +{{- end -}} + +{{- define "dbCheck.image" -}} +{{- default "subfuzion/netcat" .Values.hooks.dbCheck.image.repository -}} +: +{{- default "latest" .Values.hooks.dbCheck.image.tag -}} +{{- end -}} + +{{- define "vroom.image" -}} +{{- default "getsentry/vroom" .Values.images.vroom.repository -}} +: +{{- default .Chart.AppVersion .Values.images.vroom.tag -}} +{{- end -}} + +{{/* +Expand the name of the chart. 
+*/}} +{{- define "sentry.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "sentry.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "sentry.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "sentry.ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "sentry.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "sentry.ingress.isStable" -}} + {{- eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate batch apiVersion for cronjobs. +batch/v1beta1 will no longer be served in v1.25 +See more at https://kubernetes.io/docs/reference/using-api/deprecation-guide/#cronjob-v125 +*/}} +{{- define "sentry.batch.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "batch/v1") (semverCompare ">= 1.21.x" (include "sentry.kubeVersion" .)) -}} + {{- print "batch/v1" -}} + {{- else if .Capabilities.APIVersions.Has "batch/v1beta1" -}} + {{- print "batch/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if batch is stable. +*/}} +{{- define "sentry.batch.isStable" -}} + {{- eq (include "sentry.batch.apiVersion" .) "batch/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "sentry.ingress.supportsIngressClassName" -}} + {{- or (eq (include "sentry.ingress.isStable" .) "true") (and (eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "sentry.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "sentry.ingress.supportsPathType" -}} + {{- or (eq (include "sentry.ingress.isStable" .) "true") (and (eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "sentry.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "sentry.postgresql.fullname" -}} +{{- if .Values.postgresql.fullnameOverride -}} +{{- .Values.postgresql.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.postgresql.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name "sentry-postgresql" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "sentry.redis.fullname" -}} +{{- if .Values.redis.fullnameOverride -}} +{{- .Values.redis.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.redis.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name "sentry-redis" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "sentry.rabbitmq.fullname" -}} +{{- printf "%s-%s" .Release.Name "rabbitmq" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "sentry.clickhouse.fullname" -}} +{{- printf "%s-%s" .Release.Name "clickhouse" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "sentry.kafka.fullname" -}} +{{- printf "%s-%s" .Release.Name "kafka" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "sentry.zookeeper.fullname" -}} +{{- if .Values.kafka.zookeeper.fullnameOverride -}} +{{- .Values.kafka.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.kafka.zookeeper.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name "zookeeper" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Set postgres host +*/}} +{{- define "sentry.postgresql.host" -}} +{{- if .Values.postgresql.enabled -}} +{{- template "sentry.postgresql.fullname" . -}} +{{- else -}} +{{ required "A valid .Values.externalPostgresql.host is required" .Values.externalPostgresql.host }} +{{- end -}} +{{- end -}} + +{{/* +Set postgres secret +*/}} +{{- define "sentry.postgresql.secret" -}} +{{- if .Values.postgresql.enabled -}} +{{- template "sentry.postgresql.fullname" . -}} +{{- else -}} +{{- template "sentry.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set postgres port +*/}} +{{- define "sentry.postgresql.port" -}} +{{- if .Values.postgresql.enabled -}} +{{- default 5432 .Values.postgresql.primary.service.ports.postgresql }} +{{- else -}} +{{- required "A valid .Values.externalPostgresql.port is required" .Values.externalPostgresql.port -}} +{{- end -}} +{{- end -}} + +{{/* +Set postgresql username +*/}} +{{- define "sentry.postgresql.username" -}} +{{- if .Values.postgresql.enabled -}} +{{- default "postgres" .Values.postgresql.postgresqlUsername }} +{{- else -}} +{{ required "A valid .Values.externalPostgresql.username is required" .Values.externalPostgresql.username }} +{{- end -}} +{{- end -}} + +{{/* +Set postgresql database +*/}} +{{- define "sentry.postgresql.database" -}} +{{- if .Values.postgresql.enabled -}} +{{- default "sentry" .Values.postgresql.postgresqlDatabase }} +{{- else -}} +{{ required "A valid .Values.externalPostgresql.database is required" .Values.externalPostgresql.database }} +{{- end -}} +{{- end -}} + +{{/* +Set redis host +*/}} +{{- define "sentry.redis.host" -}} +{{- if .Values.redis.enabled -}} +{{- template "sentry.redis.fullname" . 
-}}-master +{{- else -}} +{{ required "A valid .Values.externalRedis.host is required" .Values.externalRedis.host }} +{{- end -}} +{{- end -}} + +{{/* +Set redis secret +*/}} +{{- define "sentry.redis.secret" -}} +{{- if .Values.redis.enabled -}} +{{- template "sentry.redis.fullname" . -}} +{{- else -}} +{{- template "sentry.fullname" . -}} +{{- end -}} +{{- end -}} + +{{/* +Set redis port +*/}} +{{- define "sentry.redis.port" -}} +{{- if .Values.redis.enabled -}} +{{- default 6379 .Values.redis.redisPort }} +{{- else -}} +{{ required "A valid .Values.externalRedis.port is required" .Values.externalRedis.port }} +{{- end -}} +{{- end -}} + +{{/* +Set redis password +*/}} +{{- define "sentry.redis.password" -}} +{{- if and (.Values.redis.enabled) (.Values.redis.auth.enabled) -}} +{{ .Values.redis.auth.password }} +{{- else if .Values.externalRedis.password -}} +{{ .Values.externalRedis.password }} +{{- else }} +{{- end -}} +{{- end -}} + +{{/* +Set redis db +*/}} +{{- define "sentry.redis.db" -}} +{{- if .Values.redis.enabled -}} +{{ default 0 .Values.redis.db }} +{{- else -}} +{{ default 0 .Values.externalRedis.db }} +{{- end -}} +{{- end -}} + +{{/* +Set redis ssl +*/}} +{{- define "sentry.redis.ssl" -}} +{{- if .Values.redis.enabled -}} +{{ default false .Values.redis.ssl }} +{{- else -}} +{{ default false .Values.externalRedis.ssl }} +{{- end -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "sentry.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "sentry.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse host +*/}} +{{- define "sentry.clickhouse.host" -}} +{{- if .Values.clickhouse.enabled -}} +{{- template "sentry.clickhouse.fullname" . 
-}} +{{- else -}} +{{ required "A valid .Values.externalClickhouse.host is required" .Values.externalClickhouse.host }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse port +*/}} +{{- define "sentry.clickhouse.port" -}} +{{- if .Values.clickhouse.enabled -}} +{{- default 9000 .Values.clickhouse.clickhouse.tcp_port }} +{{- else -}} +{{ required "A valid .Values.externalClickhouse.tcpPort is required" .Values.externalClickhouse.tcpPort }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse HTTP port +*/}} +{{- define "sentry.clickhouse.http_port" -}} +{{- if .Values.clickhouse.enabled -}} +{{- default 8123 .Values.clickhouse.clickhouse.http_port }} +{{- else -}} +{{ required "A valid .Values.externalClickhouse.httpPort is required" .Values.externalClickhouse.httpPort }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse Database +*/}} +{{- define "sentry.clickhouse.database" -}} +{{- if .Values.clickhouse.enabled -}} +default +{{- else -}} +{{ required "A valid .Values.externalClickhouse.database is required" .Values.externalClickhouse.database }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse User +*/}} +{{- define "sentry.clickhouse.username" -}} +{{- if .Values.clickhouse.enabled -}} + {{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}} +{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).name }} + {{- else -}} +default + {{- end -}} +{{- else -}} +{{ required "A valid .Values.externalClickhouse.username is required" .Values.externalClickhouse.username }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse Password +*/}} +{{- define "sentry.clickhouse.password" -}} +{{- if .Values.clickhouse.enabled -}} + {{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}} +{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).config.password }} + {{- else -}} + {{- end -}} +{{- else -}} +{{ .Values.externalClickhouse.password }} +{{- end -}} +{{- end -}} + +{{/* +Set ClickHouse cluster name +*/}} +{{- define "sentry.clickhouse.cluster.name" -}} +{{- if .Values.clickhouse.enabled -}} +{{ .Release.Name | printf "%s-clickhouse" }} +{{- else -}} +{{ required "A valid .Values.externalClickhouse.clusterName is required" .Values.externalClickhouse.clusterName }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka Confluent host +*/}} +{{- define "sentry.kafka.host" -}} +{{- if .Values.kafka.enabled -}} +{{- template "sentry.kafka.fullname" . 
-}} +{{- else if and (.Values.externalKafka) (not (.Values.externalKafka.cluster)) -}} +{{ required "A valid .Values.externalKafka.host is required" .Values.externalKafka.host }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka Confluent port +*/}} +{{- define "sentry.kafka.port" -}} +{{- if and (.Values.kafka.enabled) (.Values.kafka.service.ports.client) -}} +{{- .Values.kafka.service.ports.client }} +{{- else if and (.Values.externalKafka) (not (.Values.externalKafka.cluster)) -}} +{{ required "A valid .Values.externalKafka.port is required" .Values.externalKafka.port }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka Confluent Controller port +*/}} +{{- define "sentry.kafka.controller_port" -}} +{{- if and (.Values.kafka.enabled) (.Values.kafka.service.ports.controller ) -}} +{{- .Values.kafka.service.ports.controller }} +{{- else if and (.Values.externalKafka) (not (.Values.externalKafka.cluster)) -}} +{{ required "A valid .Values.externalKafka.port is required" .Values.externalKafka.port }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka bootstrap servers string +*/}} +{{- define "sentry.kafka.bootstrap_servers_string" -}} +{{- if or (.Values.kafka.enabled) (not (.Values.externalKafka.cluster)) -}} +{{ printf "%s:%s" (include "sentry.kafka.host" .) (include "sentry.kafka.port" .) }} +{{- else -}} +{{- range $index, $elem := .Values.externalKafka.cluster -}} +{{- if $index -}},{{- end -}}{{ printf "%s:%s" $elem.host (toString $elem.port) }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +SASL auth setings for Kafka: +* https://github.com/getsentry/snuba/blob/24.9.0/snuba/settings/__init__.py#L220-L230 +* https://github.com/getsentry/sentry/blob/24.9.0/src/sentry/utils/kafka_config.py#L9-L34 +* https://github.com/getsentry/sentry/blob/24.9.0/src/sentry/conf/server.py#L2844-L2853 +*/}} + +{{/* +Set Kafka security protocol +*/}} +{{- define "sentry.kafka.security_protocol" -}} +{{- if .Values.kafka.enabled -}} +{{ default "plaintext" .Values.kafka.listeners.client.protocol }} +{{- else -}} +{{ default "plaintext" .Values.externalKafka.security.protocol }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka sasl mechanism +*/}} +{{- define "sentry.kafka.sasl_mechanism" -}} +{{- $CheckProtocol := include "sentry.kafka.security_protocol" . -}} +{{- if (regexMatch "^SASL_" $CheckProtocol) -}} +{{- if .Values.kafka.enabled -}} +{{ default "None" (split "," .Values.kafka.sasl.enabledMechanisms)._0 }} +{{- else -}} +{{ default "None" .Values.externalKafka.sasl.mechanism }} +{{- end -}} +{{- else -}} +{{ "None" }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka sasl username +*/}} +{{- define "sentry.kafka.sasl_username" -}} +{{- $CheckProtocol := include "sentry.kafka.security_protocol" . -}} +{{- if (regexMatch "^SASL_" $CheckProtocol) -}} +{{- if .Values.kafka.enabled -}} +{{ default "None" (first (default tuple .Values.kafka.sasl.client.users)) }} +{{- else -}} +{{ default "None" .Values.externalKafka.sasl.username }} +{{- end -}} +{{- else -}} +{{ "None" }} +{{- end -}} +{{- end -}} + +{{/* +Set Kafka sasl password +*/}} +{{- define "sentry.kafka.sasl_password" -}} +{{- $CheckProtocol := include "sentry.kafka.security_protocol" . 
-}} +{{- if (regexMatch "^SASL_" $CheckProtocol) -}} +{{- if .Values.kafka.enabled -}} +{{ default "None" (first (default tuple .Values.kafka.sasl.client.passwords)) }} +{{- else -}} +{{ default "None" .Values.externalKafka.sasl.password }} +{{- end -}} +{{- else -}} +{{ "None" }} +{{- end -}} +{{- end -}} + +{{/* +Set Senty compression.type for Kafka +*/}} +{{- define "sentry.kafka.compression_type" -}} +{{- if .Values.kafka.enabled -}} +{{ default "" .Values.sentry.kafka.compression.type }} +{{- else -}} +{{ default "" .Values.externalKafka.compression.type }} +{{- end -}} +{{- end -}} + +{{/* +Set Senty message.max.bytes for Kafka +*/}} +{{- define "sentry.kafka.message_max_bytes" -}} +{{- if .Values.kafka.enabled -}} +{{ default 50000000 .Values.sentry.kafka.message.max.bytes | int64 }} +{{- else -}} +{{ default 50000000 .Values.externalKafka.message.max.bytes | int64 }} +{{- end -}} +{{- end -}} + +{{/* +Set Senty socket.timeout for Kafka +*/}} +{{- define "sentry.kafka.socket_timeout_ms" -}} +{{- if .Values.kafka.enabled -}} +{{ default 1000 .Values.sentry.kafka.socket.timeout.ms | int64 }} +{{- else -}} +{{ default 1000 .Values.externalKafka.socket.timeout.ms | int64 }} +{{- end -}} +{{- end -}} + +{{/* +Set RabbitMQ host +*/}} +{{- define "sentry.rabbitmq.host" -}} +{{- if .Values.rabbitmq.enabled -}} +{{- default "sentry-rabbitmq-ha" (include "sentry.rabbitmq.fullname" .) -}} +{{- else -}} +{{ .Values.rabbitmq.host }} +{{- end -}} +{{- end -}} + +{{/* +Common Snuba environment variables +*/}} +{{- define "sentry.snuba.env" -}} +- name: SNUBA_SETTINGS + value: /etc/snuba/settings.py +- name: DEFAULT_BROKERS + value: {{ include "sentry.kafka.bootstrap_servers_string" . | quote }} +{{- $sentryKafkaSaslMechanism := include "sentry.kafka.sasl_mechanism" . -}} +{{- if not (eq "None" $sentryKafkaSaslMechanism) }} +- name: KAFKA_SASL_MECHANISM + value: {{ $sentryKafkaSaslMechanism | quote}} +{{- end }} +{{- $sentryKafkaSaslUsername := include "sentry.kafka.sasl_username" . -}} +{{- if not (eq "None" $sentryKafkaSaslUsername) }} +- name: KAFKA_SASL_USERNAME + value: {{ $sentryKafkaSaslUsername | quote }} +{{- end }} +{{- $sentryKafkaSaslPassword := include "sentry.kafka.sasl_password" . -}} +{{- if not (eq "None" $sentryKafkaSaslPassword) }} +- name: KAFKA_SASL_PASSWORD + value: {{ $sentryKafkaSaslPassword | quote }} +{{- end }} +- name: KAFKA_SECURITY_PROTOCOL + value: {{ include "sentry.kafka.security_protocol" . | quote }} +{{- if and (.Values.redis.enabled) (.Values.redis.auth.enabled) }} +{{- if .Values.redis.auth.password }} +- name: REDIS_PASSWORD + value: {{ .Values.redis.auth.password | quote }} +{{- else if .Values.redis.auth.existingSecret }} +- name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (include "sentry.redis.fullname" .) 
.Values.redis.auth.existingSecret }} + key: {{ default "redis-password" .Values.redis.auth.existingSecretPasswordKey }} +{{- end }} +{{- else if .Values.externalRedis.password }} +- name: REDIS_PASSWORD + value: {{ .Values.externalRedis.password | quote }} +{{- else if .Values.externalRedis.existingSecret }} +- name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalRedis.existingSecret }} + key: {{ default "redis-password" .Values.externalRedis.existingSecretKey }} +{{- end }} +{{- if .Values.externalClickhouse.existingSecret }} +- name: CLICKHOUSE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalClickhouse.existingSecret }} + key: {{ default "clickhouse-password" .Values.externalClickhouse.existingSecretKey }} +{{- end }} +- name: CLICKHOUSE_MAX_CONNECTIONS + value: {{ .Values.snuba.clickhouse.maxConnections | quote }} +{{- if .Values.ipv6 }} +- name: UWSGI_HTTP_SOCKET + value: "[::]:1218" +{{- end }} +- name: REDIS_PORT + value: {{ default "6379" (include "sentry.redis.port" . | quote ) -}} +{{- end -}} + +{{- define "vroom.env" -}} +- name: SENTRY_KAFKA_BROKERS_PROFILING + value: {{ include "sentry.kafka.bootstrap_servers_string" . | quote }} +- name: SENTRY_KAFKA_BROKERS_OCCURRENCES + value: {{ include "sentry.kafka.bootstrap_servers_string" . | quote }} +- name: SENTRY_BUCKET_PROFILES + value: file://localhost//var/lib/sentry-profiles +- name: SENTRY_SNUBA_HOST + value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" . }} +{{- end -}} + +{{/* +Common Sentry environment variables +*/}} +{{- define "sentry.env" -}} +{{- $redisHost := include "sentry.redis.host" . -}} +{{- $redisPort := include "sentry.redis.port" . -}} +{{- $redisDb := include "sentry.redis.db" . -}} +{{- $redisProto := ternary "rediss" "redis" (eq (include "sentry.redis.ssl" .) "true") -}} +- name: SNUBA + value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" . }} +- name: VROOM + value: http://{{ template "sentry.fullname" . }}-vroom:{{ template "vroom.port" . }} +{{- if .Values.sentry.existingSecret }} +- name: SENTRY_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.sentry.existingSecret }} + key: {{ default "key" .Values.sentry.existingSecretKey }} +{{- else }} +- name: SENTRY_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ template "sentry.fullname" . }}-sentry-secret + key: "key" +{{- end }} +{{- if .Values.postgresql.enabled }} +- name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.auth.existingSecret }} + key: {{ default "postgres-password" .Values.postgresql.auth.secretKeys.adminPasswordKey }} +{{- else if .Values.externalPostgresql.password }} +- name: POSTGRES_PASSWORD + value: {{ .Values.externalPostgresql.password | quote }} +{{- else if .Values.externalPostgresql.existingSecret }} +- name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ or .Values.externalPostgresql.existingSecretKeys.password .Values.externalPostgresql.existingSecretKey "postgresql-password" }} +{{- end }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.username }} +- name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.username }} +{{- else }} +- name: POSTGRES_USER + value: {{ include "sentry.postgresql.username" . 
| quote }} +{{- end }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.database }} +- name: POSTGRES_NAME + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.database }} +{{- else }} +- name: POSTGRES_NAME + value: {{ include "sentry.postgresql.database" . | quote }} +{{- end }} +{{- if .Values.pgbouncer.enabled }} +- name: POSTGRES_HOST + value: {{ template "sentry.fullname" . }}-pgbouncer +{{- else }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.host }} +- name: POSTGRES_HOST + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.host }} +{{- else }} +- name: POSTGRES_HOST + value: {{ include "sentry.postgresql.host" . | quote }} +{{- end }} +{{- end }} +{{- if .Values.pgbouncer.enabled }} +- name: POSTGRES_PORT + value: "5432" +{{- else }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.port }} +- name: POSTGRES_PORT + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.port }} +{{- else }} +- name: POSTGRES_PORT + value: {{ include "sentry.postgresql.port" . | quote }} +{{- end }} +{{- end }} +{{- if and (eq .Values.filestore.backend "s3") .Values.filestore.s3.existingSecret }} +- name: S3_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Values.filestore.s3.existingSecret }} + key: {{ default "s3-access-key-id" .Values.filestore.s3.accessKeyIdRef }} +- name: S3_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.filestore.s3.existingSecret }} + key: {{ default "s3-secret-access-key" .Values.filestore.s3.secretAccessKeyRef }} +{{- end }} +{{- if .Values.redis.enabled }} +{{- if .Values.redis.password }} +- name: REDIS_PASSWORD + value: {{ .Values.redis.password | quote }} +{{- else if .Values.redis.existingSecret }} +- name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (include "sentry.redis.fullname" .) 
.Values.redis.existingSecret }} + key: {{ default "redis-password" .Values.redis.existingSecretKey }} +{{- end }} +{{- else if .Values.externalRedis.password }} +- name: REDIS_PASSWORD + value: {{ .Values.externalRedis.password | quote }} +{{- else if .Values.externalRedis.existingSecret }} +- name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalRedis.existingSecret }} + key: {{ default "redis-password" .Values.externalRedis.existingSecretKey }} +{{- end }} +{{- if and (.Values.redis.enabled) (.Values.redis.auth.existingSecret) }} +- name: HELM_CHARTS_SENTRY_REDIS_PASSWORD_CONTROLLED + valueFrom: + secretKeyRef: + name: {{ .Values.redis.auth.existingSecret }} + key: {{ default "redis-password" .Values.redis.auth.existingSecretPasswordKey }} +- name: BROKER_URL + value: "{{ $redisProto }}://:$(HELM_CHARTS_SENTRY_REDIS_PASSWORD_CONTROLLED)@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}" +{{- else if (.Values.externalRedis.existingSecret) }} +- name: HELM_CHARTS_SENTRY_REDIS_PASSWORD_CONTROLLED + valueFrom: + secretKeyRef: + name: {{ .Values.externalRedis.existingSecret }} + key: {{ default "redis-password" .Values.externalRedis.existingSecretKey }} +- name: BROKER_URL + value: "{{ $redisProto }}://:$(HELM_CHARTS_SENTRY_REDIS_PASSWORD_CONTROLLED)@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}" +{{- end }} +{{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} +- name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} +{{- end }} +{{- if .Values.mail.password }} +- name: SENTRY_EMAIL_PASSWORD + value: {{ .Values.mail.password | quote }} +{{- else if .Values.mail.existingSecret }} +- name: SENTRY_EMAIL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.mail.existingSecret }} + key: {{ default "mail-password" .Values.mail.existingSecretKey }} +{{- end }} +{{- if .Values.slack.existingSecret }} +- name: SLACK_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.slack.existingSecret }} + key: {{ default "client-id" .Values.slack.existingSecretClientId }} +- name: SLACK_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.slack.existingSecret }} + key: {{ default "client-secret" .Values.slack.existingSecretClientSecret }} +- name: SLACK_SIGNING_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.slack.existingSecret }} + key: {{ default "signing-secret" .Values.slack.existingSecretSigningSecret }} +{{- end }} +{{- if .Values.discord.existingSecret }} +- name: DISCORD_APPLICATION_ID + valueFrom: + secretKeyRef: + name: {{ .Values.discord.existingSecret }} + key: {{ default "application-id" .Values.discord.existingSecretApplicationId }} +- name: DISCORD_PUBLIC_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.discord.existingSecret }} + key: {{ default "public-key" .Values.discord.existingSecretPublicKey }} +- name: DISCORD_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.discord.existingSecret }} + key: {{ default "client-secret" .Values.discord.existingSecretClientSecret }} +- name: DISCORD_BOT_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.discord.existingSecret }} + key: {{ default "bot-token" .Values.discord.existingSecretBotToken }} +{{- end }} +{{- if and .Values.github.existingSecret }} +- name: GITHUB_APP_PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.github.existingSecret }} + key: {{ default "private-key" .Values.github.existingSecretPrivateKeyKey }} +- name: GITHUB_APP_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + 
name: {{ .Values.github.existingSecret }} + key: {{ default "webhook-secret" .Values.github.existingSecretWebhookSecretKey }} +- name: GITHUB_APP_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.github.existingSecret }} + key: {{ default "client-id" .Values.github.existingSecretClientIdKey }} +- name: GITHUB_APP_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.github.existingSecret }} + key: {{ default "client-secret" .Values.github.existingSecretClientSecretKey }} +{{- end }} +{{- if .Values.google.existingSecret }} +- name: GOOGLE_AUTH_CLIENT_ID + valueFrom: + secretKeyRef: + name: {{ .Values.google.existingSecret }} + key: {{ default "client-id" .Values.google.existingSecretClientIdKey }} +- name: GOOGLE_AUTH_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ .Values.google.existingSecret }} + key: {{ default "client-secret" .Values.google.existingSecretClientSecretKey }} +{{- end }} +{{- if .Values.openai.existingSecret }} +- name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.openai.existingSecret }} + key: {{ default "api-token" .Values.openai.existingSecretKey }} +{{- end }} +{{- end -}} + +{{- define "sentry.autoscaling.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "autoscaling/v2" -}} +{{- print "autoscaling/v2" -}} +{{- else -}} +{{- print "autoscaling/v1" -}} +{{- end -}} +{{- end -}} + + +{{/* +Pgbouncer environment variables +*/}} +{{- define "sentry.pgbouncer.env" -}} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.host }} +- name: POSTGRESQL_HOST + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.host }} +{{- else }} +- name: POSTGRESQL_HOST + value: {{ include "sentry.postgresql.host" . | quote }} +{{- end }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.port }} +- name: POSTGRESQL_PORT + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.port }} +{{- else }} +- name: POSTGRESQL_PORT + value: {{ include "sentry.postgresql.port" . | quote }} +{{- end }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.database }} +- name: PGBOUNCER_DATABASE + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.database }} +{{- else }} +- name: PGBOUNCER_DATABASE + value: {{ include "sentry.postgresql.database" . | quote }} +{{- end }} +{{- if .Values.postgresql.enabled }} +- name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ default (include "sentry.postgresql.fullname" .) 
.Values.postgresql.auth.existingSecret }} + key: {{ default "postgres-password" .Values.postgresql.auth.secretKeys.adminPasswordKey }} +{{- else if .Values.externalPostgresql.password }} +- name: POSTGRESQL_PASSWORD + value: {{ .Values.externalPostgresql.password | quote }} +{{- else if .Values.externalPostgresql.existingSecret }} +- name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ or .Values.externalPostgresql.existingSecretKeys.password .Values.externalPostgresql.existingSecretKey "postgresql-password" }} +{{- end }} +{{- if and .Values.externalPostgresql.existingSecret .Values.externalPostgresql.existingSecretKeys.username }} +- name: POSTGRESQL_USERNAME + valueFrom: + secretKeyRef: + name: {{ .Values.externalPostgresql.existingSecret }} + key: {{ default .Values.externalPostgresql.existingSecretKeys.username }} +{{- else }} +- name: POSTGRESQL_USERNAME + value: {{ include "sentry.postgresql.username" . | quote }} +{{- end }} +{{- end -}} diff --git a/sentry/templates/configmap-memcached.yaml b/charts/sentry/templates/configmap-memcached.yaml similarity index 82% rename from sentry/templates/configmap-memcached.yaml rename to charts/sentry/templates/configmap-memcached.yaml index abe359071..7df7930e7 100644 --- a/sentry/templates/configmap-memcached.yaml +++ b/charts/sentry/templates/configmap-memcached.yaml @@ -1,4 +1,5 @@ --- +{{- if .Values.sourcemaps.enabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -6,3 +7,4 @@ metadata: data: MEMCACHED_MEMORY_LIMIT: "{{ .Values.memcached.memoryLimit }}" MEMCACHED_MAX_ITEM_SIZE: "{{ .Values.memcached.maxItemSize }}" +{{- end }} diff --git a/charts/sentry/templates/configmap-nginx.yaml b/charts/sentry/templates/configmap-nginx.yaml new file mode 100644 index 000000000..9d64ffaa3 --- /dev/null +++ b/charts/sentry/templates/configmap-nginx.yaml @@ -0,0 +1,66 @@ +{{- if .Values.nginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sentry.fullname" . }}-nginx +data: + server-block.conf: | + {{ if .Values.relay.enabled }} + upstream relay { + server {{ template "sentry.fullname" . }}-relay:{{ template "relay.port" }}; + } + {{ end -}} + + upstream sentry { + server {{ template "sentry.fullname" . 
}}-web:{{ template "sentry.port" }}; + } + + server { + listen {{ template "nginx.port" }}; + {{- if .Values.ipv6 }} + listen [::]:{{ template "nginx.port" }}; + {{- end }} + + proxy_redirect off; + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + proxy_set_header Host $host; + + {{ if .Values.relay.enabled }} + location /api/store/ { + proxy_pass http://relay; + } + + location ~ ^/api/[1-9]\d*/ { + proxy_pass http://relay; + } + + location ^~ /api/0/relays/ { + proxy_pass http://relay; + } + {{ end -}} + + {{ if or .Values.nginx.metrics.enabled .Values.nginx.metrics.serviceMonitor.enabled -}} + location = /status/ { + stub_status on; + access_log off; + } + + {{ end -}} + + location / { + proxy_pass http://sentry; + } + + location /_static/ { + proxy_pass http://sentry; + proxy_hide_header Content-Disposition; + } + + {{- if .Values.nginx.extraLocationSnippet }} + {{ .Values.nginx.extraLocationSnippet | nindent 6 }} + {{- end }} + + } +{{- end }} diff --git a/charts/sentry/templates/deployment-geoip-job.yaml b/charts/sentry/templates/deployment-geoip-job.yaml new file mode 100644 index 000000000..58c88754d --- /dev/null +++ b/charts/sentry/templates/deployment-geoip-job.yaml @@ -0,0 +1,42 @@ +{{- if .Values.geodata.accountID }} +apiVersion: batch/v1 +kind: Job +metadata: + name: geoip-install-job + annotations: + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "9" +spec: + template: + spec: + initContainers: + - name: init-create-geoip-dir + image: busybox + command: ['sh', '-c', 'mkdir -p /usr/share/GeoIP'] + volumeMounts: + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + - name: init-geoip-conf + image: busybox + command: ['sh', '-c', 'echo -e "AccountID $(echo $GEOIPUPDATE_ACCOUNT_ID)\nLicenseKey $(echo $GEOIPUPDATE_LICENSE_KEY)\nEditionIDs $(echo $GEOIPUPDATE_EDITION_IDS)" > /usr/share/GeoIP/GeoIP.conf'] + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-geoip-env + volumeMounts: + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + containers: + - name: geoipupdate + image: ghcr.io/maxmind/geoipupdate:v7.0.1 + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-geoip-env + volumeMounts: + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + volumes: + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: data-sentry-geoip + restartPolicy: OnFailure +{{- end }} diff --git a/charts/sentry/templates/extra-manifest.yaml b/charts/sentry/templates/extra-manifest.yaml new file mode 100644 index 000000000..0e5456bfd --- /dev/null +++ b/charts/sentry/templates/extra-manifest.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraManifests }} +--- +{{ tpl (toYaml .) $ }} +{{- end }} diff --git a/sentry/templates/gke/backendconfig-sentry-relay.yaml b/charts/sentry/templates/gke/backendconfig-sentry-relay.yaml similarity index 63% rename from sentry/templates/gke/backendconfig-sentry-relay.yaml rename to charts/sentry/templates/gke/backendconfig-sentry-relay.yaml index 22e40ff57..544b4148c 100644 --- a/sentry/templates/gke/backendconfig-sentry-relay.yaml +++ b/charts/sentry/templates/gke/backendconfig-sentry-relay.yaml @@ -1,5 +1,5 @@ {{- if and (.Values.ingress.enabled) (eq (default "nginx" .Values.ingress.regexPathStyle) "gke") }} -apiVersion: cloud.google.com/v1beta1 +apiVersion: cloud.google.com/v1 kind: BackendConfig metadata: name: {{ include "sentry.fullname" . 
}}-relay @@ -10,6 +10,14 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" spec: + {{- if .Values.relay.customResponseHeaders }} + customResponseHeaders: + headers: + {{- if .Values.ingress.tls }} + - "strict-transport-security: max-age=31536000; includeSubDomains" + {{- end -}} + {{ toYaml .Values.relay.customResponseHeaders | nindent 6 }} + {{- end }} healthCheck: checkIntervalSec: {{ .Values.relay.probePeriodSeconds }} timeoutSec: {{ .Values.relay.probeTimeoutSeconds }} @@ -18,4 +26,8 @@ spec: type: HTTP requestPath: {{ template "relay.healthCheck.requestPath" }} port: {{ template "relay.port" . }} -{{- end }} + {{- if .Values.relay.securityPolicy }} + securityPolicy: + name: {{ .Values.relay.securityPolicy }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/sentry/templates/gke/backendconfig-sentry-web.yaml b/charts/sentry/templates/gke/backendconfig-sentry-web.yaml similarity index 64% rename from sentry/templates/gke/backendconfig-sentry-web.yaml rename to charts/sentry/templates/gke/backendconfig-sentry-web.yaml index 143d1907c..3011ec862 100644 --- a/sentry/templates/gke/backendconfig-sentry-web.yaml +++ b/charts/sentry/templates/gke/backendconfig-sentry-web.yaml @@ -1,5 +1,5 @@ {{- if and (.Values.ingress.enabled) (eq (default "nginx" .Values.ingress.regexPathStyle) "gke") }} -apiVersion: cloud.google.com/v1beta1 +apiVersion: cloud.google.com/v1 kind: BackendConfig metadata: name: {{ include "sentry.fullname" . }}-web @@ -10,6 +10,14 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" spec: + {{- if .Values.sentry.web.customResponseHeaders }} + customResponseHeaders: + headers: + {{- if .Values.ingress.tls }} + - "strict-transport-security: max-age=31536000; includeSubDomains" + {{- end -}} + {{ toYaml .Values.sentry.web.customResponseHeaders | nindent 6 }} + {{- end }} healthCheck: checkIntervalSec: {{ .Values.sentry.web.probePeriodSeconds }} timeoutSec: {{ .Values.sentry.web.probeTimeoutSeconds }} @@ -18,4 +26,8 @@ spec: type: HTTP requestPath: {{ template "sentry.healthCheck.requestPath" }} port: {{ .Values.service.externalPort }} + {{- if .Values.sentry.web.securityPolicy }} + securityPolicy: + name: {{ .Values.sentry.web.securityPolicy }} + {{- end }} {{- end }} \ No newline at end of file diff --git a/sentry/templates/hooks/sentry-db-check.job.yaml b/charts/sentry/templates/hooks/sentry-db-check.job.yaml similarity index 56% rename from sentry/templates/hooks/sentry-db-check.job.yaml rename to charts/sentry/templates/hooks/sentry-db-check.job.yaml index 4b21a1870..082c02bde 100644 --- a/sentry/templates/hooks/sentry-db-check.job.yaml +++ b/charts/sentry/templates/hooks/sentry-db-check.job.yaml @@ -1,8 +1,9 @@ -{{- if .Values.hooks.enabled -}} +{{- if and .Values.hooks.enabled .Values.hooks.dbCheck.enabled -}} {{- $clickhouseHost := include "sentry.clickhouse.host" . -}} {{- $clickhousePort := include "sentry.clickhouse.port" . -}} {{- $kafkaHost := include "sentry.kafka.host" . -}} {{- $kafkaPort := include "sentry.kafka.port" . -}} +{{- $kafkaControllerPort := include "sentry.kafka.controller_port" . -}} apiVersion: batch/v1 kind: Job metadata: @@ -15,10 +16,13 @@ metadata: annotations: # This is what defines this resource as a hook. Without this line, the # job is considered part of the release. 
- "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook": "post-install,{{ if .Values.hooks.preUpgrade }}pre-upgrade{{ else }}post-upgrade{{ end }}" "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" "helm.sh/hook-weight": "-1" spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} template: metadata: name: {{ template "sentry.fullname" . }}-db-check @@ -35,6 +39,9 @@ spec: {{- if .Values.sentry.worker.podLabels }} {{ toYaml .Values.sentry.worker.podLabels | indent 8 }} {{- end }} + {{- if .Values.hooks.dbCheck.podLabels }} +{{ toYaml .Values.hooks.dbCheck.podLabels | indent 8 }} + {{- end }} spec: {{- if .Values.hooks.dbCheck.affinity }} affinity: @@ -43,19 +50,30 @@ spec: {{- if .Values.hooks.dbCheck.nodeSelector }} nodeSelector: {{ toYaml .Values.hooks.dbCheck.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.hooks.dbCheck.tolerations }} tolerations: {{ toYaml .Values.hooks.dbCheck.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} restartPolicy: Never {{- if .Values.hooks.dbCheck.image.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.hooks.dbCheck.image.imagePullSecrets | indent 8 }} {{- end }} + {{- if .Values.hooks.dbCheck.securityContext }} + securityContext: +{{ toYaml .Values.hooks.dbCheck.securityContext | indent 8 }} + {{- else }} {{- if .Values.hooks.securityContext }} securityContext: {{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} {{- end }} containers: - name: db-check @@ -101,23 +119,57 @@ spec: while [ $KAFKA_STATUS -eq 0 ]; do KAFKA_STATUS=1 {{- if .Values.kafka.enabled }} - KAFKA_REPLICAS={{ .Values.kafka.replicaCount }} - i=0; while [ $i -lt $KAFKA_REPLICAS ]; do - KAFKA_HOST={{ $kafkaHost }}-$i.{{ $kafkaHost }}-headless - if ! nc -z "$KAFKA_HOST" {{ $kafkaPort }}; then - KAFKA_STATUS=0 - echo "$KAFKA_HOST is not available yet" + + {{- if .Values.kafka.zookeeper.enabled }} + KAFKA_REPLICAS={{ .Values.kafka.broker.replicaCount | default 3 }} + echo "Kafka Zookeeper is enabled, checking if kafka brokers are up" + KZ_STATUS=0 + while [ $KZ_STATUS -eq 0 ]; do + KZ_STATUS=1 + i=0; while [ $i -lt $KAFKA_REPLICAS ]; do + KZ_HOST={{ $kafkaHost }}-broker-$i.{{ $kafkaHost }}-broker-headless + if ! nc -z "$KZ_HOST" {{ $kafkaPort }}; then + KZ_STATUS=0 + echo "$KZ_HOST is not available yet" + fi + i=$((i+1)) + done + if [ "$KZ_STATUS" -eq 0 ]; then + echo "Kafka not ready. Sleeping for 10s before trying again" + sleep 10; + fi + done + echo "Zookeeper is up" + {{- end }} + {{- if .Values.kafka.kraft.enabled }} + KAFKA_REPLICAS={{ .Values.kafka.controller.replicaCount | default 3 }} + echo "Kafka Kraft is enabled, checking if Kraft controllers are up" + KRAFT_STATUS=0 + while [ $KRAFT_STATUS -eq 0 ]; do + KRAFT_STATUS=1 + i=0; while [ $i -lt $KAFKA_REPLICAS ]; do + KRAFT_HOST={{ $kafkaHost }}-controller-$i.{{ $kafkaHost }}-controller-headless + if ! nc -z "$KRAFT_HOST" {{ $kafkaControllerPort }}; then + KRAFT_STATUS=0 + echo "$KRAFT_HOST is not available yet" + fi + i=$((i+1)) + done + if [ "$KRAFT_STATUS" -eq 0 ]; then + echo "Kraft controllers not ready. 
Sleeping for 10s before trying again" + sleep 10; fi - i=$((i+1)) done - {{- else if (not (kindIs "slice" .Values.externalKafka)) }} + echo "Kraft controllers are up" + {{- end }} + {{- else if (not (.Values.externalKafka.cluster)) }} KAFKA_HOST={{ .Values.externalKafka.host }} if ! nc -z "$KAFKA_HOST" {{ $kafkaPort }}; then KAFKA_STATUS=0 echo "$KAFKA_HOST is not available yet" fi {{- else }} - {{- range $elem := .Values.externalKafka }} + {{- range $elem := .Values.externalKafka.cluster }} KAFKA_HOST={{ $elem.host }} if ! nc -z "$KAFKA_HOST" {{ $elem.port }}; then KAFKA_STATUS=0 @@ -131,10 +183,36 @@ spec: fi done echo "Kafka is up" +{{- if .Values.hooks.dbCheck.volumeMounts }} + volumeMounts: +{{ toYaml .Values.hooks.dbCheck.volumeMounts | indent 8 }} +{{- end }} env: {{- if .Values.hooks.dbCheck.env }} {{ toYaml .Values.hooks.dbCheck.env | indent 8 }} {{- end }} resources: {{ toYaml .Values.hooks.dbCheck.resources | indent 10 }} +{{- if .Values.hooks.dbCheck.containerSecurityContext }} + securityContext: +{{ toYaml .Values.hooks.dbCheck.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.hooks.dbCheck.sidecars }} +{{ toYaml .Values.hooks.dbCheck.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} +{{- if or .Values.hooks.dbCheck.volumes .Values.global.volumes }} + volumes: +{{- if .Values.hooks.dbCheck.volumes }} +{{ toYaml .Values.hooks.dbCheck.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} {{- end }} diff --git a/sentry/templates/hooks/sentry-db-init.job.yaml b/charts/sentry/templates/hooks/sentry-db-init.job.yaml similarity index 65% rename from sentry/templates/hooks/sentry-db-init.job.yaml rename to charts/sentry/templates/hooks/sentry-db-init.job.yaml index 510b367a6..cdc185bd2 100644 --- a/sentry/templates/hooks/sentry-db-init.job.yaml +++ b/charts/sentry/templates/hooks/sentry-db-init.job.yaml @@ -1,4 +1,4 @@ -{{- if .Values.hooks.enabled -}} +{{- if and .Values.hooks.enabled .Values.hooks.dbInit.enabled -}} apiVersion: batch/v1 kind: Job metadata: @@ -11,15 +11,18 @@ metadata: annotations: # This is what defines this resource as a hook. Without this line, the # job is considered part of the release. - "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook": "post-install,{{ if .Values.hooks.preUpgrade }}pre-upgrade{{ else }}post-upgrade{{ end }}" "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" "helm.sh/hook-weight": "6" spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} template: metadata: name: {{ template "sentry.fullname" . }}-db-init annotations: - checksum/configmap.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/configmap.yaml: {{ include "sentry.config" . 
| sha256sum }} {{- if .Values.sentry.worker.annotations }} {{ toYaml .Values.sentry.worker.annotations | indent 8 }} {{- end }} @@ -32,6 +35,9 @@ spec: {{- if .Values.sentry.worker.podLabels }} {{ toYaml .Values.sentry.worker.podLabels | indent 8 }} {{- end }} + {{- if .Values.hooks.dbInit.podLabels }} +{{ toYaml .Values.hooks.dbInit.podLabels | indent 8 }} + {{- end }} spec: {{- if .Values.hooks.dbInit.affinity }} affinity: @@ -40,10 +46,16 @@ spec: {{- if .Values.hooks.dbInit.nodeSelector }} nodeSelector: {{ toYaml .Values.hooks.dbInit.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.hooks.dbInit.tolerations }} tolerations: {{ toYaml .Values.hooks.dbInit.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} restartPolicy: Never {{- if .Values.images.sentry.imagePullSecrets }} @@ -57,9 +69,14 @@ spec: dnsConfig: {{ toYaml .Values.dnsConfig | indent 8 }} {{- end }} + {{- if .Values.hooks.dbInit.securityContext }} + securityContext: +{{ toYaml .Values.hooks.dbInit.securityContext | indent 8 }} + {{- else }} {{- if .Values.hooks.securityContext }} securityContext: {{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} {{- end }} containers: - name: db-init-job @@ -67,13 +84,7 @@ spec: imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} command: ["sentry","upgrade","--noinput"] env: - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} +{{ include "sentry.env" . | indent 8 }} {{- if .Values.hooks.dbInit.env }} {{ toYaml .Values.hooks.dbInit.env | indent 8 }} {{- end }} @@ -81,10 +92,20 @@ spec: - mountPath: /etc/sentry name: config readOnly: true +{{- if .Values.hooks.dbInit.volumeMounts }} +{{ toYaml .Values.hooks.dbInit.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.hooks.dbInit.resources | indent 10 }} +{{- if .Values.hooks.dbInit.containerSecurityContext }} + securityContext: +{{ toYaml .Values.hooks.dbInit.containerSecurityContext | indent 10 }} +{{- end }} {{- if .Values.hooks.dbInit.sidecars }} {{ toYaml .Values.hooks.dbInit.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} volumes: - name: config @@ -93,4 +114,10 @@ spec: {{- if .Values.hooks.dbInit.volumes }} {{ toYaml .Values.hooks.dbInit.volumes | indent 6 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} {{- end -}} diff --git a/charts/sentry/templates/hooks/sentry-secret-create.yaml b/charts/sentry/templates/hooks/sentry-secret-create.yaml new file mode 100644 index 000000000..267503217 --- /dev/null +++ b/charts/sentry/templates/hooks/sentry-secret-create.yaml @@ -0,0 +1,17 @@ +{{- if not .Values.sentry.existingSecret -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "sentry.fullname" . 
}}-sentry-secret + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-weight": "3" +type: Opaque +data: + key: {{ randAlphaNum 50 | b64enc | quote }} +{{- end -}} diff --git a/sentry/templates/hooks/snuba-db-init.job.yaml b/charts/sentry/templates/hooks/snuba-db-init.job.yaml similarity index 57% rename from sentry/templates/hooks/snuba-db-init.job.yaml rename to charts/sentry/templates/hooks/snuba-db-init.job.yaml index 591ed8d69..5ae8c3059 100644 --- a/sentry/templates/hooks/snuba-db-init.job.yaml +++ b/charts/sentry/templates/hooks/snuba-db-init.job.yaml @@ -1,4 +1,4 @@ -{{- if .Values.hooks.enabled -}} +{{- if and .Values.hooks.enabled .Values.hooks.snubaInit.enabled -}} {{- $clickhouseHost := include "sentry.clickhouse.host" . -}} apiVersion: batch/v1 kind: Job @@ -12,16 +12,19 @@ metadata: annotations: # This is what defines this resource as a hook. Without this line, the # job is considered part of the release. - "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook": "post-install,{{ if .Values.hooks.preUpgrade }}pre-upgrade{{ else }}post-upgrade{{ end }}" "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" "helm.sh/hook-weight": "3" spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} template: metadata: name: {{ template "sentry.fullname" . }}-snuba-db-init annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} {{- if .Values.snuba.annotations }} {{ toYaml .Values.snuba.annotations | indent 8 }} {{- end }} @@ -34,6 +37,9 @@ spec: {{- if .Values.snuba.podLabels }} {{ toYaml .Values.snuba.podLabels | indent 8 }} {{- end }} + {{- if .Values.hooks.snubaInit.podLabels }} +{{ toYaml .Values.hooks.snubaInit.podLabels | indent 8 }} + {{- end }} spec: {{- if .Values.hooks.snubaInit.affinity }} affinity: @@ -42,10 +48,16 @@ spec: {{- if .Values.hooks.snubaInit.nodeSelector }} nodeSelector: {{ toYaml .Values.hooks.snubaInit.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.hooks.snubaInit.tolerations }} tolerations: {{ toYaml .Values.hooks.snubaInit.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} restartPolicy: Never {{- if .Values.images.snuba.imagePullSecrets }} @@ -59,14 +71,28 @@ spec: dnsConfig: {{ toYaml .Values.dnsConfig | indent 8 }} {{- end }} + {{- if .Values.hooks.snubaInit.securityContext }} + securityContext: +{{ toYaml .Values.hooks.snubaInit.securityContext | indent 8 }} + {{- else }} {{- if .Values.hooks.securityContext }} securityContext: {{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} {{- end }} containers: - name: snuba-init image: "{{ template "snuba.image" . 
}}" - command: [snuba, bootstrap, --no-migrate, --force] + command: + - snuba + - bootstrap + - --no-migrate + {{- if .Values.hooks.snubaInit.kafka.enabled }} + - --kafka + {{- else }} + - --no-kafka + {{- end }} + - --force env: - name: LOG_LEVEL value: debug @@ -81,10 +107,32 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if .Values.hooks.snubaInit.volumeMounts }} +{{ toYaml .Values.hooks.snubaInit.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.hooks.snubaInit.resources | indent 10 }} +{{- if .Values.hooks.snubaInit.containerSecurityContext }} + securityContext: +{{ toYaml .Values.hooks.snubaInit.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.hooks.snubaInit.sidecars }} +{{ toYaml .Values.hooks.snubaInit.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.hooks.snubaInit.volumes }} +{{ toYaml .Values.hooks.snubaInit.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} {{- end }} diff --git a/sentry/templates/hooks/snuba-migrate.job.yaml b/charts/sentry/templates/hooks/snuba-migrate.job.yaml similarity index 60% rename from sentry/templates/hooks/snuba-migrate.job.yaml rename to charts/sentry/templates/hooks/snuba-migrate.job.yaml index b2977dbf4..323432bdc 100644 --- a/sentry/templates/hooks/snuba-migrate.job.yaml +++ b/charts/sentry/templates/hooks/snuba-migrate.job.yaml @@ -1,4 +1,4 @@ -{{- if .Values.hooks.enabled -}} +{{- if and .Values.hooks.enabled .Values.hooks.snubaMigrate.enabled -}} {{- $clickhouseHost := include "sentry.clickhouse.host" . -}} apiVersion: batch/v1 kind: Job @@ -12,16 +12,19 @@ metadata: annotations: # This is what defines this resource as a hook. Without this line, the # job is considered part of the release. - "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook": "post-install,{{ if .Values.hooks.preUpgrade }}pre-upgrade{{ else }}post-upgrade{{ end }}" "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" "helm.sh/hook-weight": "5" spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} template: metadata: name: {{ template "sentry.fullname" . }}-snuba-migrate annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.annotations }} {{ toYaml .Values.snuba.annotations | indent 8 }} {{- end }} @@ -34,6 +37,9 @@ spec: {{- if .Values.snuba.podLabels }} {{ toYaml .Values.snuba.podLabels | indent 8 }} {{- end }} + {{- if .Values.hooks.snubaMigrate.podLabels }} +{{ toYaml .Values.hooks.snubaMigrate.podLabels | indent 8 }} + {{- end }} spec: {{- if .Values.hooks.snubaInit.affinity }} affinity: @@ -42,10 +48,16 @@ spec: {{- if .Values.hooks.snubaInit.nodeSelector }} nodeSelector: {{ toYaml .Values.hooks.snubaInit.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.hooks.snubaInit.tolerations }} tolerations: {{ toYaml .Values.hooks.snubaInit.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} restartPolicy: Never {{- if .Values.images.snuba.imagePullSecrets }} @@ -59,9 +71,14 @@ spec: dnsConfig: {{ toYaml .Values.dnsConfig | indent 8 }} {{- end }} + {{- if .Values.hooks.snubaMigrate.securityContext }} + securityContext: +{{ toYaml .Values.hooks.snubaMigrate.securityContext | indent 8 }} + {{- else }} {{- if .Values.hooks.securityContext }} securityContext: {{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} {{- end }} containers: - name: snuba-migrate @@ -81,10 +98,32 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if .Values.hooks.snubaInit.volumeMounts }} +{{ toYaml .Values.hooks.snubaInit.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.hooks.snubaInit.resources | indent 10 }} +{{- if .Values.hooks.snubaMigrate.containerSecurityContext }} + securityContext: +{{ toYaml .Values.hooks.snubaMigrate.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.hooks.snubaMigrate.sidecars }} +{{ toYaml .Values.hooks.snubaMigrate.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.hooks.snubaInit.volumes }} +{{ toYaml .Values.hooks.snubaInit.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} {{- end }} diff --git a/sentry/templates/hooks/user-create.yaml b/charts/sentry/templates/hooks/user-create.yaml similarity index 58% rename from sentry/templates/hooks/user-create.yaml rename to charts/sentry/templates/hooks/user-create.yaml index 69d682a9e..ad3bf7393 100644 --- a/sentry/templates/hooks/user-create.yaml +++ b/charts/sentry/templates/hooks/user-create.yaml @@ -9,15 +9,18 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" annotations: - "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook": "post-install,{{ if .Values.hooks.preUpgrade }}pre-upgrade{{ else }}post-upgrade{{ end }}" "helm.sh/hook-delete-policy": "{{ if .Values.hooks.removeOnSuccess }}hook-succeeded,{{ end }}before-hook-creation" "helm.sh/hook-weight": "9" spec: + {{- if .Values.hooks.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.hooks.activeDeadlineSeconds }} + {{- end}} template: metadata: name: {{ template "sentry.fullname" . 
}}-user-create annotations: - checksum/configmap.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/configmap.yaml: {{ include "sentry.config" . | sha256sum }} {{- if .Values.sentry.worker.annotations }} {{ toYaml .Values.sentry.worker.annotations | indent 8 }} {{- end }} @@ -31,10 +34,24 @@ spec: {{ toYaml .Values.sentry.worker.podLabels | indent 8 }} {{- end }} spec: + {{- if .Values.hooks.dbInit.affinity }} + affinity: +{{ toYaml .Values.hooks.dbInit.affinity | indent 8 }} + {{- end }} + {{- if .Values.hooks.dbInit.nodeSelector }} + nodeSelector: +{{ toYaml .Values.hooks.dbInit.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} restartPolicy: Never {{- if .Values.hooks.dbInit.tolerations }} tolerations: {{ toYaml .Values.hooks.dbInit.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -47,19 +64,25 @@ spec: dnsConfig: {{ toYaml .Values.dnsConfig | indent 8 }} {{- end }} + {{- if .Values.hooks.dbInit.securityContext }} + securityContext: +{{ toYaml .Values.hooks.dbInit.securityContext | indent 8 }} + {{- else }} {{- if .Values.hooks.securityContext }} securityContext: {{ toYaml .Values.hooks.securityContext | indent 8 }} + {{- end }} {{- end }} containers: - name: user-create-job image: "{{ template "sentry.image" . }}" imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} - command: ["/bin/bash", "-c"] - # Create user but do not exit 1 when user already exists (exit code 3 from createuser command) - # https://docs.sentry.io/server/cli/createuser/ - args: - - > + command: + - "/bin/bash" + - "-c" + # Create user but do not exit 1 when user already exists (exit code 3 from createuser command) + # https://docs.sentry.io/server/cli/createuser/ + - | sentry createuser \ --no-input \ --superuser \ @@ -71,6 +94,7 @@ spec: exit 1; \ fi env: +{{ include "sentry.env" . | indent 8 }} {{- if .Values.user.existingSecret }} - name: ADMIN_PASSWORD valueFrom: @@ -80,25 +104,40 @@ spec: {{- else if .Values.user.password }} - name: ADMIN_PASSWORD value: {{ .Values.user.password | quote }} - {{- end }} - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} {{- end }} {{- if .Values.hooks.dbInit.env }} -{{ toYaml .Values.hooks.dbInit.env | indent 10 }} +{{ toYaml .Values.hooks.dbInit.env | indent 8 }} {{- end }} volumeMounts: - mountPath: /etc/sentry name: config readOnly: true +{{- if .Values.hooks.dbInit.volumeMounts }} +{{ toYaml .Values.hooks.dbInit.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.hooks.dbInit.resources | indent 10 }} +{{- if .Values.hooks.dbInit.containerSecurityContext }} + securityContext: +{{ toYaml .Values.hooks.dbInit.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.hooks.dbInit.sidecars }} +{{ toYaml .Values.hooks.dbInit.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} volumes: - name: config configMap: name: {{ template "sentry.fullname" . 
}}-sentry +{{- if .Values.hooks.dbInit.volumes }} +{{ toYaml .Values.hooks.dbInit.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.hooks.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.hooks.shareProcessNamespace }} + {{- end }} {{- end -}} diff --git a/sentry/templates/ingress.yaml b/charts/sentry/templates/ingress.yaml similarity index 90% rename from sentry/templates/ingress.yaml rename to charts/sentry/templates/ingress.yaml index d84927257..88f2b6e5d 100644 --- a/sentry/templates/ingress.yaml +++ b/charts/sentry/templates/ingress.yaml @@ -142,6 +142,24 @@ spec: - path: {{ default "/" .Values.ingress.path }}api/{[1-9][0-9]*}/{(.*)} {{- else }} - path: {{ default "/" .Values.ingress.path }}api/[1-9][0-9]*/(.*) + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ template "sentry.fullname" . }}-relay + port: + number: {{ template "relay.port" . }} + {{- else }} + serviceName: {{ template "sentry.fullname" . }}-relay + servicePort: {{ template "relay.port" . }} + {{- end }} + {{- if eq (default "nginx" .Values.ingress.regexPathStyle) "traefik" }} + - path: {{ default "/" .Values.ingress.path }}api/0/relays/{(.*)} + {{- else }} + - path: {{ default "/" .Values.ingress.path }}api/0/relays/(.*) {{- end }} {{- if $ingressSupportsPathType }} pathType: {{ $ingressPathType }} diff --git a/charts/sentry/templates/pgbouncer/pgbouncer-deployment.yaml b/charts/sentry/templates/pgbouncer/pgbouncer-deployment.yaml new file mode 100644 index 000000000..ed839602c --- /dev/null +++ b/charts/sentry/templates/pgbouncer/pgbouncer-deployment.yaml @@ -0,0 +1,65 @@ +{{- if .Values.pgbouncer.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-pgbouncer + labels: + app: {{ template "sentry.fullname" . }}-pgbouncer +spec: + replicas: {{ .Values.pgbouncer.replicas }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }}-pgbouncer + {{- if .Values.pgbouncer.updateStrategy }} + strategy: + {{ toYaml .Values.pgbouncer.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: {{ template "sentry.fullname" . }}-pgbouncer + spec: + containers: + - name: pgbouncer + image: {{ .Values.pgbouncer.image.repository }}:{{ .Values.pgbouncer.image.tag }} + imagePullPolicy: {{ .Values.pgbouncer.image.pullPolicy }} + resources: + {{ toYaml .Values.pgbouncer.resources | nindent 10 }} + env: + {{ include "sentry.pgbouncer.env" . 
| nindent 10 }} + - name: PGBOUNCER_PORT + value: "5432" + - name: PGBOUNCER_AUTH_TYPE + value: {{ .Values.pgbouncer.authType | quote }} + - name: PGBOUNCER_MAX_CLIENT_CONN + value: {{ .Values.pgbouncer.maxClientConn | quote }} + - name: PGBOUNCER_DEFAULT_POOL_SIZE + value: {{ .Values.pgbouncer.poolSize | quote }} + - name: PGBOUNCER_POOL_MODE + value: {{ .Values.pgbouncer.poolMode | quote }} + ports: + - containerPort: 5432 + name: pgbouncer + protocol: TCP + {{- if .Values.pgbouncer.nodeSelector }} + nodeSelector: + {{ toYaml .Values.pgbouncer.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.pgbouncer.tolerations }} + tolerations: + {{ toYaml .Values.pgbouncer.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.pgbouncer.affinity }} + affinity: + {{ toYaml .Values.pgbouncer.affinity | nindent 8 }} + {{- end }} + {{- if .Values.pgbouncer.topologySpreadConstraints }} + topologySpreadConstraints: + {{ toYaml .Values.pgbouncer.topologySpreadConstraints | nindent 8 }} + {{- end }} + {{- if .Values.pgbouncer.priorityClassName }} + priorityClassName: "{{ .Values.pgbouncer.priorityClassName }}" + {{- end }} + terminationGracePeriodSeconds: 10 +{{- end }} diff --git a/charts/sentry/templates/pgbouncer/pgbouncer-pdb.yaml b/charts/sentry/templates/pgbouncer/pgbouncer-pdb.yaml new file mode 100644 index 000000000..338bb080e --- /dev/null +++ b/charts/sentry/templates/pgbouncer/pgbouncer-pdb.yaml @@ -0,0 +1,18 @@ +{{- if .Values.pgbouncer.enabled }} +{{- if .Values.pgbouncer.podDisruptionBudget.enabled }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "sentry.fullname" . }}-pgbouncer +spec: + {{- if and .Values.pgbouncer.podDisruptionBudget.minAvailable (not (hasKey .Values.pgbouncer.podDisruptionBudget "maxUnavailable")) }} + minAvailable: {{ .Values.pgbouncer.podDisruptionBudget.minAvailable }} + {{- else if .Values.pgbouncer.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.pgbouncer.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }}-pgbouncer +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/pgbouncer/pgbouncer-service.yaml b/charts/sentry/templates/pgbouncer/pgbouncer-service.yaml new file mode 100644 index 000000000..4f7f3adcb --- /dev/null +++ b/charts/sentry/templates/pgbouncer/pgbouncer-service.yaml @@ -0,0 +1,14 @@ +{{- if .Values.pgbouncer.enabled }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "sentry.fullname" . }}-pgbouncer +spec: + selector: + app: {{ template "sentry.fullname" . 
}}-pgbouncer + ports: + - name: pgbouncer + port: 5432 + targetPort: 5432 +{{- end }} diff --git a/charts/sentry/templates/pvc-geoip.yaml b/charts/sentry/templates/pvc-geoip.yaml new file mode 100644 index 000000000..42814a492 --- /dev/null +++ b/charts/sentry/templates/pvc-geoip.yaml @@ -0,0 +1,23 @@ +{{- if .Values.geodata.accountID }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: data-sentry-geoip + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: + "helm.sh/hook": "pre-install,pre-upgrade" + "helm.sh/hook-weight": "-1" +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: {{ .Values.geodata.persistence.size }} +{{- if .Values.geodata.persistence.storageClass }} + storageClassName: {{ .Values.geodata.persistence.storageClass | quote }} +{{- end }} +{{- end }} diff --git a/sentry/templates/pvc.yaml b/charts/sentry/templates/pvc.yaml similarity index 100% rename from sentry/templates/pvc.yaml rename to charts/sentry/templates/pvc.yaml diff --git a/charts/sentry/templates/relay/_helper-sentry-relay.tpl b/charts/sentry/templates/relay/_helper-sentry-relay.tpl new file mode 100644 index 000000000..83f17272f --- /dev/null +++ b/charts/sentry/templates/relay/_helper-sentry-relay.tpl @@ -0,0 +1,120 @@ +{{- define "sentry.relay.config" -}} +{{- $redisHost := include "sentry.redis.host" . -}} +{{- $redisPort := include "sentry.redis.port" . -}} +{{- $redisPass := include "sentry.redis.password" . -}} +{{- $redisDb := include "sentry.redis.db" . -}} +{{- $redisProto := ternary "rediss" "redis" (eq (include "sentry.redis.ssl" .) "true") -}} +config.yml: |- + relay: + {{- if .Values.relay.mode }} + mode: {{ .Values.relay.mode }} + {{- end }} + upstream: "http://{{ template "sentry.fullname" . }}-web:{{ .Values.service.externalPort }}/" + {{- if .Values.ipv6 }} + host: "::" + {{- else }} + host: 0.0.0.0 + {{- end }} + port: {{ template "relay.port" }} + + {{- if .Values.relay.cache }} + {{- if .Values.relay.cache.envelopeBufferSize }} + cache: + envelope_buffer_size: {{ int64 .Values.relay.cache.envelopeBufferSize | quote }} + {{- end }} + {{- end }} + + {{- if .Values.relay.logging }} + logging: + {{- if .Values.relay.logging.level }} + level: {{ .Values.relay.logging.level }} + {{- end }} + {{- if .Values.relay.logging.format }} + format: {{ .Values.relay.logging.format }} + {{- end }} + {{- end }} + + processing: + enabled: true + {{- if .Values.geodata.path }} + geoip_path: {{ .Values.geodata.path | quote }} + {{- end }} + + kafka_config: + - name: "bootstrap.servers" + value: {{ (include "sentry.kafka.bootstrap_servers_string" .) 
| quote }} + {{- if .Values.relay.processing.kafkaConfig.messageMaxBytes }} + - name: "message.max.bytes" + value: {{ int64 .Values.relay.processing.kafkaConfig.messageMaxBytes | quote }} + {{- end }} + {{- if .Values.relay.processing.kafkaConfig.messageTimeoutMs }} + - name: "message.timeout.ms" + value: {{ int64 .Values.relay.processing.kafkaConfig.messageTimeoutMs | quote }} + {{- end }} + {{- if .Values.relay.processing.kafkaConfig.requestTimeoutMs }} + - name: "request.timeout.ms" + value: {{ int64 .Values.relay.processing.kafkaConfig.requestTimeoutMs | quote }} + {{- end }} + {{- if .Values.relay.processing.kafkaConfig.deliveryTimeoutMs }} + - name: "delivery.timeout.ms" + value: {{ int64 .Values.relay.processing.kafkaConfig.deliveryTimeoutMs | quote }} + {{- end }} + {{- if .Values.relay.processing.kafkaConfig.apiVersionRequestTimeoutMs }} + - name: "api.version.request.timeout.ms" + value: {{ int64 .Values.relay.processing.kafkaConfig.apiVersionRequestTimeoutMs | quote }} + {{- end }} + {{- $sentryKafkaSaslMechanism := include "sentry.kafka.sasl_mechanism" . -}} + {{- if not (eq "None" $sentryKafkaSaslMechanism) }} + - name: "sasl.mechanism" + value: {{ $sentryKafkaSaslMechanism | quote }} + {{- end }} + {{- $sentryKafkaSaslUsername := include "sentry.kafka.sasl_username" . -}} + {{- if not (eq "None" $sentryKafkaSaslUsername) }} + - name: "sasl.username" + value: {{ $sentryKafkaSaslUsername | quote }} + {{- end }} + {{- $sentryKafkaSaslPassword := include "sentry.kafka.sasl_password" . -}} + {{- if not (eq "None" $sentryKafkaSaslPassword) }} + - name: "sasl.password" + value: {{ $sentryKafkaSaslPassword | quote }} + {{- end }} + {{- $sentryKafkaSecurityProtocol := include "sentry.kafka.security_protocol" . -}} + {{- if not (eq "plaintext" $sentryKafkaSecurityProtocol) }} + - name: security.protocol + value: {{ $sentryKafkaSecurityProtocol | quote }} + {{- end }} + {{- if .Values.relay.processing.additionalKafkaConfig }} + {{ toYaml .Values.relay.processing.additionalKafkaConfig | nindent 6 }} + {{- end }} + + {{- if $redisPass }} + {{- if and (not .Values.externalRedis.existingSecret) (not .Values.redis.auth.existingSecret)}} + redis: "{{ $redisProto }}://:{{ $redisPass }}@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}" + {{- end }} + {{- else }} + redis: "{{ $redisProto }}://{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}" + {{- end }} + + {{- if ((.Values.kafkaTopicOverrides).prefix) }} + topics: + metrics_sessions: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-metrics" + events: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-attachments" + transactions: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-transactions" + outcomes: "{{ default "" .Values.kafkaTopicOverrides.prefix }}outcomes" + outcomes_billing: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-outcomes" + metrics_generic: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-performance-metrics" + profiles: "{{ default "" .Values.kafkaTopicOverrides.prefix }}profiles" + replay_events: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-replay-events" + replay_recordings: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-replay-recordings" + monitors: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-monitors" + spans: "{{ default "" .Values.kafkaTopicOverrides.prefix }}snuba-spans" + metrics_summaries: "{{ default "" .Values.kafkaTopicOverrides.prefix }}snuba-metrics-summaries" + cogs: "{{ default "" .Values.kafkaTopicOverrides.prefix 
}}shared-resources-usage" + feedback: "{{ default "" .Values.kafkaTopicOverrides.prefix }}ingest-feedback-events" + {{- else }} + topics: + metrics_sessions: "ingest-metrics" + {{- end }} + + {{ .Values.config.relay | nindent 2 }} +{{- end -}} diff --git a/charts/sentry/templates/relay/configmap-relay.yaml b/charts/sentry/templates/relay/configmap-relay.yaml new file mode 100644 index 000000000..54826c83c --- /dev/null +++ b/charts/sentry/templates/relay/configmap-relay.yaml @@ -0,0 +1,13 @@ +{{- if .Values.relay.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sentry.fullname" . }}-relay + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + {{ include "sentry.relay.config" . | nindent 2 }} +{{- end }} diff --git a/sentry/templates/deployment-relay.yaml b/charts/sentry/templates/relay/deployment-relay.yaml similarity index 60% rename from sentry/templates/deployment-relay.yaml rename to charts/sentry/templates/relay/deployment-relay.yaml index 08ae85364..c5384e32e 100644 --- a/sentry/templates/deployment-relay.yaml +++ b/charts/sentry/templates/relay/deployment-relay.yaml @@ -1,3 +1,10 @@ +{{- if .Values.relay.enabled }} +{{- $redisHost := include "sentry.redis.host" . -}} +{{- $redisPort := include "sentry.redis.port" . -}} +{{- $redisDb := include "sentry.redis.db" . -}} +{{- $redisPass := include "sentry.redis.password" . -}} +{{- $redisProto := ternary "rediss" "redis" (eq (include "sentry.redis.ssl" .) "true") -}} + apiVersion: apps/v1 kind: Deployment metadata: @@ -30,7 +37,7 @@ spec: metadata: annotations: checksum/relay: {{ .Values.config.relay | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-relay.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.relay.config" . 
| sha256sum }} {{- if .Values.relay.annotations }} {{ toYaml .Values.relay.annotations | indent 8 }} {{- end }} @@ -49,10 +56,20 @@ spec: {{- if .Values.relay.nodeSelector }} nodeSelector: {{ toYaml .Values.relay.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.relay.tolerations }} tolerations: {{ toYaml .Values.relay.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.relay.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.relay.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.relay.imagePullSecrets }} imagePullSecrets: @@ -68,10 +85,31 @@ spec: imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} args: - "credentials" - - "generate" + - {{ .Values.relay.init.credentialsSubcommand | default "generate" | quote }} +{{- if .Values.relay.init.additionalArgs }} +{{ toYaml .Values.relay.init.additionalArgs | indent 12 }} +{{- end }} + resources: +{{ toYaml .Values.relay.init.resources | indent 12 }} +{{- if .Values.relay.containerSecurityContext }} + securityContext: +{{ toYaml .Values.relay.containerSecurityContext | indent 12 }} +{{- end }} env: - name: RELAY_PORT value: '{{ template "relay.port" }}' + {{- if and (not $redisPass) (.Values.externalRedis.existingSecret) }} + - name: HELM_CHARTS_RELAY_REDIS_PASSWORD_CONTROLLED + valueFrom: + secretKeyRef: + name: {{ .Values.externalRedis.existingSecret }} + key: {{ default "redis-password" .Values.externalRedis.existingSecretKey }} + - name: RELAY_REDIS_URL + value: {{ $redisProto }}://:$(HELM_CHARTS_RELAY_REDIS_PASSWORD_CONTROLLED)@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }} + {{- end }} +{{- if .Values.relay.init.env }} +{{ toYaml .Values.relay.init.env | indent 12 }} +{{- end }} volumeMounts: - name: credentials mountPath: /work/.relay @@ -79,6 +117,9 @@ spec: mountPath: /work/.relay/config.yml subPath: config.yml readOnly: true +{{- if .Values.relay.init.volumeMounts }} +{{ toYaml .Values.relay.init.volumeMounts | indent 12 }} +{{- end }} {{- if .Values.dnsPolicy }} dnsPolicy: {{ .Values.dnsPolicy | quote }} {{- end }} @@ -88,6 +129,10 @@ spec: {{- end }} containers: - name: {{ .Chart.Name }}-relay +{{- if .Values.relay.args }} + args: +{{ toYaml .Values.relay.args | indent 10 }} +{{- end }} image: "{{ template "relay.image" . 
}}" imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} ports: @@ -95,6 +140,15 @@ spec: env: - name: RELAY_PORT value: '{{ template "relay.port" }}' + {{- if and (not $redisPass) (.Values.externalRedis.existingSecret) }} + - name: HELM_CHARTS_RELAY_REDIS_PASSWORD_CONTROLLED + valueFrom: + secretKeyRef: + name: {{ .Values.externalRedis.existingSecret }} + key: {{ default "redis-password" .Values.externalRedis.existingSecretKey }} + - name: RELAY_REDIS_URL + value: {{ $redisProto }}://:$(HELM_CHARTS_RELAY_REDIS_PASSWORD_CONTROLLED)@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }} + {{- end }} {{- if .Values.relay.env }} {{ toYaml .Values.relay.env | indent 8 }} {{- end }} @@ -105,6 +159,13 @@ spec: mountPath: /work/.relay/config.yml subPath: config.yml readOnly: true + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + {{- end }} +{{- if .Values.relay.volumeMounts }} +{{ toYaml .Values.relay.volumeMounts | indent 10 }} +{{- end }} livenessProbe: failureThreshold: {{ .Values.relay.probeFailureThreshold }} httpGet: @@ -127,8 +188,15 @@ spec: timeoutSeconds: {{ .Values.relay.probeTimeoutSeconds }} resources: {{ toYaml .Values.relay.resources | indent 12 }} +{{- if .Values.relay.containerSecurityContext }} + securityContext: +{{ toYaml .Values.relay.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.relay.sidecars }} {{ toYaml .Values.relay.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-relay @@ -140,9 +208,18 @@ spec: defaultMode: 0644 - name: credentials emptyDir: {} + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: {{ .Values.geodata.volumeName }} + {{- end }} {{- if .Values.relay.volumes }} {{ toYaml .Values.relay.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} {{- end }} {{- if .Values.relay.priorityClassName }} priorityClassName: "{{ .Values.relay.priorityClassName }}" {{- end }} +{{- end }} diff --git a/charts/sentry/templates/relay/hpa-relay.yaml b/charts/sentry/templates/relay/hpa-relay.yaml new file mode 100644 index 000000000..dad896f83 --- /dev/null +++ b/charts/sentry/templates/relay/hpa-relay.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.relay.enabled .Values.relay.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-relay + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "25" +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-relay + minReplicas: {{ .Values.relay.autoscaling.minReplicas }} + maxReplicas: {{ .Values.relay.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.relay.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-relay + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.relay.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.relay.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/sentry/templates/service-relay.yaml b/charts/sentry/templates/relay/service-relay.yaml similarity index 95% rename from sentry/templates/service-relay.yaml rename to charts/sentry/templates/relay/service-relay.yaml index 962b0e393..a60ab581f 100644 --- a/sentry/templates/service-relay.yaml +++ b/charts/sentry/templates/relay/service-relay.yaml @@ -1,3 +1,4 @@ +{{- if .Values.relay.enabled }} apiVersion: v1 kind: Service metadata: @@ -24,3 +25,4 @@ spec: selector: app: {{ template "sentry.fullname" . }} role: relay +{{- end }} diff --git a/sentry/templates/serviceaccount-relay.yaml b/charts/sentry/templates/relay/serviceaccount-relay.yaml similarity index 80% rename from sentry/templates/serviceaccount-relay.yaml rename to charts/sentry/templates/relay/serviceaccount-relay.yaml index 787415127..4e0f8bd2e 100644 --- a/sentry/templates/serviceaccount-relay.yaml +++ b/charts/sentry/templates/relay/serviceaccount-relay.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.relay.enabled .Values.serviceAccount.enabled }} apiVersion: v1 kind: ServiceAccount metadata: @@ -7,4 +7,4 @@ metadata: annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/secret-geoip-env.yaml b/charts/sentry/templates/secret-geoip-env.yaml new file mode 100644 index 000000000..09ed813dc --- /dev/null +++ b/charts/sentry/templates/secret-geoip-env.yaml @@ -0,0 +1,16 @@ +{{- if .Values.geodata.accountID }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "sentry.fullname" . }}-geoip-env + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + GEOIPUPDATE_ACCOUNT_ID: {{ .Values.geodata.accountID | b64enc | quote }} + GEOIPUPDATE_LICENSE_KEY: {{ .Values.geodata.licenseKey | b64enc | quote }} + GEOIPUPDATE_EDITION_IDS: {{ .Values.geodata.editionIDs | b64enc | quote }} +{{- end -}} diff --git a/charts/sentry/templates/sentry/_helper-sentry.tpl b/charts/sentry/templates/sentry/_helper-sentry.tpl new file mode 100644 index 000000000..87379810a --- /dev/null +++ b/charts/sentry/templates/sentry/_helper-sentry.tpl @@ -0,0 +1,680 @@ +{{- define "sentry.config" -}} +{{- $redisHost := include "sentry.redis.host" . -}} +{{- $redisPort := include "sentry.redis.port" . -}} +{{- $redisPass := include "sentry.redis.password" . -}} +{{- $redisDb := include "sentry.redis.db" . -}} +{{- $redisProto := ternary "rediss" "redis" (eq (include "sentry.redis.ssl" .) 
"true") -}} +config.yml: |- + {{- if .Values.system.adminEmail }} + system.admin-email: {{ .Values.system.adminEmail | quote }} + {{- end }} + {{- if .Values.system.url }} + system.url-prefix: {{ .Values.system.url | quote }} + {{- end }} + + # This URL will be used to tell Symbolicator where to obtain the Sentry source. + # See https://getsentry.github.io/symbolicator/api/ + system.internal-url-prefix: 'http://{{ template "sentry.fullname" . }}-web:{{ .Values.service.externalPort }}' + symbolicator.enabled: {{ .Values.symbolicator.enabled }} + {{- if .Values.symbolicator.enabled }} + symbolicator.options: + url: "http://{{ template "sentry.fullname" . }}-symbolicator:{{ template "symbolicator.port" }}" + {{- end }} + + ########## + # Github # + ########## + {{- with .Values.github.appId }} + github-app.id: {{ . }} + {{- end }} + {{- with .Values.github.appName }} + github-app.name: {{ . | quote }} + {{- end }} + {{- if not .Values.github.existingSecret }} + {{- with .Values.github.privateKey }} + github-app.private-key: {{- . | toYaml | indent 2 }} + {{- end }} + {{- with .Values.github.webhookSecret }} + github-app.webhook-secret: {{ . | quote }} + {{- end }} + {{- with .Values.github.clientId }} + github-app.client-id: {{ . | quote }} + {{- end }} + {{- with .Values.github.clientSecret }} + github-app.client-secret: {{ . | quote }} + {{- end }} + {{- end }} + + ########## + # Google # + ########## + {{- if and (.Values.google.clientId) (.Values.google.clientSecret) (not .Values.google.existingSecret) }} + auth-google.client-id: {{ .Values.google.clientId | quote }} + auth-google.client-secret: {{ .Values.google.clientSecret | quote }} + {{- end }} + + ######### + # Slack # + ######### + {{- if and (.Values.slack.clientId) (.Values.slack.clientSecret) (.Values.slack.signingSecret) (not .Values.slack.existingSecret) }} + slack.client-id: {{ .Values.slack.clientId | quote }} + slack.client-secret: {{ .Values.slack.clientSecret | quote }} + slack.signing-secret: {{ .Values.slack.signingSecret | quote }} + {{ end }} + + ########### + # Discord # + ########### + {{- if and (.Values.discord.applicationId) (.Values.discord.publicKey) (.Values.discord.clientSecret) (.Values.discord.botToken) (not .Values.discord.existingSecret) }} + discord.application-id: {{ .Values.discord.applicationId | quote }} + discord.public-key: {{ .Values.discord.publicKey | quote }} + discord.client-secret: {{ .Values.discord.clientSecret | quote }} + discord.bot-token: {{ .Values.discord.botToken | quote }} + {{ end }} + + ######### + # Redis # + ######### + # This is configured in the sentry.conf.py as that has support for environment variables. + + ################ + # File storage # + ################ + # Uploaded media uses these `filestore` settings. The available + # backends are either `filesystem` or `s3`. 
+ filestore.backend: {{ .Values.filestore.backend | quote }} + {{- if eq .Values.filestore.backend "filesystem" }} + filestore.options: + location: {{ .Values.filestore.filesystem.path | quote }} + {{ end }} + {{- if eq .Values.filestore.backend "gcs" }} + filestore.options: + bucket_name: {{ .Values.filestore.gcs.bucketName | quote }} + {{ end }} + + {{- if .Values.config.configYml }} + {{ .Values.config.configYml | toYaml | nindent 2 }} + {{- end }} +sentry.conf.py: |- + from sentry.conf.server import * # NOQA + from distutils.util import strtobool + + BYTE_MULTIPLIER = 1024 + UNITS = ("K", "M", "G") + def unit_text_to_bytes(text): + unit = text[-1].upper() + power = UNITS.index(unit) + 1 + return float(text[:-1])*(BYTE_MULTIPLIER**power) + + {{- if .Values.sourcemaps.enabled }} + CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.memcached.PyMemcacheCache", + "LOCATION": [ + "{{ template "sentry.fullname" . }}-memcached:11211" + ], + "TIMEOUT": 3600, + "OPTIONS": {"ignore_exc": True} + } + } + {{- end }} + + DATABASES = { + "default": { + "ENGINE": "sentry.db.postgres", + "NAME": os.environ.get("POSTGRES_NAME", ""), + "USER": os.environ.get("POSTGRES_USER", ""), + "PASSWORD": os.environ.get("POSTGRES_PASSWORD", ""), + "HOST": os.environ.get("POSTGRES_HOST", ""), + "PORT": os.environ.get("POSTGRES_PORT", ""), + {{- if .Values.postgresql.enabled }} + "CONN_MAX_AGE": {{ .Values.postgresql.connMaxAge }}, + {{- else }} + "CONN_MAX_AGE": {{ .Values.externalPostgresql.connMaxAge }}, + {{- end }} + {{- if .Values.externalPostgresql.sslMode }} + 'OPTIONS': { + 'sslmode': '{{ .Values.externalPostgresql.sslMode }}', + }, + {{- end }} + } + } + + {{- if .Values.geodata.path }} + GEOIP_PATH_MMDB = {{ .Values.geodata.path | quote }} + {{- end }} + + # You should not change this setting after your database has been created + # unless you have altered all schemas first + SENTRY_USE_BIG_INTS = True + + ########### + # General # + ########### + + # Disable sends anonymous usage statistics + SENTRY_BEACON = False + + secret_key = env('SENTRY_SECRET_KEY') + if not secret_key: + raise Exception('Error: SENTRY_SECRET_KEY is undefined') + + SENTRY_OPTIONS['system.secret-key'] = secret_key + + # Set default for SAMPLED_DEFAULT_RATE: + SAMPLED_DEFAULT_RATE = {{ .Values.global.sampledDefaultRate | default 1.0 }} + + # Instruct Sentry that this install intends to be run by a single organization + # and thus various UI optimizations should be enabled. + SENTRY_SINGLE_ORGANIZATION = {{ if .Values.sentry.singleOrganization }}True{{ else }}False{{ end }} + + SENTRY_OPTIONS["system.event-retention-days"] = int(env('SENTRY_EVENT_RETENTION_DAYS') or {{ .Values.sentry.cleanup.days | quote }}) + + ######### + # Redis # + ######### + + # Generic Redis configuration used as defaults for various things including: + # Buffers, Quotas, TSDB + SENTRY_OPTIONS["redis.clusters"] = { + "default": { + "hosts": { + 0: { + "host": {{ $redisHost | quote }}, + "password": os.environ.get("REDIS_PASSWORD", {{ $redisPass | quote }}), + "port": {{ $redisPort | quote }}, + {{- if .Values.externalRedis.ssl }} + "ssl": {{ .Values.externalRedis.ssl | quote }}, + {{- end }} + "db": {{ $redisDb | quote }} + } + } + } + } + + ######### + # Queue # + ######### + + # See https://docs.getsentry.com/on-premise/server/queue/ for more + # information on configuring your queue broker and workers. Sentry relies + # on a Python framework called Celery to manage queues. 
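# Illustrative only: how the conditionals below pick the Celery broker. When
# RabbitMQ is enabled (or an external rabbitmq.host is set), an amqp:// BROKER_URL
# is rendered from rabbitmq.auth.* and rabbitmq.vhost; otherwise the Redis URL
# built from the sentry.redis.* helpers is used. In both cases the BROKER_URL
# environment variable still takes precedence. A minimal values.yaml sketch
# (values are placeholders, not chart defaults):
#
#   rabbitmq:
#     enabled: true
#     vhost: /
#     auth:
#       username: sentry           # hypothetical credentials
#       password: change-me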
+ + {{- if or (.Values.rabbitmq.enabled) (.Values.rabbitmq.host) }} + BROKER_URL = os.environ.get("BROKER_URL", "amqp://{{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }}@{{ template "sentry.rabbitmq.host" . }}:5672/{{ .Values.rabbitmq.vhost }}") + {{- else if $redisPass }} + BROKER_URL = os.environ.get("BROKER_URL", "{{ $redisProto }}://:{{ $redisPass }}@{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}") + {{- else if and (not .Values.externalRedis.existingSecret) (not .Values.redis.auth.existingSecret)}} + BROKER_URL = os.environ.get("BROKER_URL", "{{ $redisProto }}://{{ $redisHost }}:{{ $redisPort }}/{{ $redisDb }}") + {{- end }} + + ######### + # Cache # + ######### + + # Sentry currently utilizes two separate mechanisms. While CACHES is not a + # requirement, it will optimize several high throughput patterns. + + # CACHES = { + # "default": { + # "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", + # "LOCATION": ["memcached:11211"], + # "TIMEOUT": 3600, + # } + # } + + # A primary cache is required for things such as processing events + SENTRY_CACHE = "sentry.cache.redis.RedisCache" + + DEFAULT_KAFKA_OPTIONS = { + "common": { + "bootstrap.servers": {{ (include "sentry.kafka.bootstrap_servers_string" .) | quote }}, + "message.max.bytes": {{ include "sentry.kafka.message_max_bytes" . }}, + {{- $sentryKafkaCompressionType := include "sentry.kafka.compression_type" . -}} + {{- if $sentryKafkaCompressionType }} + "compression.type": {{ $sentryKafkaCompressionType | quote }}, + {{- end }} + "socket.timeout.ms": {{ include "sentry.kafka.socket_timeout_ms" . }}, + {{- $sentryKafkaSaslMechanism := include "sentry.kafka.sasl_mechanism" . -}} + {{- if not (eq "None" $sentryKafkaSaslMechanism) }} + "sasl.mechanism": {{ $sentryKafkaSaslMechanism | quote }}, + {{- end }} + {{- $sentryKafkaSaslUsername := include "sentry.kafka.sasl_username" . -}} + {{- if not (eq "None" $sentryKafkaSaslUsername) }} + "sasl.username": {{ $sentryKafkaSaslUsername | quote }}, + {{- end }} + {{- $sentryKafkaSaslPassword := include "sentry.kafka.sasl_password" . -}} + {{- if not (eq "None" $sentryKafkaSaslPassword) }} + "sasl.password": {{ $sentryKafkaSaslPassword | quote }}, + {{- end }} + {{- $sentryKafkaSecurityProtocol := include "sentry.kafka.security_protocol" . -}} + {{- if not (eq "plaintext" $sentryKafkaSecurityProtocol) }} + "security.protocol": {{ $sentryKafkaSecurityProtocol | quote }}, + {{- end }} + } + } + + SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" + SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS} + + {{- if ((.Values.kafkaTopicOverrides).prefix) }} + SENTRY_CHARTS_KAFKA_TOPIC_PREFIX = {{ .Values.kafkaTopicOverrides.prefix | quote }} + + from sentry.conf.types.kafka_definition import Topic + for topic in Topic: + KAFKA_TOPIC_OVERRIDES[topic.value] = f"{SENTRY_CHARTS_KAFKA_TOPIC_PREFIX}{topic.value}" + {{- end }} + + KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS + + ############### + # Rate Limits # + ############### + + # Rate limits apply to notification handlers and are enforced per-project + # automatically. + + SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter" + + ################## + # Update Buffers # + ################## + + # Buffers (combined with queueing) act as an intermediate layer between the + # database and the storage API. They will greatly improve efficiency on large + # numbers of the same events being sent to the API in a short amount of time. 
+ # (read: if you send any kind of real data to Sentry, you should enable buffers) + + SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer" + + ########## + # Quotas # + ########## + + # Quotas allow you to rate limit individual projects or the Sentry install as + # a whole. + + SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota" + + ######## + # TSDB # + ######## + + # The TSDB is used for building charts as well as making things like per-rate + # alerts possible. + + SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB" + + ######### + # SNUBA # + ######### + + SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend" + SENTRY_SEARCH_OPTIONS = {} + SENTRY_TAGSTORE_OPTIONS = {} + + ########### + # Digests # + ########### + + # The digest backend powers notification summaries. + + SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" + + ################### + # Metrics Backend # + ################### + + SENTRY_RELEASE_HEALTH = "sentry.release_health.metrics.MetricsReleaseHealthBackend" + SENTRY_RELEASE_MONITOR = "sentry.release_health.release_monitor.metrics.MetricReleaseMonitorBackend" + + ############## + # Web Server # + ############## + + {{- if .Values.ipv6 }} + SENTRY_WEB_HOST = "[::]" + {{- else }} + SENTRY_WEB_HOST = "0.0.0.0" + {{- end }} + + + SENTRY_WEB_PORT = {{ template "sentry.port" }} + SENTRY_PUBLIC = {{ .Values.system.public | ternary "True" "False" }} + SENTRY_WEB_OPTIONS = { + {{- if .Values.ipv6 }} + "http-socket": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), + {{- else }} + "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), + {{- end }} + "protocol": "uwsgi", + # This is needed to prevent https://git.io/fj7Lw + "uwsgi-socket": None, + # Keep this between 15s-75s as that's what Relay supports + "http-keepalive": {{ .Values.config.web.httpKeepalive }}, + "http-chunked-input": True, + # the number of web workers + 'workers': 3, + # Turn off memory reporting + "memory-report": False, + # Some stuff so uwsgi will cycle workers sensibly + 'max-requests': {{ .Values.config.web.maxRequests }}, + 'max-requests-delta': {{ .Values.config.web.maxRequestsDelta }}, + 'max-worker-lifetime': {{ .Values.config.web.maxWorkerLifetime }}, + # Duplicate options from sentry default just so we don't get + # bit by sentry changing a default value that we depend on. 
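# Illustrative only: the tunables above ("http-keepalive", "max-requests",
# "max-requests-delta", "max-worker-lifetime") are rendered from the chart's
# config.web.* values. A minimal values.yaml sketch (numbers are placeholders,
# not chart defaults):
#
#   config:
#     web:
#       httpKeepalive: 15
#       maxRequests: 100000
#       maxRequestsDelta: 500
#       maxWorkerLifetime: 86400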
+ 'thunder-lock': True, + 'log-x-forwarded-for': False, + 'buffer-size': 32768, + 'limit-post': 209715200, + 'disable-logging': True, + 'reload-on-rss': 600, + 'ignore-sigpipe': True, + 'ignore-write-errors': True, + 'disable-write-exception': True, + } + + ########### + # SSL/TLS # + ########### + + # If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto + # header and enable the settings below + + # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') + # SESSION_COOKIE_SECURE = True + # CSRF_COOKIE_SECURE = True + # SOCIAL_AUTH_REDIRECT_IS_HTTPS = True + + # End of SSL/TLS settings + + ############ + # Features # + ############ + + + SENTRY_FEATURES = { + "auth:register": {{ .Values.auth.register | ternary "True" "False" }} + } + SENTRY_FEATURES["projects:sample-events"] = False + SENTRY_FEATURES.update( + { + feature: True + for feature in ( + {{- if not .Values.sentry.singleOrganization }} + "organizations:create", + {{ end -}} + + {{- if .Values.sentry.features.orgSubdomains }} + "organizations:org-subdomains", + {{ end -}} + + "organizations:advanced-search", + "organizations:android-mappings", + "organizations:api-keys", + "organizations:boolean-search", + "organizations:related-events", + "organizations:alert-filters", + "organizations:custom-symbol-sources", + "organizations:dashboards-basic", + "organizations:dashboards-edit", + "organizations:data-forwarding", + "organizations:discover", + "organizations:discover-basic", + "organizations:discover-query", + "organizations:discover-frontend-use-events-endpoint", + "organizations:enterprise-perf", + "organizations:event-attachments", + "organizations:events", + "organizations:global-views", + "organizations:incidents", + "organizations:metric-alert-builder-aggregate", + "organizations:metric-alert-gui-filters", + "organizations:integrations-event-hooks", + "organizations:integrations-issue-basic", + "organizations:integrations-issue-sync", + "organizations:integrations-alert-rule", + "organizations:integrations-chat-unfurl", + "organizations:integrations-incident-management", + "organizations:integrations-ticket-rules", + + {{- if .Values.sentry.features.vstsLimitedScopes }} + "organizations:integrations-vsts-limited-scopes", + {{ end -}} + + "organizations:integrations-stacktrace-link", + "organizations:internal-catchall", + "organizations:invite-members", + "organizations:large-debug-files", + "organizations:monitors", + "organizations:onboarding", + "organizations:org-saved-searches", + "organizations:performance-view", + "organizations:performance-frontend-use-events-endpoint", + "organizations:project-detail", + "organizations:relay", + "organizations:release-performance-views", + "organizations:rule-page", + "organizations:set-grouping-config", + "organizations:custom-event-title", + "organizations:slack-migration", + "organizations:sso-basic", + "organizations:sso-rippling", + "organizations:sso-saml2", + "organizations:sso-migration", + "organizations:stacktrace-hover-preview", + "organizations:symbol-sources", + "organizations:transaction-comparison", + "organizations:usage-stats-graph", + "organizations:inbox", + "organizations:unhandled-issue-flag", + "organizations:invite-members-rate-limits", + "organizations:dashboards-v2", + "organizations:reprocessing-v2", + "organizations:metrics", + "organizations:metrics-extraction", + "organizations:transaction-metrics-extraction", + + {{- if .Values.sentry.features.enableSessionReplay}} + "organizations:session-replay", + 
"organizations:session-replay-ui", + "organizations:session-replay-sdk", + "organizations:session-replay-count-query-optimize", + "organizations:session-replay-sdk-errors-only", + "organizations:session-replay-recording-scrubbing", + "organizations:session-replay-a11y-tab", + "organizations:session-replay-slack-new-issue", + "organizations:session-replay-issue-emails", + "organizations:session-replay-event-linking", + "organizations:session-replay-weekly-email", + "organizations:session-replay-trace-table", + "organizations:session-replay-rage-dead-selectors", + "organizations:session-replay-new-event-counts", + "organizations:session-replay-new-timeline", + "organizations:issue-details-replay-event", + {{ end -}} + + "organizations:issue-platform", + + {{- if .Values.sentry.features.enableProfiling }} + "organizations:profiling", + "organizations:profiling-ui-frames", + "organizations:profiling-using-transactions", + "organizations:profiling-beta", + "organizations:profiling-stacktrace-links", + "organizations:profiling-cpu-chart", + "organizations:profiling-memory-chart", + "organizations:profiling-view", + {{ end -}} + + {{- if .Values.sentry.features.enableFeedback }} + "organizations:user-feedback-ui", + "organizations:user-feedback-ingest", + "organizations:feedback-ingest", + "organizations:feedback-post-process-group", + "organizations:feedback-visible", + {{ end -}} + + {{- if .Values.sentry.features.enableSpan }} + "projects:span-metrics-extraction", + "projects:span-metrics-extraction-addons", + "organizations:indexed-spans-extraction", + "organizations:starfish-browser-resource-module-image-view", + "organizations:starfish-browser-resource-module-ui", + "organizations:starfish-browser-webvitals", + "organizations:starfish-browser-webvitals-pageoverview-v2", + "organizations:starfish-browser-webvitals-use-backend-scores", + "organizations:performance-calculate-score-relay", + "organizations:starfish-browser-webvitals-replace-fid-with-inp", + "organizations:deprecate-fid-from-performance-score", + "organizations:performance-database-view", + "organizations:performance-screens-view", + "organizations:mobile-ttid-ttfd-contribution", + "organizations:starfish-mobile-appstart", + "organizations:standalone-span-ingestion", + "organizations:insights-entry-points", + "organizations:insights-initial-modules", + "organizations:insights-addon-modules", + {{ end -}} + + "organizations:dashboards-mep", + "organizations:mep-rollout-flag", + "organizations:dashboards-rh-widget", + "organizations:metrics-extraction", + "organizations:transaction-metrics-extraction", + + "projects:alert-filters", + "projects:custom-inbound-filters", + "projects:data-forwarding", + "projects:discard-groups", + "projects:issue-alerts-targeting", + "projects:minidump", + "projects:rate-limits", + "projects:sample-events", + "projects:servicehooks", + "projects:similarity-view", + "projects:similarity-indexing", + "projects:similarity-view-v2", + "projects:similarity-indexing-v2", + + "projects:plugins", + {{- if .Values.sentry.customFeatures }} + {{- range $CustomFeature := .Values.sentry.customFeatures }} + "{{ $CustomFeature}}", + {{- end }} + {{- end }} + ) + } + ) + + ####################### + # Email Configuration # + ####################### + SENTRY_OPTIONS['mail.backend'] = os.getenv("SENTRY_EMAIL_BACKEND", {{ .Values.mail.backend | quote }}) + SENTRY_OPTIONS['mail.use-tls'] = bool(strtobool(os.getenv("SENTRY_EMAIL_USE_TLS", {{ .Values.mail.useTls | quote }}))) + SENTRY_OPTIONS['mail.use-ssl'] = 
bool(strtobool(os.getenv("SENTRY_EMAIL_USE_SSL", {{ .Values.mail.useSsl | quote }}))) + SENTRY_OPTIONS['mail.username'] = os.getenv("SENTRY_EMAIL_USERNAME", {{ .Values.mail.username | quote }}) + SENTRY_OPTIONS['mail.password'] = os.getenv("SENTRY_EMAIL_PASSWORD", "") + SENTRY_OPTIONS['mail.port'] = int(os.getenv("SENTRY_EMAIL_PORT", {{ .Values.mail.port | quote }})) + SENTRY_OPTIONS['mail.host'] = os.getenv("SENTRY_EMAIL_HOST", {{ .Values.mail.host | quote }}) + SENTRY_OPTIONS['mail.from'] = os.getenv("SENTRY_EMAIL_FROM", {{ .Values.mail.from | quote }}) + + ####################### + # Filestore S3 Configuration # + ####################### + {{- if eq .Values.filestore.backend "s3" }} + SENTRY_OPTIONS['filestore.options'] = { + 'access_key': os.getenv("S3_ACCESS_KEY_ID", {{ .Values.filestore.s3.accessKey | default "" | quote }}), + 'secret_key': os.getenv("S3_SECRET_ACCESS_KEY", {{ .Values.filestore.s3.secretKey | default "" | quote }}), + {{- if .Values.filestore.s3.bucketName }} + 'bucket_name': {{ .Values.filestore.s3.bucketName | quote }}, + {{- end }} + {{- if .Values.filestore.s3.endpointUrl }} + 'endpoint_url': {{ .Values.filestore.s3.endpointUrl | quote }}, + {{- end }} + {{- if .Values.filestore.s3.signature_version }} + 'signature_version': {{ .Values.filestore.s3.signature_version | quote }}, + {{- end }} + {{- if .Values.filestore.s3.region_name }} + 'region_name': {{ .Values.filestore.s3.region_name | quote }}, + {{- end }} + {{- if .Values.filestore.s3.default_acl }} + 'default_acl': {{ .Values.filestore.s3.default_acl | quote }}, + {{- end }} + # Additional config params for S3 + {{- if .Values.filestore.s3.addressing_style }} + 'addressing_style': {{ .Values.filestore.s3.addressing_style | quote }}, + {{- end }} + {{- if .Values.filestore.s3.location }} + 'location': {{ .Values.filestore.s3.location | quote }}, + {{- end }} + } + {{- end }} + + ######################### + # Bitbucket Integration # + ######################### + + # BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY' + # BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET' + + ######### + # Relay # + ######### + SENTRY_RELAY_WHITELIST_PK = [] + SENTRY_RELAY_OPEN_REGISTRATION = True + + ####################### + # OpenAI Suggestions # + ####################### + + OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "") + if OPENAI_API_KEY: + SENTRY_FEATURES["organizations:open-ai-suggestion"] = True + +{{- if .Values.metrics.enabled }} + SENTRY_METRICS_BACKEND = 'sentry.metrics.statsd.StatsdMetricsBackend' + SENTRY_METRICS_OPTIONS = { + 'host': '{{ template "sentry.fullname" . 
}}-metrics', + 'port': 9125, + } +{{- end }} + +{{- if .Values.slack.existingSecret }} + ######### + # SLACK # + ######### + SENTRY_OPTIONS['slack.client-id'] = os.environ.get("SLACK_CLIENT_ID") + SENTRY_OPTIONS['slack.client-secret'] = os.environ.get("SLACK_CLIENT_SECRET") + SENTRY_OPTIONS['slack.signing-secret'] = os.environ.get("SLACK_SIGNING_SECRET") +{{- end }} + +{{- if .Values.discord.existingSecret }} + ########### + # DISCORD # + ########### + SENTRY_OPTIONS['discord.application-id'] = os.environ.get("DISCORD_APPLICATION_ID") + SENTRY_OPTIONS['discord.public-key'] = os.environ.get("DISCORD_PUBLIC_KEY") + SENTRY_OPTIONS['discord.client-secret'] = os.environ.get("DISCORD_CLIENT_SECRET") + SENTRY_OPTIONS['discord.bot-token'] = os.environ.get("DISCORD_BOT_TOKEN") +{{- end }} + +{{- if .Values.google.existingSecret }} + ######### + # GOOGLE # + ######### + SENTRY_OPTIONS['auth-google.client-id'] = os.environ.get("GOOGLE_AUTH_CLIENT_ID") + SENTRY_OPTIONS['auth-google.client-secret'] = os.environ.get("GOOGLE_AUTH_CLIENT_SECRET") +{{- end }} + +{{- if .Values.github.existingSecret }} + ########## + # Github # + ########## + SENTRY_OPTIONS['github-app.private-key'] = os.environ.get("GITHUB_APP_PRIVATE_KEY") + SENTRY_OPTIONS['github-app.webhook-secret'] = os.environ.get("GITHUB_APP_WEBHOOK_SECRET") + SENTRY_OPTIONS['github-app.client-id'] = os.environ.get("GITHUB_APP_CLIENT_ID") + SENTRY_OPTIONS['github-app.client-secret'] = os.environ.get("GITHUB_APP_CLIENT_SECRET") +{{- end }} + {{ .Values.config.sentryConfPy | nindent 2 }} +{{- end -}} diff --git a/sentry/templates/cronjob-sentry-cleanup.yaml b/charts/sentry/templates/sentry/cleanup/cronjob-sentry-cleanup.yaml similarity index 69% rename from sentry/templates/cronjob-sentry-cleanup.yaml rename to charts/sentry/templates/sentry/cleanup/cronjob-sentry-cleanup.yaml index 076ce16e6..50199afb4 100644 --- a/sentry/templates/cronjob-sentry-cleanup.yaml +++ b/charts/sentry/templates/sentry/cleanup/cronjob-sentry-cleanup.yaml @@ -1,5 +1,6 @@ {{- if .Values.sentry.cleanup.enabled }} -apiVersion: batch/v1beta1 +{{- $batchApiIsStable := eq (include "sentry.batch.isStable" .) "true" -}} +apiVersion: {{ include "sentry.batch.apiVersion" . }} kind: CronJob metadata: name: {{ template "sentry.fullname" . }}-sentry-cleanup @@ -10,15 +11,20 @@ metadata: heritage: "{{ .Release.Service }}" spec: schedule: "{{ .Values.sentry.cleanup.schedule }}" + successfulJobsHistoryLimit: {{ .Values.sentry.cleanup.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.sentry.cleanup.failedJobsHistoryLimit }} concurrencyPolicy: "{{ .Values.sentry.cleanup.concurrencyPolicy }}" jobTemplate: spec: + {{- if .Values.sentry.cleanup.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.sentry.cleanup.activeDeadlineSeconds }} + {{- end}} template: metadata: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} {{- if .Values.sentry.cleanup.annotations }} {{ toYaml .Values.sentry.cleanup.annotations | indent 12 }} {{- end }} @@ -35,11 +41,17 @@ spec: {{- end }} {{- if .Values.sentry.cleanup.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.cleanup.nodeSelector | indent 12 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 12 }} {{- end }} {{- if .Values.sentry.cleanup.tolerations }} tolerations: {{ toYaml .Values.sentry.cleanup.tolerations | indent 12 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 12 }} {{- end }} {{- if .Values.dnsPolicy }} dnsPolicy: {{ .Values.dnsPolicy | quote }} @@ -52,6 +64,10 @@ spec: imagePullSecrets: {{ toYaml .Values.images.sentry.imagePullSecrets | indent 12 }} {{- end }} + {{- if .Values.sentry.cleanup.securityContext }} + securityContext: +{{ toYaml .Values.sentry.cleanup.securityContext | indent 12 }} + {{- end }} containers: - name: {{ .Chart.Name }}-sentry-cleanup image: "{{ template "sentry.image" . }}" @@ -59,30 +75,21 @@ spec: command: ["sentry"] args: - "cleanup" + - "--concurrency" + - {{ .Values.sentry.cleanup.concurrency | quote }} - "--days" - "{{ .Values.sentry.cleanup.days }}" + {{- if .Values.sentry.cleanup.logLevel }} + - "-l" + - {{ .Values.sentry.cleanup.logLevel | quote }} + {{- end }} env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - name: C_FORCE_ROOT value: "true" - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- else if .Values.externalPostgresql.password }} - - name: POSTGRES_PASSWORD - value: {{ include "sentry.postgresql.password" . | quote }} - {{- end }} - {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{- end }} - {{- if .Values.sentry.cleanup.env }} +{{ include "sentry.env" . 
| indent 12 }} +{{- if .Values.sentry.cleanup.env }} {{ toYaml .Values.sentry.cleanup.env | indent 12 }} - {{- end }} +{{- end }} volumeMounts: - mountPath: /etc/sentry name: config @@ -93,10 +100,20 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.sentry.cleanup.volumeMounts }} +{{ toYaml .Values.sentry.cleanup.volumeMounts | indent 12 }} +{{- end }} resources: {{ toYaml .Values.sentry.cleanup.resources | indent 14 }} +{{- if .Values.sentry.cleanup.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.cleanup.containerSecurityContext | indent 14 }} +{{- end }} {{- if .Values.sentry.cleanup.sidecars }} {{ toYaml .Values.sentry.cleanup.sidecars | indent 10 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 10 }} {{- end }} restartPolicy: Never volumes: @@ -122,8 +139,14 @@ spec: {{ end }} {{- if .Values.sentry.cleanup.volumes }} {{ toYaml .Values.sentry.cleanup.volumes | indent 10 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 10 }} {{- end }} {{- if .Values.sentry.cleanup.priorityClassName }} priorityClassName: "{{ .Values.sentry.cleanup.priorityClassName }}" {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-cleanup + {{- end }} {{- end }} diff --git a/sentry/templates/serviceaccount-sentry-worker.yaml b/charts/sentry/templates/sentry/cleanup/serviceaccount-sentry-cleanup.yaml similarity index 83% rename from sentry/templates/serviceaccount-sentry-worker.yaml rename to charts/sentry/templates/sentry/cleanup/serviceaccount-sentry-cleanup.yaml index 6bebecc25..422f64746 100644 --- a/sentry/templates/serviceaccount-sentry-worker.yaml +++ b/charts/sentry/templates/sentry/cleanup/serviceaccount-sentry-cleanup.yaml @@ -2,9 +2,9 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.serviceAccount.name }}-worker + name: {{ .Values.serviceAccount.name }}-cleanup {{- if .Values.serviceAccount.annotations }} annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/sentry/configmap-sentry.yaml b/charts/sentry/templates/sentry/configmap-sentry.yaml new file mode 100644 index 000000000..986f74231 --- /dev/null +++ b/charts/sentry/templates/sentry/configmap-sentry.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sentry.fullname" . }}-sentry + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + {{ include "sentry.config" . 
| nindent 2 }} diff --git a/sentry/templates/deployment-sentry-cron.yaml b/charts/sentry/templates/sentry/cron/deployment-sentry-cron.yaml similarity index 76% rename from sentry/templates/deployment-sentry-cron.yaml rename to charts/sentry/templates/sentry/cron/deployment-sentry-cron.yaml index fe4f584e4..57c0f47b9 100644 --- a/sentry/templates/deployment-sentry-cron.yaml +++ b/charts/sentry/templates/sentry/cron/deployment-sentry-cron.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.cron.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -20,7 +21,7 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} {{- if .Values.sentry.cron.annotations }} {{ toYaml .Values.sentry.cron.annotations | indent 8 }} {{- end }} @@ -39,10 +40,20 @@ spec: {{- if .Values.sentry.cron.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.cron.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.sentry.cron.tolerations }} tolerations: {{ toYaml .Values.sentry.cron.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.cron.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.cron.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -67,22 +78,18 @@ spec: args: - "run" - "cron" + {{- if .Values.sentry.cron.logLevel }} + - "--loglevel" + - "{{ .Values.sentry.cron.logLevel }}" + {{- end }} + {{- if .Values.sentry.cron.logFormat }} + - "--logformat" + - "{{ .Values.sentry.cron.logFormat }}" + {{- end }} env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - name: C_FORCE_ROOT value: "true" - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} +{{ include "sentry.env" . 
| indent 8 }} {{- if .Values.sentry.cron.env }} {{ toYaml .Values.sentry.cron.env | indent 8 }} {{- end }} @@ -96,10 +103,20 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.sentry.cron.volumeMounts }} +{{ toYaml .Values.sentry.cron.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.sentry.cron.resources | indent 12 }} +{{- if .Values.sentry.cron.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.cron.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.sentry.cron.sidecars }} {{ toYaml .Values.sentry.cron.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-cron @@ -127,7 +144,11 @@ spec: {{ end }} {{- if .Values.sentry.cron.volumes }} {{ toYaml .Values.sentry.cron.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} {{- end }} {{- if .Values.sentry.cron.priorityClassName }} priorityClassName: "{{ .Values.sentry.cron.priorityClassName }}" {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-cron.yaml b/charts/sentry/templates/sentry/cron/serviceaccount-sentry-cron.yaml similarity index 78% rename from sentry/templates/serviceaccount-sentry-cron.yaml rename to charts/sentry/templates/sentry/cron/serviceaccount-sentry-cron.yaml index 82984cbe2..9f63cc39b 100644 --- a/sentry/templates/serviceaccount-sentry-cron.yaml +++ b/charts/sentry/templates/sentry/cron/serviceaccount-sentry-cron.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.cron.enabled }} apiVersion: v1 kind: ServiceAccount metadata: @@ -7,4 +7,4 @@ metadata: annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/attachments/deployment-sentry-ingest-consumer-attachments.yaml b/charts/sentry/templates/sentry/ingest/attachments/deployment-sentry-ingest-consumer-attachments.yaml new file mode 100644 index 000000000..4f2e5917e --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/attachments/deployment-sentry-ingest-consumer-attachments.yaml @@ -0,0 +1,201 @@ +{{- if .Values.sentry.ingestConsumerAttachments.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-consumer-attachments + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: ingest-consumer-attachments +{{- if not .Values.sentry.ingestConsumerAttachments.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestConsumerAttachments.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestConsumerAttachments.annotations }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-consumer-attachments + {{- if .Values.sentry.ingestConsumerAttachments.podLabels }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestConsumerAttachments.affinity }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestConsumerAttachments.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestConsumerAttachments.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestConsumerAttachments.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerAttachments.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-consumer-attachments + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-attachments" + - "--consumer-group" + - "ingest-consumer" + {{- if .Values.sentry.ingestConsumerAttachments.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestConsumerAttachments.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.logLevel }} + - "--log-level" + - "{{ .Values.sentry.ingestConsumerAttachments.logLevel }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.maxPollIntervalMs }} + - "--max-poll-interval-ms" + - "{{ .Values.sentry.ingestConsumerAttachments.maxPollIntervalMs }}" + {{- end }} + - "--" + {{- if .Values.sentry.ingestConsumerAttachments.concurrency }} + - "--processes" + - "{{ .Values.sentry.ingestConsumerAttachments.concurrency }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.sentry.ingestConsumerAttachments.maxBatchSize }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.maxBatchTimeMs }} + - "--max-batch-size" + - "{{ .Values.sentry.ingestConsumerAttachments.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerAttachments.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestConsumerAttachments.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestConsumerAttachments.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.ingestConsumerAttachments.env }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestConsumerAttachments.volumeMounts }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestConsumerAttachments.resources | indent 12 }} +{{- if .Values.sentry.ingestConsumerAttachments.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerAttachments.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestConsumerAttachments.sidecars }} +{{ toYaml .Values.sentry.ingestConsumerAttachments.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-consumer-attachments + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-sentry
+        - name: sentry-data
+        {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }}
+        {{- if .Values.filestore.filesystem.persistence.existingClaim }}
+          persistentVolumeClaim:
+            claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }}
+        {{- else }}
+          persistentVolumeClaim:
+            claimName: {{ template "sentry.fullname" . }}-data
+        {{- end }}
+        {{- else }}
+          emptyDir: {}
+        {{ end }}
+        {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }}
+        - name: sentry-google-cloud-key
+          secret:
+            secretName: {{ .Values.filestore.gcs.secretName }}
+        {{ end }}
+{{- if .Values.sentry.ingestConsumerAttachments.volumes }}
+{{ toYaml .Values.sentry.ingestConsumerAttachments.volumes | indent 6 }}
+{{- end }}
+{{- if .Values.global.volumes }}
+{{ toYaml .Values.global.volumes | indent 6 }}
+{{- end }}
+      {{- if .Values.sentry.ingestConsumerAttachments.priorityClassName }}
+      priorityClassName: "{{ .Values.sentry.ingestConsumerAttachments.priorityClassName }}"
+      {{- end }}
+{{- end }}
diff --git a/charts/sentry/templates/sentry/ingest/attachments/hpa-ingest-consumer-attachments.yaml b/charts/sentry/templates/sentry/ingest/attachments/hpa-ingest-consumer-attachments.yaml
new file mode 100644
index 000000000..e3a87bb8e
--- /dev/null
+++ b/charts/sentry/templates/sentry/ingest/attachments/hpa-ingest-consumer-attachments.yaml
@@ -0,0 +1,33 @@
+{{- if and .Values.sentry.ingestConsumerAttachments.enabled .Values.sentry.ingestConsumerAttachments.autoscaling.enabled }}
+apiVersion: {{ template "sentry.autoscaling.apiVersion" . }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ template "sentry.fullname" . }}-sentry-ingest-consumer-attachments
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ template "sentry.fullname" . }}-ingest-consumer-attachments
+  minReplicas: {{ .Values.sentry.ingestConsumerAttachments.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.sentry.ingestConsumerAttachments.autoscaling.maxReplicas }}
+  {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.ingestConsumerAttachments.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-ingest-consumer-attachments + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerAttachments.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerAttachments.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/attachments/serviceaccount-sentry-ingest-consumer-attachments.yaml b/charts/sentry/templates/sentry/ingest/attachments/serviceaccount-sentry-ingest-consumer-attachments.yaml new file mode 100644 index 000000000..55632fc25 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/attachments/serviceaccount-sentry-ingest-consumer-attachments.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestConsumerAttachments.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-consumer-attachments +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/events/deployment-sentry-ingest-consumer-events.yaml b/charts/sentry/templates/sentry/ingest/events/deployment-sentry-ingest-consumer-events.yaml new file mode 100644 index 000000000..33fa2e125 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/events/deployment-sentry-ingest-consumer-events.yaml @@ -0,0 +1,201 @@ +{{- if .Values.sentry.ingestConsumerEvents.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-consumer-events + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-consumer-events +{{- if not .Values.sentry.ingestConsumerEvents.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestConsumerEvents.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestConsumerEvents.annotations }} +{{ toYaml .Values.sentry.ingestConsumerEvents.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: ingest-consumer-events + {{- if .Values.sentry.ingestConsumerEvents.podLabels }} +{{ toYaml .Values.sentry.ingestConsumerEvents.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestConsumerEvents.affinity }} +{{ toYaml .Values.sentry.ingestConsumerEvents.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestConsumerEvents.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestConsumerEvents.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestConsumerEvents.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerEvents.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-consumer-events + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-events" + - "--consumer-group" + - "ingest-consumer" + {{- if .Values.sentry.ingestConsumerEvents.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestConsumerEvents.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.logLevel }} + - "--log-level" + - "{{ .Values.sentry.ingestConsumerEvents.logLevel }}" + {{- end }} + - "--" + {{- if .Values.sentry.ingestConsumerEvents.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.sentry.ingestConsumerEvents.maxBatchSize }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.sentry.ingestConsumerEvents.inputBlockSize }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.sentry.ingestConsumerEvents.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.concurrency }} + - "--processes" + - "{{ .Values.sentry.ingestConsumerEvents.concurrency }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerEvents.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestConsumerEvents.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestConsumerEvents.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.ingestConsumerEvents.env }} +{{ toYaml .Values.sentry.ingestConsumerEvents.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestConsumerEvents.volumeMounts }} +{{ toYaml .Values.sentry.ingestConsumerEvents.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestConsumerEvents.resources | indent 12 }} +{{- if .Values.sentry.ingestConsumerEvents.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerEvents.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestConsumerEvents.sidecars }} +{{ toYaml .Values.sentry.ingestConsumerEvents.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-consumer-events + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . }}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.ingestConsumerEvents.volumes }} +{{ toYaml .Values.sentry.ingestConsumerEvents.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestConsumerEvents.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestConsumerEvents.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/events/hpa-ingest-consumer-events.yaml b/charts/sentry/templates/sentry/ingest/events/hpa-ingest-consumer-events.yaml new file mode 100644 index 000000000..95f6ab86d --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/events/hpa-ingest-consumer-events.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.ingestConsumerEvents.enabled .Values.sentry.ingestConsumerEvents.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-ingest-consumer-events +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-ingest-consumer-events + minReplicas: {{ .Values.sentry.ingestConsumerEvents.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.ingestConsumerEvents.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.ingestConsumerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-ingest-consumer-events + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/events/serviceaccount-sentry-ingest-consumer-events.yaml b/charts/sentry/templates/sentry/ingest/events/serviceaccount-sentry-ingest-consumer-events.yaml new file mode 100644 index 000000000..54925afca --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/events/serviceaccount-sentry-ingest-consumer-events.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestConsumerEvents.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-consumer-events +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/feedback/deployment-sentry-ingest-feedback.yaml b/charts/sentry/templates/sentry/ingest/feedback/deployment-sentry-ingest-feedback.yaml new file mode 100644 index 000000000..03fb4a35c --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/feedback/deployment-sentry-ingest-feedback.yaml @@ -0,0 +1,180 @@ +{{- if .Values.sentry.ingestFeedback.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-feedback + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-feedback +{{- if not .Values.sentry.ingestFeedback.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestFeedback.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestFeedback.annotations }} +{{ toYaml .Values.sentry.ingestFeedback.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: ingest-feedback + {{- if .Values.sentry.ingestFeedback.podLabels }} +{{ toYaml .Values.sentry.ingestFeedback.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestFeedback.affinity }} +{{ toYaml .Values.sentry.ingestFeedback.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestFeedback.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestFeedback.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestFeedback.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestFeedback.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestFeedback.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestFeedback.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestFeedback.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestFeedback.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-feedback + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-feedback-events" + - "--consumer-group" + - "ingest-feedback" + {{- if .Values.sentry.ingestFeedback.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestFeedback.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestFeedback.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestFeedback.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestFeedback.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestFeedback.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestFeedback.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }}
+{{- if .Values.sentry.ingestFeedback.env }}
+{{ toYaml .Values.sentry.ingestFeedback.env | indent 8 }}
+{{- end }}
+          volumeMounts:
+            - mountPath: /etc/sentry
+              name: config
+              readOnly: true
+            - mountPath: {{ .Values.filestore.filesystem.path }}
+              name: sentry-data
+          {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }}
+            - name: sentry-google-cloud-key
+              mountPath: /var/run/secrets/google
+          {{ end }}
+{{- if .Values.sentry.ingestFeedback.volumeMounts }}
+{{ toYaml .Values.sentry.ingestFeedback.volumeMounts | indent 8 }}
+{{- end }}
+          resources:
+{{ toYaml .Values.sentry.ingestFeedback.resources | indent 12 }}
+{{- if .Values.sentry.ingestFeedback.containerSecurityContext }}
+          securityContext:
+{{ toYaml .Values.sentry.ingestFeedback.containerSecurityContext | indent 12 }}
+{{- end }}
+{{- if .Values.sentry.ingestFeedback.sidecars }}
+{{ toYaml .Values.sentry.ingestFeedback.sidecars | indent 6 }}
+{{- end }}
+{{- if .Values.global.sidecars }}
+{{ toYaml .Values.global.sidecars | indent 6 }}
+{{- end }}
+      {{- if .Values.serviceAccount.enabled }}
+      serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-feedback
+      {{- end }}
+      volumes:
+        - name: config
+          configMap:
+            name: {{ template "sentry.fullname" . }}-sentry
+        - name: sentry-data
+        {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }}
+        {{- if .Values.filestore.filesystem.persistence.existingClaim }}
+          persistentVolumeClaim:
+            claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }}
+        {{- else }}
+          persistentVolumeClaim:
+            claimName: {{ template "sentry.fullname" . }}-data
+        {{- end }}
+        {{- else }}
+          emptyDir: {}
+        {{ end }}
+        {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }}
+        - name: sentry-google-cloud-key
+          secret:
+            secretName: {{ .Values.filestore.gcs.secretName }}
+        {{ end }}
+{{- if .Values.sentry.ingestFeedback.volumes }}
+{{ toYaml .Values.sentry.ingestFeedback.volumes | indent 6 }}
+{{- end }}
+{{- if .Values.global.volumes }}
+{{ toYaml .Values.global.volumes | indent 6 }}
+{{- end }}
+      {{- if .Values.sentry.ingestFeedback.priorityClassName }}
+      priorityClassName: "{{ .Values.sentry.ingestFeedback.priorityClassName }}"
+      {{- end }}
+{{- end }}
diff --git a/charts/sentry/templates/sentry/ingest/feedback/serviceaccount-sentry-ingest-feedback.yaml b/charts/sentry/templates/sentry/ingest/feedback/serviceaccount-sentry-ingest-feedback.yaml
new file mode 100644
index 000000000..34a827c4b
--- /dev/null
+++ b/charts/sentry/templates/sentry/ingest/feedback/serviceaccount-sentry-ingest-feedback.yaml
@@ -0,0 +1,10 @@
+{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestFeedback.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ .Values.serviceAccount.name }}-ingest-feedback
+{{- if .Values.serviceAccount.annotations }}
+  annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
+{{- end }}
+automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/sentry/templates/deployment-sentry-ingest-consumer.yaml b/charts/sentry/templates/sentry/ingest/monitors/deployment-sentry-ingest-monitors.yaml
similarity index 50%
rename from sentry/templates/deployment-sentry-ingest-consumer.yaml
rename to charts/sentry/templates/sentry/ingest/monitors/deployment-sentry-ingest-monitors.yaml
index 188fbb3e4..606358149 100644
--- 
a/sentry/templates/deployment-sentry-ingest-consumer.yaml +++ b/charts/sentry/templates/sentry/ingest/monitors/deployment-sentry-ingest-monitors.yaml @@ -1,7 +1,8 @@ +{{- if .Values.sentry.ingestMonitors.enabled }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ template "sentry.fullname" . }}-ingest-consumer + name: {{ template "sentry.fullname" . }}-ingest-monitors labels: app: {{ template "sentry.fullname" . }} chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" @@ -22,38 +23,48 @@ spec: matchLabels: app: {{ template "sentry.fullname" . }} release: "{{ .Release.Name }}" - role: ingest-consumer -{{- if not .Values.sentry.ingestConsumer.autoscaling.enabled }} - replicas: {{ .Values.sentry.ingestConsumer.replicas }} + role: ingest-monitors +{{- if not .Values.sentry.ingestMonitors.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestMonitors.replicas }} {{- end }} template: metadata: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} - {{- if .Values.sentry.ingestConsumer.annotations }} -{{ toYaml .Values.sentry.ingestConsumer.annotations | indent 8 }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestMonitors.annotations }} +{{ toYaml .Values.sentry.ingestMonitors.annotations | indent 8 }} {{- end }} labels: app: {{ template "sentry.fullname" . }} release: "{{ .Release.Name }}" - role: ingest-consumer - {{- if .Values.sentry.ingestConsumer.podLabels }} -{{ toYaml .Values.sentry.ingestConsumer.podLabels | indent 8 }} + role: ingest-monitors + {{- if .Values.sentry.ingestMonitors.podLabels }} +{{ toYaml .Values.sentry.ingestMonitors.podLabels | indent 8 }} {{- end }} spec: affinity: - {{- if .Values.sentry.ingestConsumer.affinity }} -{{ toYaml .Values.sentry.ingestConsumer.affinity | indent 8 }} + {{- if .Values.sentry.ingestMonitors.affinity }} +{{ toYaml .Values.sentry.ingestMonitors.affinity | indent 8 }} {{- end }} - {{- if .Values.sentry.ingestConsumer.nodeSelector }} + {{- if .Values.sentry.ingestMonitors.nodeSelector }} nodeSelector: -{{ toYaml .Values.sentry.ingestConsumer.nodeSelector | indent 8 }} +{{ toYaml .Values.sentry.ingestMonitors.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} - {{- if .Values.sentry.ingestConsumer.tolerations }} + {{- if .Values.sentry.ingestMonitors.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestMonitors.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} tolerations: -{{ toYaml .Values.sentry.ingestConsumer.tolerations | indent 8 }} +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestMonitors.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestMonitors.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -66,45 +77,47 @@ spec: dnsConfig: {{ toYaml .Values.dnsConfig | indent 8 }} {{- end }} - {{- if .Values.sentry.ingestConsumer.securityContext }} + {{- if .Values.sentry.ingestMonitors.securityContext }} securityContext: -{{ toYaml .Values.sentry.ingestConsumer.securityContext | indent 8 }} +{{ toYaml .Values.sentry.ingestMonitors.securityContext | indent 8 }} {{- end }} containers: 
- - name: {{ .Chart.Name }}-ingest-consumer + - name: {{ .Chart.Name }}-ingest-monitors image: "{{ template "sentry.image" . }}" imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} command: ["sentry"] args: - "run" - - "ingest-consumer" - - "--all-consumer-types" - {{- if .Values.sentry.ingestConsumer.concurrency }} - - "--concurrency" - - "{{ .Values.sentry.ingestConsumer.concurrency }}" + - "consumer" + - "ingest-monitors" + - "--consumer-group" + - "ingest-monitors" + {{- if .Values.sentry.ingestMonitors.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestMonitors.autoOffsetReset }}" {{- end }} - {{- if .Values.sentry.ingestConsumer.maxBatchSize }} - - "--max-batch-size" - - "{{ .Values.sentry.ingestConsumer.maxBatchSize }}" + {{- if .Values.sentry.ingestMonitors.noStrictOffsetReset }} + - "--no-strict-offset-reset" {{- end }} + {{- if .Values.sentry.ingestMonitors.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestMonitors.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestMonitors.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestMonitors.livenessProbe.periodSeconds }} + {{- end }} env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - name: C_FORCE_ROOT value: "true" - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} -{{- if .Values.sentry.ingestConsumer.env }} -{{ toYaml .Values.sentry.ingestConsumer.env | indent 8 }} +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.ingestMonitors.env }} +{{ toYaml .Values.sentry.ingestMonitors.env | indent 8 }} {{- end }} volumeMounts: - mountPath: /etc/sentry @@ -116,16 +129,23 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} -{{- if .Values.sentry.ingestConsumer.volumeMounts }} -{{ toYaml .Values.sentry.ingestConsumer.volumeMounts | indent 8 }} +{{- if .Values.sentry.ingestMonitors.volumeMounts }} +{{ toYaml .Values.sentry.ingestMonitors.volumeMounts | indent 8 }} {{- end }} resources: -{{ toYaml .Values.sentry.ingestConsumer.resources | indent 12 }} -{{- if .Values.sentry.ingestConsumer.sidecars }} -{{ toYaml .Values.sentry.ingestConsumer.sidecars | indent 6 }} +{{ toYaml .Values.sentry.ingestMonitors.resources | indent 12 }} +{{- if .Values.sentry.ingestMonitors.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestMonitors.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestMonitors.sidecars }} +{{ toYaml .Values.sentry.ingestMonitors.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} - serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-consumer + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-monitors {{- end }} volumes: - name: config @@ -148,9 +168,13 @@ spec: secret: secretName: {{ .Values.filestore.gcs.secretName }} {{ end }} -{{- if .Values.sentry.ingestConsumer.volumes }} -{{ toYaml .Values.sentry.ingestConsumer.volumes | indent 6 }} +{{- if .Values.sentry.ingestMonitors.volumes }} +{{ toYaml .Values.sentry.ingestMonitors.volumes | indent 6 }} {{- end }} - {{- if .Values.sentry.ingestConsumer.priorityClassName }} - priorityClassName: "{{ .Values.sentry.ingestConsumer.priorityClassName }}" +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestMonitors.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestMonitors.priorityClassName }}" {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/monitors/serviceaccount-sentry-ingest-monitors.yaml b/charts/sentry/templates/sentry/ingest/monitors/serviceaccount-sentry-ingest-monitors.yaml new file mode 100644 index 000000000..a1896c1dc --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/monitors/serviceaccount-sentry-ingest-monitors.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestMonitors.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-monitors +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/occurrences/deployment-sentry-ingest-occurrences.yaml b/charts/sentry/templates/sentry/ingest/occurrences/deployment-sentry-ingest-occurrences.yaml new file mode 100644 index 000000000..10963904e --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/occurrences/deployment-sentry-ingest-occurrences.yaml @@ -0,0 +1,180 @@ +{{- if .Values.sentry.ingestOccurrences.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-occurrences + labels: + app: {{ template "sentry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-occurrences +{{- if not .Values.sentry.ingestOccurrences.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestOccurrences.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestOccurrences.annotations }} +{{ toYaml .Values.sentry.ingestOccurrences.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-occurrences + {{- if .Values.sentry.ingestOccurrences.podLabels }} +{{ toYaml .Values.sentry.ingestOccurrences.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestOccurrences.affinity }} +{{ toYaml .Values.sentry.ingestOccurrences.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestOccurrences.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestOccurrences.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestOccurrences.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestOccurrences.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestOccurrences.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestOccurrences.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestOccurrences.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestOccurrences.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-occurrences + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-occurrences" + - "--consumer-group" + - "ingest-occurrences" + {{- if .Values.sentry.ingestOccurrences.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestOccurrences.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestOccurrences.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestOccurrences.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestOccurrences.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestOccurrences.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestOccurrences.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.ingestOccurrences.env }} +{{ toYaml .Values.sentry.ingestOccurrences.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestOccurrences.volumeMounts }} +{{ toYaml .Values.sentry.ingestOccurrences.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestOccurrences.resources | indent 12 }} +{{- if .Values.sentry.ingestOccurrences.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestOccurrences.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestOccurrences.sidecars }} +{{ toYaml .Values.sentry.ingestOccurrences.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-occurrences + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.ingestOccurrences.volumes }} +{{ toYaml .Values.sentry.ingestOccurrences.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestOccurrences.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestOccurrences.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/occurrences/serviceaccount-sentry-ingest-occurrences.yaml b/charts/sentry/templates/sentry/ingest/occurrences/serviceaccount-sentry-ingest-occurrences.yaml new file mode 100644 index 000000000..a5f227a38 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/occurrences/serviceaccount-sentry-ingest-occurrences.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestOccurrences.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-occurrences +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/profiles/deployment-sentry-ingest-profiles.yaml b/charts/sentry/templates/sentry/ingest/profiles/deployment-sentry-ingest-profiles.yaml new file mode 100644 index 000000000..bc89b6dff --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/profiles/deployment-sentry-ingest-profiles.yaml @@ -0,0 +1,180 @@ +{{- if .Values.sentry.features.enableProfiling }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-profiles + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-profiles +{{- if not .Values.sentry.ingestProfiles.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestProfiles.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestProfiles.annotations }} +{{ toYaml .Values.sentry.ingestProfiles.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: ingest-profiles + {{- if .Values.sentry.ingestProfiles.podLabels }} +{{ toYaml .Values.sentry.ingestProfiles.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestProfiles.affinity }} +{{ toYaml .Values.sentry.ingestProfiles.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestProfiles.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestProfiles.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestProfiles.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestProfiles.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestProfiles.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestProfiles.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestProfiles.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestProfiles.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-profiles + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-profiles" + - "--consumer-group" + - "ingest-profiles" + {{- if .Values.sentry.ingestProfiles.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestProfiles.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestProfiles.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestProfiles.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestProfiles.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestProfiles.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestProfiles.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.ingestProfiles.env }} +{{ toYaml .Values.sentry.ingestProfiles.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestProfiles.volumeMounts }} +{{ toYaml .Values.sentry.ingestProfiles.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestProfiles.resources | indent 12 }} +{{- if .Values.sentry.ingestProfiles.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestProfiles.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestProfiles.sidecars }} +{{ toYaml .Values.sentry.ingestProfiles.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-profiles + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . }}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.ingestProfiles.volumes }} +{{ toYaml .Values.sentry.ingestProfiles.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestProfiles.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestProfiles.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/profiles/serviceaccount-sentry-ingest-profiles.yaml b/charts/sentry/templates/sentry/ingest/profiles/serviceaccount-sentry-ingest-profiles.yaml new file mode 100644 index 000000000..725e5f863 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/profiles/serviceaccount-sentry-ingest-profiles.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.features.enableProfiling }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-profiles +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/replay-recordings/deployment-sentry-ingest-replay-recordings.yaml b/charts/sentry/templates/sentry/ingest/replay-recordings/deployment-sentry-ingest-replay-recordings.yaml new file mode 100644 index 000000000..c1969c347 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/replay-recordings/deployment-sentry-ingest-replay-recordings.yaml @@ 
-0,0 +1,180 @@ +{{- if .Values.sentry.ingestReplayRecordings.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-replay-recordings + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-replay-recordings +{{- if not .Values.sentry.ingestReplayRecordings.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestReplayRecordings.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestReplayRecordings.annotations }} +{{ toYaml .Values.sentry.ingestReplayRecordings.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-replay-recordings + {{- if .Values.sentry.ingestReplayRecordings.podLabels }} +{{ toYaml .Values.sentry.ingestReplayRecordings.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestReplayRecordings.affinity }} +{{ toYaml .Values.sentry.ingestReplayRecordings.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestReplayRecordings.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestReplayRecordings.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestReplayRecordings.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestReplayRecordings.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-replay-recordings + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-replay-recordings" + - "--consumer-group" + - "ingest-replay-recordings" + {{- if .Values.sentry.ingestReplayRecordings.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestReplayRecordings.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestReplayRecordings.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestReplayRecordings.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestReplayRecordings.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.ingestReplayRecordings.env }} +{{ toYaml .Values.sentry.ingestReplayRecordings.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestReplayRecordings.volumeMounts }} +{{ toYaml .Values.sentry.ingestReplayRecordings.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestReplayRecordings.resources | indent 12 }} +{{- if .Values.sentry.ingestReplayRecordings.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestReplayRecordings.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestReplayRecordings.sidecars }} +{{ toYaml .Values.sentry.ingestReplayRecordings.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-replay-recordings + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.ingestReplayRecordings.volumes }} +{{ toYaml .Values.sentry.ingestReplayRecordings.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestReplayRecordings.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestReplayRecordings.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/replay-recordings/serviceaccount-sentry-ingest-replay-recordings.yaml b/charts/sentry/templates/sentry/ingest/replay-recordings/serviceaccount-sentry-ingest-replay-recordings.yaml new file mode 100644 index 000000000..1b977de24 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/replay-recordings/serviceaccount-sentry-ingest-replay-recordings.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestReplayRecordings.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-replay-recordings +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/transactions/deployment-sentry-ingest-consumer-transactions.yaml b/charts/sentry/templates/sentry/ingest/transactions/deployment-sentry-ingest-consumer-transactions.yaml new file mode 100644 index 000000000..c2df0c68d --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/transactions/deployment-sentry-ingest-consumer-transactions.yaml @@ -0,0 +1,195 @@ +{{- if .Values.sentry.ingestConsumerTransactions.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-ingest-consumer-transactions + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: ingest-consumer-transactions +{{- if not .Values.sentry.ingestConsumerTransactions.autoscaling.enabled }} + replicas: {{ .Values.sentry.ingestConsumerTransactions.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.ingestConsumerTransactions.annotations }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: ingest-consumer-transactions + {{- if .Values.sentry.ingestConsumerTransactions.podLabels }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.ingestConsumerTransactions.affinity }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.ingestConsumerTransactions.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.tolerations }} + tolerations: +{{ toYaml .Values.sentry.ingestConsumerTransactions.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.ingestConsumerTransactions.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.securityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerTransactions.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-ingest-consumer-transactions + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-transactions" + - "--consumer-group" + - "ingest-consumer" + {{- if .Values.sentry.ingestConsumerTransactions.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.ingestConsumerTransactions.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.logLevel }} + - "--log-level" + - "{{ .Values.sentry.ingestConsumerTransactions.logLevel }}" + {{- end }} + - "--" + {{- if .Values.sentry.ingestConsumerTransactions.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.sentry.ingestConsumerTransactions.maxBatchSize }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.sentry.ingestConsumerTransactions.inputBlockSize }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.sentry.ingestConsumerTransactions.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.concurrency }} + - "--processes" + - "{{ .Values.sentry.ingestConsumerTransactions.concurrency }}" + {{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.ingestConsumerTransactions.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.ingestConsumerTransactions.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.ingestConsumerTransactions.env }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.ingestConsumerTransactions.volumeMounts }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.ingestConsumerTransactions.resources | indent 12 }} +{{- if .Values.sentry.ingestConsumerTransactions.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.ingestConsumerTransactions.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.ingestConsumerTransactions.sidecars }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-ingest-consumer-transactions + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . }}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.ingestConsumerTransactions.volumes }} +{{ toYaml .Values.sentry.ingestConsumerTransactions.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.ingestConsumerTransactions.priorityClassName }} + priorityClassName: "{{ .Values.sentry.ingestConsumerTransactions.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/transactions/hpa-ingest-consumer-transactions.yaml b/charts/sentry/templates/sentry/ingest/transactions/hpa-ingest-consumer-transactions.yaml new file mode 100644 index 000000000..f854c3ea8 --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/transactions/hpa-ingest-consumer-transactions.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.ingestConsumerTransactions.enabled .Values.sentry.ingestConsumerTransactions.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-ingest-consumer-transactions +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-ingest-consumer-transactions + minReplicas: {{ .Values.sentry.ingestConsumerTransactions.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.ingestConsumerTransactions.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.ingestConsumerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-ingest-consumer-transactions + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.ingestConsumerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/ingest/transactions/serviceaccount-sentry-ingest-consumer-transactions.yaml b/charts/sentry/templates/sentry/ingest/transactions/serviceaccount-sentry-ingest-consumer-transactions.yaml new file mode 100644 index 000000000..57121995c --- /dev/null +++ b/charts/sentry/templates/sentry/ingest/transactions/serviceaccount-sentry-ingest-consumer-transactions.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.ingestConsumerTransactions.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-ingest-consumer-transactions +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/metrics/billing/deployment-sentry-billing-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/billing/deployment-sentry-billing-metrics-consumer.yaml new file mode 100644 index 000000000..082453add --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/billing/deployment-sentry-billing-metrics-consumer.yaml @@ -0,0 +1,181 @@ +{{- if .Values.sentry.billingMetricsConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-billing-metrics-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: billing-metrics-consumer +{{- if not .Values.sentry.billingMetricsConsumer.autoscaling.enabled }} + replicas: {{ .Values.sentry.billingMetricsConsumer.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.billingMetricsConsumer.annotations }} +{{ toYaml .Values.sentry.billingMetricsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: billing-metrics-consumer + {{- if .Values.sentry.billingMetricsConsumer.podLabels }} +{{ toYaml .Values.sentry.billingMetricsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.billingMetricsConsumer.affinity }} +{{ toYaml .Values.sentry.billingMetricsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.billingMetricsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.sentry.billingMetricsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.billingMetricsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.sentry.billingMetricsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-billing-metrics-consumer + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "billing-metrics-consumer" + - "--consumer-group" + - "billing-metrics-consumer" + {{- if .Values.sentry.billingMetricsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.billingMetricsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.billingMetricsConsumer.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + - "--" + {{- if .Values.sentry.billingMetricsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.billingMetricsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.billingMetricsConsumer.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.billingMetricsConsumer.env }} +{{ toYaml .Values.sentry.billingMetricsConsumer.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.billingMetricsConsumer.volumeMounts }} +{{ toYaml .Values.sentry.billingMetricsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.billingMetricsConsumer.resources | indent 12 }} +{{- if .Values.sentry.billingMetricsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.billingMetricsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.billingMetricsConsumer.sidecars }} +{{ toYaml .Values.sentry.billingMetricsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-billing-metrics-consumer + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.billingMetricsConsumer.volumes }} +{{ toYaml .Values.sentry.billingMetricsConsumer.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.billingMetricsConsumer.priorityClassName }} + priorityClassName: "{{ .Values.sentry.billingMetricsConsumer.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/metrics/billing/serviceaccount-sentry-billing-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/billing/serviceaccount-sentry-billing-metrics-consumer.yaml new file mode 100644 index 000000000..950164a58 --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/billing/serviceaccount-sentry-billing-metrics-consumer.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.billingMetricsConsumer.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-billing-metrics-consumer +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sentry/templates/deployment-metrics.yaml b/charts/sentry/templates/sentry/metrics/deployment-metrics.yaml similarity index 73% rename from sentry/templates/deployment-metrics.yaml rename to charts/sentry/templates/sentry/metrics/deployment-metrics.yaml index f6b1af584..2e20fe3fb 100644 --- a/sentry/templates/deployment-metrics.yaml +++ b/charts/sentry/templates/sentry/metrics/deployment-metrics.yaml @@ -21,14 +21,17 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} - {{- if .Values.sentry.web.annotations }} -{{ toYaml .Values.sentry.web.annotations | indent 8 }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.metrics.podAnnotations }} +{{ toYaml .Values.metrics.podAnnotations | indent 8 }} {{- end }} labels: app: {{ template "sentry.fullname" . 
}}-metrics release: "{{ .Release.Name }}" role: metrics + {{- if .Values.metrics.podLabels }} +{{ toYaml .Values.metrics.podLabels | indent 8 }} + {{- end }} spec: {{- if .Values.metrics.affinity }} affinity: @@ -37,10 +40,16 @@ spec: {{- if .Values.metrics.nodeSelector }} nodeSelector: {{ toYaml .Values.metrics.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.metrics.tolerations }} tolerations: {{ toYaml .Values.metrics.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} {{- end }} {{- if .Values.metrics.schedulerName }} schedulerName: "{{ .Values.metrics.schedulerName }}" @@ -68,6 +77,10 @@ spec: containerPort: 9125 - name: metrics containerPort: 9102 +{{- if .Values.metrics.volumeMounts }} + volumeMounts: +{{ toYaml .Values.metrics.volumeMounts | indent 8 }} +{{- end }} {{- if .Values.metrics.livenessProbe.enabled }} livenessProbe: httpGet: @@ -94,6 +107,26 @@ spec: {{- end }} resources: {{ toYaml .Values.metrics.resources | indent 10 }} +{{- if .Values.metrics.containerSecurityContext }} + securityContext: +{{ toYaml .Values.metrics.containerSecurityContext | indent 10 }} +{{- end }} +{{- if .Values.metrics.sidecars }} +{{ toYaml .Values.metrics.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} +{{- if or .Values.metrics.volumes .Values.global.volumes }} + volumes: +{{- if .Values.metrics.volumes }} +{{ toYaml .Values.metrics.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-metrics {{- end }} diff --git a/charts/sentry/templates/sentry/metrics/deployment-sentry-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/deployment-sentry-metrics-consumer.yaml new file mode 100644 index 000000000..17894a419 --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/deployment-sentry-metrics-consumer.yaml @@ -0,0 +1,191 @@ +{{- if .Values.sentry.metricsConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-metrics-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: metrics-consumer + replicas: {{ .Values.sentry.metricsConsumer.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} + {{- if .Values.sentry.metricsConsumer.annotations }} +{{ toYaml .Values.sentry.metricsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: metrics-consumer + {{- if .Values.sentry.metricsConsumer.podLabels }} +{{ toYaml .Values.sentry.metricsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.metricsConsumer.affinity }} +{{ toYaml .Values.sentry.metricsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.metricsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.metricsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.metricsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.sentry.metricsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.metricsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.metricsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.metricsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.sentry.metricsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-metrics-consumer + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-metrics" + - "--consumer-group" + - "metrics-consumer" + {{- if .Values.sentry.metricsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.metricsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.metricsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.metricsConsumer.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.metricsConsumer.logLevel }} + - "--log-level" + - "{{ .Values.sentry.metricsConsumer.logLevel }}" + {{- end }} + {{- if .Values.sentry.metricsConsumer.maxPollIntervalMs }} + - "--max-poll-interval-ms" + - "{{ .Values.sentry.metricsConsumer.maxPollIntervalMs }}" + {{- end }} + - "--" + {{- if .Values.sentry.metricsConsumer.concurrency }} + - "--processes" + - "{{ .Values.sentry.metricsConsumer.concurrency }}" + {{- end }} + {{- if .Values.sentry.metricsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.metricsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.metricsConsumer.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.metricsConsumer.env }} +{{ toYaml .Values.sentry.metricsConsumer.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.metricsConsumer.volumeMounts }} +{{ toYaml .Values.sentry.metricsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.metricsConsumer.resources | indent 12 }} +{{- if .Values.sentry.metricsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.metricsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.metricsConsumer.sidecars }} +{{ toYaml .Values.sentry.metricsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-metrics-consumer + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . }}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.metricsConsumer.volumes }} +{{ toYaml .Values.sentry.metricsConsumer.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.metricsConsumer.priorityClassName }} + priorityClassName: "{{ .Values.sentry.metricsConsumer.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/metrics/generic/deployment-sentry-generic-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/generic/deployment-sentry-generic-metrics-consumer.yaml new file mode 100644 index 000000000..8f415b5c2 --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/generic/deployment-sentry-generic-metrics-consumer.yaml @@ -0,0 +1,191 @@ +{{- if .Values.sentry.genericMetricsConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-generic-metrics-consumer + labels: + app: {{ template "sentry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: generic-metrics-consumer + replicas: {{ .Values.sentry.genericMetricsConsumer.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.genericMetricsConsumer.annotations }} +{{ toYaml .Values.sentry.genericMetricsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: generic-metrics-consumer + {{- if .Values.sentry.genericMetricsConsumer.podLabels }} +{{ toYaml .Values.sentry.genericMetricsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.genericMetricsConsumer.affinity }} +{{ toYaml .Values.sentry.genericMetricsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.genericMetricsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.sentry.genericMetricsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.genericMetricsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.sentry.genericMetricsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-generic-metrics-consumer + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "ingest-generic-metrics" + {{- if .Values.sentry.genericMetricsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.genericMetricsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + - "--consumer-group" + - "generic-metrics-consumer" + {{- if .Values.sentry.genericMetricsConsumer.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.logLevel }} + - "--log-level" + - "{{ .Values.sentry.genericMetricsConsumer.logLevel }}" + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.maxPollIntervalMs }} + - "--max-poll-interval-ms" + - "{{ .Values.sentry.genericMetricsConsumer.maxPollIntervalMs }}" + {{- end }} + - "--" + {{- if .Values.sentry.genericMetricsConsumer.concurrency }} + - "--processes" + - "{{ .Values.sentry.genericMetricsConsumer.concurrency }}" + {{- end }} + {{- if .Values.sentry.genericMetricsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.genericMetricsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.genericMetricsConsumer.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.genericMetricsConsumer.env }} +{{ toYaml .Values.sentry.genericMetricsConsumer.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.genericMetricsConsumer.volumeMounts }} +{{ toYaml .Values.sentry.genericMetricsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.genericMetricsConsumer.resources | indent 12 }} +{{- if .Values.sentry.genericMetricsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.genericMetricsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.genericMetricsConsumer.sidecars }} +{{ toYaml .Values.sentry.genericMetricsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-generic-metrics-consumer + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.genericMetricsConsumer.volumes }} +{{ toYaml .Values.sentry.genericMetricsConsumer.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.genericMetricsConsumer.priorityClassName }} + priorityClassName: "{{ .Values.sentry.genericMetricsConsumer.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/metrics/generic/serviceaccount-sentry-generic-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/generic/serviceaccount-sentry-generic-metrics-consumer.yaml new file mode 100644 index 000000000..794b42865 --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/generic/serviceaccount-sentry-generic-metrics-consumer.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.genericMetricsConsumer.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-generic-metrics-consumer +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sentry/templates/service-metrics.yaml b/charts/sentry/templates/sentry/metrics/service-metrics.yaml similarity index 98% rename from sentry/templates/service-metrics.yaml rename to charts/sentry/templates/sentry/metrics/service-metrics.yaml index fc5eaf523..bf8087d35 100644 --- a/sentry/templates/service-metrics.yaml +++ b/charts/sentry/templates/sentry/metrics/service-metrics.yaml @@ -30,4 +30,4 @@ spec: app: {{ template "sentry.fullname" . 
}}-metrics release: {{ .Release.Name }} role: metrics -{{- end }} \ No newline at end of file +{{- end }} diff --git a/sentry/templates/serviceaccount-metrics.yaml b/charts/sentry/templates/sentry/metrics/serviceaccount-metrics.yaml similarity index 97% rename from sentry/templates/serviceaccount-metrics.yaml rename to charts/sentry/templates/sentry/metrics/serviceaccount-metrics.yaml index 302ca431d..f31772385 100644 --- a/sentry/templates/serviceaccount-metrics.yaml +++ b/charts/sentry/templates/sentry/metrics/serviceaccount-metrics.yaml @@ -7,4 +7,4 @@ metadata: annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/sentry/metrics/serviceaccount-sentry-metrics-consumer.yaml b/charts/sentry/templates/sentry/metrics/serviceaccount-sentry-metrics-consumer.yaml new file mode 100644 index 000000000..2bc83bd10 --- /dev/null +++ b/charts/sentry/templates/sentry/metrics/serviceaccount-sentry-metrics-consumer.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.metricsConsumer.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-metrics-consumer +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sentry/templates/servicemonitor-metrics.yaml b/charts/sentry/templates/sentry/metrics/servicemonitor-metrics.yaml similarity index 78% rename from sentry/templates/servicemonitor-metrics.yaml rename to charts/sentry/templates/sentry/metrics/servicemonitor-metrics.yaml index c05d44711..4e3027397 100644 --- a/sentry/templates/servicemonitor-metrics.yaml +++ b/charts/sentry/templates/sentry/metrics/servicemonitor-metrics.yaml @@ -21,6 +21,12 @@ spec: {{- if .Values.metrics.serviceMonitor.honorLabels }} honorLabels: true {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} {{- if .Values.metrics.serviceMonitor.namespaceSelector }} namespaceSelector: {{ toYaml .Values.metrics.serviceMonitor.namespaceSelector | indent 4 -}} @@ -34,4 +40,4 @@ spec: app: {{ template "sentry.fullname" . }}-metrics release: "{{ .Release.Name }}" role: metrics -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/sentry/post-process-forwarder/errors/deployment-sentry-post-process-forwarder-errors.yaml b/charts/sentry/templates/sentry/post-process-forwarder/errors/deployment-sentry-post-process-forwarder-errors.yaml new file mode 100644 index 000000000..c9a4649de --- /dev/null +++ b/charts/sentry/templates/sentry/post-process-forwarder/errors/deployment-sentry-post-process-forwarder-errors.yaml @@ -0,0 +1,178 @@ +{{- if .Values.sentry.postProcessForwardErrors.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . 
}}-post-process-forward-errors + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-errors + replicas: {{ .Values.sentry.postProcessForwardErrors.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.postProcessForwardErrors.annotations }} +{{ toYaml .Values.sentry.postProcessForwardErrors.annotations | indent 8 }} + {{- end }} + labels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-errors + {{- if .Values.sentry.postProcessForwardErrors.podLabels }} +{{ toYaml .Values.sentry.postProcessForwardErrors.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.postProcessForwardErrors.affinity }} +{{ toYaml .Values.sentry.postProcessForwardErrors.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.postProcessForwardErrors.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.tolerations }} + tolerations: +{{ toYaml .Values.sentry.postProcessForwardErrors.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.postProcessForwardErrors.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.securityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardErrors.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-post-process-forward-errors + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "post-process-forwarder-errors" + - "--consumer-group" + - "post-process-forwarder" + - "--synchronize-commit-log-topic={{ default "" ((.Values.kafkaTopicOverrides).prefix) }}snuba-commit-log" + - "--synchronize-commit-group=snuba-consumers" + {{- if .Values.sentry.postProcessForwardErrors.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.postProcessForwardErrors.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.postProcessForwardErrors.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.postProcessForwardErrors.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.postProcessForwardErrors.livenessProbe.periodSeconds }} + {{- end }} + env: +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.postProcessForwardErrors.env }} +{{ toYaml .Values.sentry.postProcessForwardErrors.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.postProcessForwardErrors.volumeMounts }} +{{ toYaml .Values.sentry.postProcessForwardErrors.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.postProcessForwardErrors.resources | indent 12 }} +{{- if .Values.sentry.postProcessForwardErrors.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardErrors.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.postProcessForwardErrors.sidecars }} +{{ toYaml .Values.sentry.postProcessForwardErrors.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-post-process-forwarder-errors + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.postProcessForwardErrors.volumes }} +{{ toYaml .Values.sentry.postProcessForwardErrors.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.postProcessForwardErrors.priorityClassName }} + priorityClassName: "{{ .Values.sentry.postProcessForwardErrors.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-post-process-forwarder.yaml b/charts/sentry/templates/sentry/post-process-forwarder/errors/serviceaccount-sentry-post-process-forwarder-errors.yaml similarity index 77% rename from sentry/templates/serviceaccount-sentry-post-process-forwarder.yaml rename to charts/sentry/templates/sentry/post-process-forwarder/errors/serviceaccount-sentry-post-process-forwarder-errors.yaml index 87132901d..35b243cf7 100644 --- a/sentry/templates/serviceaccount-sentry-post-process-forwarder.yaml +++ b/charts/sentry/templates/sentry/post-process-forwarder/errors/serviceaccount-sentry-post-process-forwarder-errors.yaml @@ -1,8 +1,8 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.postProcessForwardErrors.enabled }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.serviceAccount.name }}-post-process-forwarder + name: {{ .Values.serviceAccount.name }}-post-process-forwarder-errors {{- if .Values.serviceAccount.annotations }} annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} diff --git a/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/deployment-sentry-post-process-forwarder-issue-platform.yaml b/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/deployment-sentry-post-process-forwarder-issue-platform.yaml new file mode 100644 index 000000000..493a330bc --- /dev/null +++ b/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/deployment-sentry-post-process-forwarder-issue-platform.yaml @@ -0,0 +1,182 @@ +{{- if .Values.sentry.postProcessForwardIssuePlatform.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-post-process-forward-issue-platform + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-issue-platform + replicas: {{ .Values.sentry.postProcessForwardIssuePlatform.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.annotations }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.annotations | indent 8 }} + {{- end }} + labels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-issue-platform + {{- if .Values.sentry.postProcessForwardIssuePlatform.podLabels }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.postProcessForwardIssuePlatform.affinity }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.tolerations }} + tolerations: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.securityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-post-process-forward-issue-platform + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "post-process-forwarder-issue-platform" + - "--consumer-group" + - "post-process-forwarder" + - "--synchronize-commit-log-topic={{ default "" ((.Values.kafkaTopicOverrides).prefix) }}snuba-generic-events-commit-log" + - "--synchronize-commit-group" + - "generic_events_group" + {{- if .Values.sentry.postProcessForwardIssuePlatform.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.postProcessForwardIssuePlatform.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.postProcessForwardIssuePlatform.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.postProcessForwardIssuePlatform.livenessProbe.periodSeconds }} + {{- end }} + env: +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.postProcessForwardIssuePlatform.env }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.postProcessForwardIssuePlatform.volumeMounts }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.volumeMounts | indent 8 }} +{{- end }} +{{- if .Values.global.volumeMounts }} +{{ toYaml .Values.global.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.resources | indent 12 }} +{{- if .Values.sentry.postProcessForwardIssuePlatform.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.postProcessForwardIssuePlatform.sidecars }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-post-process-forwarder-issue-platform + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.postProcessForwardIssuePlatform.volumes }} +{{ toYaml .Values.sentry.postProcessForwardIssuePlatform.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.postProcessForwardIssuePlatform.priorityClassName }} + priorityClassName: "{{ .Values.sentry.postProcessForwardIssuePlatform.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/serviceaccount-sentry-post-process-forwarder-issue-platform.yaml b/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/serviceaccount-sentry-post-process-forwarder-issue-platform.yaml new file mode 100644 index 000000000..c882b5479 --- /dev/null +++ b/charts/sentry/templates/sentry/post-process-forwarder/issue-platform/serviceaccount-sentry-post-process-forwarder-issue-platform.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.postProcessForwardIssuePlatform.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-post-process-forwarder-issue-platform +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/post-process-forwarder/transactions/deployment-sentry-post-process-forwarder-transactions.yaml b/charts/sentry/templates/sentry/post-process-forwarder/transactions/deployment-sentry-post-process-forwarder-transactions.yaml new file mode 100644 index 000000000..ddbbc4eff --- /dev/null +++ b/charts/sentry/templates/sentry/post-process-forwarder/transactions/deployment-sentry-post-process-forwarder-transactions.yaml @@ -0,0 +1,186 @@ +{{- if .Values.sentry.postProcessForwardTransactions.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-post-process-forward-transactions + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-transactions + replicas: {{ .Values.sentry.postProcessForwardTransactions.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} + {{- if .Values.sentry.postProcessForwardTransactions.annotations }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.annotations | indent 8 }} + {{- end }} + labels: + app: sentry + release: "{{ .Release.Name }}" + role: sentry-post-process-forward-transactions + {{- if .Values.sentry.postProcessForwardTransactions.podLabels }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.postProcessForwardTransactions.affinity }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.postProcessForwardTransactions.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.tolerations }} + tolerations: +{{ toYaml .Values.sentry.postProcessForwardTransactions.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.postProcessForwardTransactions.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.securityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardTransactions.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-post-process-forward-transactions + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "post-process-forwarder-transactions" + - "--consumer-group" + - "post-process-forwarder" + - "--synchronize-commit-log-topic={{ default "" ((.Values.kafkaTopicOverrides).prefix) }}snuba-transactions-commit-log" + - "--synchronize-commit-group" + - "transactions_group" + {{- if .Values.sentry.postProcessForwardTransactions.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.postProcessForwardTransactions.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + - "--" + {{- if .Values.sentry.postProcessForwardTransactions.processes }} + - "--mode" + - "multiprocess" + - "--processes" + - "{{ .Values.sentry.postProcessForwardTransactions.processes }}" + {{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.postProcessForwardTransactions.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.postProcessForwardTransactions.livenessProbe.periodSeconds }} + {{- end }} + env: +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.postProcessForwardTransactions.env }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.postProcessForwardTransactions.volumeMounts }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.postProcessForwardTransactions.resources | indent 12 }} +{{- if .Values.sentry.postProcessForwardTransactions.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.postProcessForwardTransactions.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.postProcessForwardTransactions.sidecars }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-post-process-forwarder-transactions + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.postProcessForwardTransactions.volumes }} +{{ toYaml .Values.sentry.postProcessForwardTransactions.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.postProcessForwardTransactions.priorityClassName }} + priorityClassName: "{{ .Values.sentry.postProcessForwardTransactions.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/post-process-forwarder/transactions/serviceaccount-sentry-post-process-forwarder-transactions.yaml b/charts/sentry/templates/sentry/post-process-forwarder/transactions/serviceaccount-sentry-post-process-forwarder-transactions.yaml new file mode 100644 index 000000000..4689e4595 --- /dev/null +++ b/charts/sentry/templates/sentry/post-process-forwarder/transactions/serviceaccount-sentry-post-process-forwarder-transactions.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.postProcessForwardTransactions.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-post-process-forwarder-transactions +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sentry/templates/deployment-sentry-subscription-consumer-events.yaml b/charts/sentry/templates/sentry/subscription-consumer/events/deployment-sentry-subscription-consumer-events.yaml similarity index 69% rename from sentry/templates/deployment-sentry-subscription-consumer-events.yaml rename to charts/sentry/templates/sentry/subscription-consumer/events/deployment-sentry-subscription-consumer-events.yaml index 11f6423c2..7cf37b810 100644 --- a/sentry/templates/deployment-sentry-subscription-consumer-events.yaml +++ b/charts/sentry/templates/sentry/subscription-consumer/events/deployment-sentry-subscription-consumer-events.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.subscriptionConsumerEvents.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -29,7 +30,7 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} {{- if .Values.sentry.subscriptionConsumerEvents.annotations }} {{ toYaml .Values.sentry.subscriptionConsumerEvents.annotations | indent 8 }} {{- end }} @@ -48,10 +49,20 @@ spec: {{- if .Values.sentry.subscriptionConsumerEvents.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.subscriptionConsumerEvents.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.sentry.subscriptionConsumerEvents.tolerations }} tolerations: {{ toYaml .Values.sentry.subscriptionConsumerEvents.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerEvents.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.subscriptionConsumerEvents.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -68,25 +79,32 @@ spec: command: ["sentry"] args: - "run" - - "query-subscription-consumer" - - "--topic" + - "consumer" - "events-subscription-results" - - "--commit-batch-size" - - "{{ default "1" .Values.sentry.subscriptionConsumerEvents.commitBatchSize }}" - env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} + {{- if .Values.sentry.subscriptionConsumerEvents.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.subscriptionConsumerEvents.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerEvents.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + - "--consumer-group" + - "query-subscription-consumer" + {{- if .Values.sentry.subscriptionConsumerEvents.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerEvents.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.subscriptionConsumerEvents.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.subscriptionConsumerEvents.livenessProbe.periodSeconds }} {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} + env: +{{ include "sentry.env" . 
| indent 8 }} {{- if .Values.sentry.subscriptionConsumerEvents.env }} {{ toYaml .Values.sentry.subscriptionConsumerEvents.env | indent 8 }} {{- end }} @@ -100,10 +118,20 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.sentry.subscriptionConsumerEvents.volumeMounts }} +{{ toYaml .Values.sentry.subscriptionConsumerEvents.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.sentry.subscriptionConsumerEvents.resources | indent 12 }} +{{- if .Values.sentry.subscriptionConsumerEvents.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerEvents.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.sentry.subscriptionConsumerEvents.sidecars }} {{ toYaml .Values.sentry.subscriptionConsumerEvents.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-subscription-consumer-events @@ -131,7 +159,11 @@ spec: {{ end }} {{- if .Values.sentry.subscriptionConsumerEvents.volumes }} {{ toYaml .Values.sentry.subscriptionConsumerEvents.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} {{- end }} {{- if .Values.sentry.subscriptionConsumerEvents.priorityClassName }} priorityClassName: "{{ .Values.sentry.subscriptionConsumerEvents.priorityClassName }}" {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-subscription-consumer-events.yaml b/charts/sentry/templates/sentry/subscription-consumer/events/serviceaccount-sentry-subscription-consumer-events.yaml similarity index 78% rename from sentry/templates/serviceaccount-sentry-subscription-consumer-events.yaml rename to charts/sentry/templates/sentry/subscription-consumer/events/serviceaccount-sentry-subscription-consumer-events.yaml index 7678d9c14..ccd86337e 100644 --- a/sentry/templates/serviceaccount-sentry-subscription-consumer-events.yaml +++ b/charts/sentry/templates/sentry/subscription-consumer/events/serviceaccount-sentry-subscription-consumer-events.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.subscriptionConsumerEvents.enabled }} apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/deployment-sentry-subscription-consumer-generic-metrics.yaml b/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/deployment-sentry-subscription-consumer-generic-metrics.yaml new file mode 100644 index 000000000..35a7f7b54 --- /dev/null +++ b/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/deployment-sentry-subscription-consumer-generic-metrics.yaml @@ -0,0 +1,187 @@ +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-subscription-consumer-generic-metrics + labels: + app: {{ template "sentry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: subscription-consumer-generic-metrics + replicas: {{ .Values.sentry.subscriptionConsumerGenericMetrics.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.annotations }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: subscription-consumer-generic-metrics + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.podLabels }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.affinity }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.tolerations }} + tolerations: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.securityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-subscription-consumer-generic-metrics + image: "{{ template "sentry.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "generic-metrics-subscription-results" + - "--consumer-group" + - "query-subscription-consumer" + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.subscriptionConsumerGenericMetrics.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + - "--" + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.sentry.subscriptionConsumerGenericMetrics.maxBatchSize }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.concurrency }} + - "--processes" + - "{{ .Values.sentry.subscriptionConsumerGenericMetrics.concurrency }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.subscriptionConsumerGenericMetrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.subscriptionConsumerGenericMetrics.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . | indent 8 }} +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.env }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.volumeMounts }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.resources | indent 12 }} +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.sidecars }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-subscription-consumer-generic-metrics + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.subscriptionConsumerGenericMetrics.volumes }} +{{ toYaml .Values.sentry.subscriptionConsumerGenericMetrics.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.subscriptionConsumerGenericMetrics.priorityClassName }} + priorityClassName: "{{ .Values.sentry.subscriptionConsumerGenericMetrics.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/serviceaccount-sentry-subscription-consumer-generic-metrics.yaml b/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/serviceaccount-sentry-subscription-consumer-generic-metrics.yaml new file mode 100644 index 000000000..1c83405d1 --- /dev/null +++ b/charts/sentry/templates/sentry/subscription-consumer/generic-metrics/serviceaccount-sentry-subscription-consumer-generic-metrics.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.subscriptionConsumerGenericMetrics.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-subscription-consumer-generic-metrics +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/sentry/subscription-consumer/metrics/deployment-sentry-subscription-consumer-metrics.yaml b/charts/sentry/templates/sentry/subscription-consumer/metrics/deployment-sentry-subscription-consumer-metrics.yaml new file mode 100644 index 000000000..36604ee96 --- /dev/null +++ b/charts/sentry/templates/sentry/subscription-consumer/metrics/deployment-sentry-subscription-consumer-metrics.yaml @@ -0,0 +1,187 @@ +{{- if .Values.sentry.subscriptionConsumerMetrics.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-subscription-consumer-metrics + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "10" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: subscription-consumer-metrics + replicas: {{ .Values.sentry.subscriptionConsumerMetrics.replicas }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} + {{- if .Values.sentry.subscriptionConsumerMetrics.annotations }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: subscription-consumer-metrics + {{- if .Values.sentry.subscriptionConsumerMetrics.podLabels }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.subscriptionConsumerMetrics.affinity }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.tolerations }} + tolerations: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.securityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-subscription-consumer-metrics + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "consumer" + - "metrics-subscription-results" + - "--consumer-group" + - "query-subscription-consumer" + {{- if .Values.sentry.subscriptionConsumerMetrics.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.subscriptionConsumerMetrics.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + - "--" + {{- if .Values.sentry.subscriptionConsumerMetrics.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.sentry.subscriptionConsumerMetrics.maxBatchSize }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.concurrency }} + - "--processes" + - "{{ .Values.sentry.subscriptionConsumerMetrics.concurrency }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.subscriptionConsumerMetrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.subscriptionConsumerMetrics.livenessProbe.periodSeconds }} + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.subscriptionConsumerMetrics.env }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.subscriptionConsumerMetrics.volumeMounts }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.resources | indent 12 }} +{{- if .Values.sentry.subscriptionConsumerMetrics.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.subscriptionConsumerMetrics.sidecars }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-subscription-consumer-metrics + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.sentry.subscriptionConsumerMetrics.volumes }} +{{ toYaml .Values.sentry.subscriptionConsumerMetrics.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} + {{- if .Values.sentry.subscriptionConsumerMetrics.priorityClassName }} + priorityClassName: "{{ .Values.sentry.subscriptionConsumerMetrics.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/subscription-consumer/metrics/serviceaccount-sentry-subscription-consumer-metrics.yaml b/charts/sentry/templates/sentry/subscription-consumer/metrics/serviceaccount-sentry-subscription-consumer-metrics.yaml new file mode 100644 index 000000000..bc4c8259b --- /dev/null +++ b/charts/sentry/templates/sentry/subscription-consumer/metrics/serviceaccount-sentry-subscription-consumer-metrics.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled .Values.sentry.subscriptionConsumerMetrics.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-subscription-consumer-metrics +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sentry/templates/deployment-sentry-subscription-consumer-transactions.yaml b/charts/sentry/templates/sentry/subscription-consumer/transactions/deployment-sentry-subscription-consumer-transactions.yaml similarity index 69% rename from sentry/templates/deployment-sentry-subscription-consumer-transactions.yaml rename to charts/sentry/templates/sentry/subscription-consumer/transactions/deployment-sentry-subscription-consumer-transactions.yaml index 756c83ac0..91ac80646 100644 --- a/sentry/templates/deployment-sentry-subscription-consumer-transactions.yaml +++ b/charts/sentry/templates/sentry/subscription-consumer/transactions/deployment-sentry-subscription-consumer-transactions.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.subscriptionConsumerTransactions.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -29,7 +30,7 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} {{- if .Values.sentry.subscriptionConsumerTransactions.annotations }} {{ toYaml .Values.sentry.subscriptionConsumerTransactions.annotations | indent 8 }} {{- end }} @@ -48,10 +49,20 @@ spec: {{- if .Values.sentry.subscriptionConsumerTransactions.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.subscriptionConsumerTransactions.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.sentry.subscriptionConsumerTransactions.tolerations }} tolerations: {{ toYaml .Values.sentry.subscriptionConsumerTransactions.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.subscriptionConsumerTransactions.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.subscriptionConsumerTransactions.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -68,25 +79,32 @@ spec: command: ["sentry"] args: - "run" - - "query-subscription-consumer" - - "--topic" + - "consumer" - "transactions-subscription-results" - - "--commit-batch-size" - - "{{ default "1" .Values.sentry.subscriptionConsumerEvents.commitBatchSize }}" - env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} + - "--consumer-group" + - "query-subscription-consumer" + {{- if .Values.sentry.subscriptionConsumerTransactions.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.sentry.subscriptionConsumerTransactions.autoOffsetReset }}" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerTransactions.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerTransactions.livenessProbe.enabled }} + - "--healthcheck-file-path" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.sentry.subscriptionConsumerTransactions.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.sentry.subscriptionConsumerTransactions.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentry.subscriptionConsumerTransactions.livenessProbe.periodSeconds }} {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} + env: +{{ include "sentry.env" . 
| indent 8 }} {{- if .Values.sentry.subscriptionConsumerTransactions.env }} {{ toYaml .Values.sentry.subscriptionConsumerTransactions.env | indent 8 }} {{- end }} @@ -100,10 +118,20 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.sentry.subscriptionConsumerTransactions.volumeMounts }} +{{ toYaml .Values.sentry.subscriptionConsumerTransactions.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.sentry.subscriptionConsumerTransactions.resources | indent 12 }} +{{- if .Values.sentry.subscriptionConsumerTransactions.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.subscriptionConsumerTransactions.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.sentry.subscriptionConsumerTransactions.sidecars }} {{ toYaml .Values.sentry.subscriptionConsumerTransactions.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-subscription-consumer-transactions @@ -131,7 +159,11 @@ spec: {{ end }} {{- if .Values.sentry.subscriptionConsumerTransactions.volumes }} {{ toYaml .Values.sentry.subscriptionConsumerTransactions.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} {{- end }} {{- if .Values.sentry.subscriptionConsumerTransactions.priorityClassName }} priorityClassName: "{{ .Values.sentry.subscriptionConsumerTransactions.priorityClassName }}" {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-subscription-consumer-transactions.yaml b/charts/sentry/templates/sentry/subscription-consumer/transactions/serviceaccount-sentry-subscription-consumer-transactions.yaml similarity index 77% rename from sentry/templates/serviceaccount-sentry-subscription-consumer-transactions.yaml rename to charts/sentry/templates/sentry/subscription-consumer/transactions/serviceaccount-sentry-subscription-consumer-transactions.yaml index 32af7cb6c..ad6d886eb 100644 --- a/sentry/templates/serviceaccount-sentry-subscription-consumer-transactions.yaml +++ b/charts/sentry/templates/sentry/subscription-consumer/transactions/serviceaccount-sentry-subscription-consumer-transactions.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.subscriptionConsumerTransactions.enabled}} apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/sentry/templates/sentry/vroom/deployment-vroom.yaml b/charts/sentry/templates/sentry/vroom/deployment-vroom.yaml new file mode 100644 index 000000000..28cd20f9b --- /dev/null +++ b/charts/sentry/templates/sentry/vroom/deployment-vroom.yaml @@ -0,0 +1,141 @@ +{{- if .Values.sentry.features.enableProfiling }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-vroom + labels: + app: {{ template "sentry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "25" + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: vroom +{{- if not .Values.vroom.autoscaling.enabled }} + replicas: {{ .Values.vroom.replicas }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + template: + metadata: + annotations: + {{- if .Values.vroom.annotations }} +{{ toYaml .Values.vroom.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: vroom + {{- if .Values.vroom.podLabels }} +{{ toYaml .Values.vroom.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.vroom.affinity }} +{{ toYaml .Values.vroom.affinity | indent 8 }} + {{- end }} + {{- if .Values.vroom.nodeSelector }} + nodeSelector: +{{ toYaml .Values.vroom.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.vroom.tolerations }} + tolerations: +{{ toYaml .Values.vroom.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.vroom.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.vroom.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.vroom.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.vroom.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.vroom.securityContext }} + securityContext: +{{ toYaml .Values.vroom.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-vroom +{{- if .Values.vroom.args }} + args: +{{ toYaml .Values.vroom.args | indent 10 }} +{{- end }} + image: "{{ template "vroom.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.vroom.pullPolicy }} + ports: + - containerPort: {{ template "vroom.port" }} + env: + - name: VROOM_PORT + value: '{{ template "vroom.port" }}' +{{- if .Values.vroom.env }} +{{ toYaml .Values.vroom.env | indent 8 }} +{{- end }} +{{- if .Values.vroom.volumeMounts }} + volumeMounts: +{{ toYaml .Values.vroom.volumeMounts | indent 10 }} +{{- end }} + livenessProbe: + failureThreshold: {{ .Values.vroom.probeFailureThreshold }} + httpGet: + path: /health + port: {{ template "vroom.port" }} + scheme: HTTP + initialDelaySeconds: {{ .Values.vroom.probeInitialDelaySeconds }} + periodSeconds: {{ .Values.vroom.probePeriodSeconds }} + successThreshold: {{ .Values.vroom.probeSuccessThreshold }} + timeoutSeconds: {{ .Values.vroom.probeTimeoutSeconds }} + readinessProbe: + failureThreshold: {{ .Values.vroom.probeFailureThreshold }} + httpGet: + path: /health + port: {{ template "vroom.port" }} + scheme: HTTP + initialDelaySeconds: {{ .Values.vroom.probeInitialDelaySeconds }} + periodSeconds: {{ .Values.vroom.probePeriodSeconds }} + successThreshold: {{ .Values.vroom.probeSuccessThreshold }} + timeoutSeconds: {{ .Values.vroom.probeTimeoutSeconds }} + resources: +{{ toYaml .Values.vroom.resources | indent 12 }} +{{- if .Values.vroom.containerSecurityContext }} + securityContext: +{{ toYaml .Values.vroom.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.vroom.sidecars }} +{{ toYaml .Values.vroom.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-vroom + {{- end }} +{{- if or .Values.vroom.volumes .Values.global.volumes }} + volumes: +{{- if .Values.vroom.volumes }} +{{ toYaml .Values.vroom.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} + {{- if .Values.vroom.priorityClassName }} + priorityClassName: "{{ .Values.vroom.priorityClassName }}" + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/vroom/hpa-vroom.yaml b/charts/sentry/templates/sentry/vroom/hpa-vroom.yaml new file mode 100644 index 000000000..954b8bd8f --- /dev/null +++ b/charts/sentry/templates/sentry/vroom/hpa-vroom.yaml @@ -0,0 +1,33 @@ +{{- if .Values.vroom.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-vroom +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-vroom + minReplicas: {{ .Values.vroom.autoscaling.minReplicas }} + maxReplicas: {{ .Values.vroom.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.vroom.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-vroom + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vroom.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.vroom.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/vroom/service-vroom.yaml b/charts/sentry/templates/sentry/vroom/service-vroom.yaml new file mode 100644 index 000000000..3161085df --- /dev/null +++ b/charts/sentry/templates/sentry/vroom/service-vroom.yaml @@ -0,0 +1,28 @@ +{{- if .Values.sentry.features.enableProfiling }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "sentry.fullname" . }}-vroom + annotations: + {{- range $key, $value := .Values.vroom.service.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if and (.Values.ingress.enabled) (eq (default "nginx" .Values.ingress.regexPathStyle) "gke") }} + cloud.google.com/backend-config: '{"ports": {"{{ template "vroom.port" . }}":"{{ include "sentry.fullname" . }}-vroom"}}' + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ template "vroom.port" . }} + targetPort: {{ template "vroom.port" }} + protocol: TCP + name: {{ .Values.service.name }}-vroom + selector: + app: {{ template "sentry.fullname" . 
}} + role: vroom +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-ingest-consumer.yaml b/charts/sentry/templates/sentry/vroom/serviceaccount-sentry-vroom.yaml similarity index 64% rename from sentry/templates/serviceaccount-sentry-ingest-consumer.yaml rename to charts/sentry/templates/sentry/vroom/serviceaccount-sentry-vroom.yaml index a15ecf25e..814f93984 100644 --- a/sentry/templates/serviceaccount-sentry-ingest-consumer.yaml +++ b/charts/sentry/templates/sentry/vroom/serviceaccount-sentry-vroom.yaml @@ -1,10 +1,10 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.features.enableProfiling }} apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.serviceAccount.name }}-ingest-consumer + name: {{ .Values.serviceAccount.name }}-vroom {{- if .Values.serviceAccount.annotations }} annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/sentry/templates/deployment-sentry-web.yaml b/charts/sentry/templates/sentry/web/deployment-sentry-web.yaml similarity index 73% rename from sentry/templates/deployment-sentry-web.yaml rename to charts/sentry/templates/sentry/web/deployment-sentry-web.yaml index 641e775a1..32d9d50a0 100644 --- a/sentry/templates/deployment-sentry-web.yaml +++ b/charts/sentry/templates/sentry/web/deployment-sentry-web.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.web.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -24,7 +25,7 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} {{- if .Values.sentry.web.annotations }} {{ toYaml .Values.sentry.web.annotations | indent 8 }} {{- end }} @@ -43,10 +44,20 @@ spec: {{- if .Values.sentry.web.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.web.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.sentry.web.tolerations }} tolerations: {{ toYaml .Values.sentry.web.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.web.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.web.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -67,29 +78,37 @@ spec: - name: {{ .Chart.Name }}-web image: "{{ template "sentry.image" . }}" imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} - command: ["sentry", "run", "web"] + command: ["sentry"] + args: + - "run" + - "web" + {{- if .Values.sentry.web.workers }} + - "--workers" + - "{{ .Values.sentry.web.workers }}" + {{- end }} + {{- if .Values.sentry.web.logLevel }} + - "--loglevel" + - "{{ .Values.sentry.web.logLevel }}" + {{- end }} + {{- if .Values.sentry.web.logFormat }} + - "--logformat" + - "{{ .Values.sentry.web.logFormat }}" + {{- end }} ports: - containerPort: {{ template "sentry.port" }} env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . 
}}-snuba:{{ template "snuba.port" }} - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} +{{ include "sentry.env" . | indent 8 }} {{ if .Values.sentry.web.customCA }} - name: REQUESTS_CA_BUNDLE value: /etc/pki/ca-trust/custom/{{ default "ca.crt" .Values.sentry.web.customCA.item }} {{ end }} {{- if .Values.sentry.web.env }} {{ toYaml .Values.sentry.web.env | indent 8 }} +{{- end }} +{{- if .Values.sentry.web.existingSecretEnv }} + envFrom: + - secretRef: + name: {{.Values.sentry.web.existingSecretEnv}} {{- end }} volumeMounts: - mountPath: /etc/sentry @@ -97,6 +116,10 @@ spec: readOnly: true - mountPath: {{ .Values.filestore.filesystem.path }} name: sentry-data + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + {{- end }} {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - name: sentry-google-cloud-key mountPath: /var/run/secrets/google @@ -105,6 +128,9 @@ spec: - name: custom-ca mountPath: /etc/pki/ca-trust/custom {{ end }} +{{- if .Values.sentry.web.volumeMounts }} +{{ toYaml .Values.sentry.web.volumeMounts | indent 8 }} +{{- end }} livenessProbe: failureThreshold: {{ .Values.sentry.web.probeFailureThreshold }} httpGet: @@ -127,8 +153,15 @@ spec: timeoutSeconds: {{ .Values.sentry.web.probeTimeoutSeconds }} resources: {{ toYaml .Values.sentry.web.resources | indent 12 }} +{{- if .Values.sentry.web.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.web.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.sentry.web.sidecars }} {{ toYaml .Values.sentry.web.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-web @@ -162,6 +195,15 @@ spec: {{- if .Values.sentry.web.priorityClassName }} priorityClassName: "{{ .Values.sentry.web.priorityClassName }}" {{- end }} + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: {{ .Values.geodata.volumeName }} + {{- end }} {{- if .Values.sentry.web.volumes }} {{ toYaml .Values.sentry.web.volumes | indent 6 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/web/hpa-web.yaml b/charts/sentry/templates/sentry/web/hpa-web.yaml new file mode 100644 index 000000000..227775796 --- /dev/null +++ b/charts/sentry/templates/sentry/web/hpa-web.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.web.enabled .Values.sentry.web.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-web +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . 
}}-web + minReplicas: {{ .Values.sentry.web.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.web.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) "autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.web.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-web + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.web.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.web.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/sentry/templates/service-sentry.yaml b/charts/sentry/templates/sentry/web/service-sentry-web.yaml similarity index 96% rename from sentry/templates/service-sentry.yaml rename to charts/sentry/templates/sentry/web/service-sentry-web.yaml index 71c593a3f..0d9b3c5a8 100644 --- a/sentry/templates/service-sentry.yaml +++ b/charts/sentry/templates/sentry/web/service-sentry-web.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.web.enabled }} apiVersion: v1 kind: Service metadata: @@ -35,3 +36,4 @@ spec: loadBalancerSourceRanges: {{- toYaml . | nindent 4 }} {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-sentry-web.yaml b/charts/sentry/templates/sentry/web/serviceaccount-sentry-web.yaml similarity index 78% rename from sentry/templates/serviceaccount-sentry-web.yaml rename to charts/sentry/templates/sentry/web/serviceaccount-sentry-web.yaml index d1b192899..2fdada503 100644 --- a/sentry/templates/serviceaccount-sentry-web.yaml +++ b/charts/sentry/templates/sentry/web/serviceaccount-sentry-web.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.serviceAccount.enabled .Values.sentry.web.enabled }} apiVersion: v1 kind: ServiceAccount metadata: @@ -7,4 +7,4 @@ metadata: annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/sentry/worker/deployment-sentry-worker-events.yaml b/charts/sentry/templates/sentry/worker/deployment-sentry-worker-events.yaml new file mode 100644 index 000000000..44f81181a --- /dev/null +++ b/charts/sentry/templates/sentry/worker/deployment-sentry-worker-events.yaml @@ -0,0 +1,182 @@ +{{- if .Values.sentry.workerEvents.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-worker-events + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: worker-events +{{- if not .Values.sentry.workerEvents.autoscaling.enabled }} + replicas: {{ .Values.sentry.workerEvents.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . 
| sha256sum }} + {{- if .Values.sentry.workerEvents.annotations }} +{{ toYaml .Values.sentry.workerEvents.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: worker-events + {{- if .Values.sentry.workerEvents.podLabels }} +{{ toYaml .Values.sentry.workerEvents.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.workerEvents.affinity }} +{{ toYaml .Values.sentry.workerEvents.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerEvents.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.workerEvents.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerEvents.tolerations }} + tolerations: +{{ toYaml .Values.sentry.workerEvents.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerEvents.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.workerEvents.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerEvents.securityContext }} + securityContext: +{{ toYaml .Values.sentry.workerEvents.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-worker + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "worker" + - "-Q" + - {{ .Values.sentry.workerEvents.queues }} + {{- if .Values.sentry.workerEvents.concurrency }} + - "-c" + - "{{ .Values.sentry.workerEvents.concurrency }}" + {{- end }} + {{- if .Values.sentry.workerEvents.logLevel }} + - "--loglevel" + - "{{ .Values.sentry.workerEvents.logLevel }}" + {{- end }} + {{- if .Values.sentry.workerEvents.logFormat }} + - "--logformat" + - "{{ .Values.sentry.workerEvents.logFormat }}" + {{- end }} + {{- if .Values.sentry.workerEvents.maxTasksPerChild }} + - "--max-tasks-per-child" + - "{{ .Values.sentry.workerEvents.maxTasksPerChild }}" + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.workerEvents.env }} +{{ toYaml .Values.sentry.workerEvents.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + {{- end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.workerEvents.volumeMounts }} +{{ toYaml .Values.sentry.workerEvents.volumeMounts | indent 8 }} +{{- end }} + {{- if .Values.sentry.workerEvents.livenessProbe.enabled }} + livenessProbe: + periodSeconds: {{ .Values.sentry.workerEvents.livenessProbe.periodSeconds }} + initialDelaySeconds: 10 + timeoutSeconds: {{ .Values.sentry.workerEvents.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.sentry.workerEvents.livenessProbe.failureThreshold }} + exec: + command: + - sentry + - exec + - -c + - 'from sentry.celery import app; import os; dest="celery@{}".format(os.environ["HOSTNAME"]); print(app.control.ping(destination=[dest], timeout=5)[0][dest]["ok"])' +{{- end }} + resources: +{{ toYaml .Values.sentry.workerEvents.resources | indent 12 }} +{{- if .Values.sentry.workerEvents.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.workerEvents.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.workerEvents.sidecars }} +{{ toYaml .Values.sentry.workerEvents.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-worker + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: {{ .Values.geodata.volumeName }} + {{- end }} + {{- if .Values.sentry.workerEvents.priorityClassName }} + priorityClassName: "{{ .Values.sentry.workerEvents.priorityClassName }}" + {{- end }} +{{- if .Values.sentry.workerEvents.volumes }} +{{ toYaml .Values.sentry.workerEvents.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/worker/deployment-sentry-worker-transactions.yaml b/charts/sentry/templates/sentry/worker/deployment-sentry-worker-transactions.yaml new file mode 100644 index 000000000..1b8e30179 --- /dev/null +++ b/charts/sentry/templates/sentry/worker/deployment-sentry-worker-transactions.yaml @@ -0,0 +1,182 @@ +{{- if .Values.sentry.workerTransactions.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-worker-transactions + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: worker-transactions +{{- if not .Values.sentry.workerTransactions.autoscaling.enabled }} + replicas: {{ .Values.sentry.workerTransactions.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} + checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} + {{- if .Values.sentry.workerTransactions.annotations }} +{{ toYaml .Values.sentry.workerTransactions.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: worker-transactions + {{- if .Values.sentry.workerTransactions.podLabels }} +{{ toYaml .Values.sentry.workerTransactions.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.sentry.workerTransactions.affinity }} +{{ toYaml .Values.sentry.workerTransactions.affinity | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerTransactions.nodeSelector }} + nodeSelector: +{{ toYaml .Values.sentry.workerTransactions.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerTransactions.tolerations }} + tolerations: +{{ toYaml .Values.sentry.workerTransactions.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerTransactions.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.workerTransactions.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.sentry.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.sentry.workerTransactions.securityContext }} + securityContext: +{{ toYaml .Values.sentry.workerTransactions.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-worker + image: "{{ template "sentry.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} + command: ["sentry"] + args: + - "run" + - "worker" + - "-Q" + - {{ .Values.sentry.workerTransactions.queues }} + {{- if .Values.sentry.workerTransactions.concurrency }} + - "-c" + - "{{ .Values.sentry.workerTransactions.concurrency }}" + {{- end }} + {{- if .Values.sentry.workerTransactions.logLevel }} + - "--loglevel" + - "{{ .Values.sentry.workerTransactions.logLevel }}" + {{- end }} + {{- if .Values.sentry.workerTransactions.logFormat }} + - "--logformat" + - "{{ .Values.sentry.workerTransactions.logFormat }}" + {{- end }} + {{- if .Values.sentry.workerTransactions.maxTasksPerChild }} + - "--max-tasks-per-child" + - "{{ .Values.sentry.workerTransactions.maxTasksPerChild }}" + {{- end }} + env: + - name: C_FORCE_ROOT + value: "true" +{{ include "sentry.env" . 
| indent 8 }} +{{- if .Values.sentry.workerTransactions.env }} +{{ toYaml .Values.sentry.workerTransactions.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/sentry + name: config + readOnly: true + - mountPath: {{ .Values.filestore.filesystem.path }} + name: sentry-data + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + {{- end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.sentry.workerTransactions.volumeMounts }} +{{ toYaml .Values.sentry.workerTransactions.volumeMounts | indent 8 }} +{{- end }} + {{- if .Values.sentry.workerTransactions.livenessProbe.enabled }} + livenessProbe: + periodSeconds: {{ .Values.sentry.workerTransactions.livenessProbe.periodSeconds }} + initialDelaySeconds: 10 + timeoutSeconds: {{ .Values.sentry.workerTransactions.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.sentry.workerTransactions.livenessProbe.failureThreshold }} + exec: + command: + - sentry + - exec + - -c + - 'from sentry.celery import app; import os; dest="celery@{}".format(os.environ["HOSTNAME"]); print(app.control.ping(destination=[dest], timeout=5)[0][dest]["ok"])' +{{- end }} + resources: +{{ toYaml .Values.sentry.workerTransactions.resources | indent 12 }} +{{- if .Values.sentry.workerTransactions.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.workerTransactions.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.sentry.workerTransactions.sidecars }} +{{ toYaml .Values.sentry.workerTransactions.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-worker + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-sentry + - name: sentry-data + {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} + {{- if .Values.filestore.filesystem.persistence.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} + {{- else }} + persistentVolumeClaim: + claimName: {{ template "sentry.fullname" . 
}}-data + {{- end }} + {{- else }} + emptyDir: {} + {{ end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} + {{- if .Values.sentry.workerTransactions.priorityClassName }} + priorityClassName: "{{ .Values.sentry.workerTransactions.priorityClassName }}" + {{- end }} + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: {{ .Values.geodata.volumeName }} + {{- end }} +{{- if .Values.sentry.workerTransactions.volumes }} +{{ toYaml .Values.sentry.workerTransactions.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-sentry-worker.yaml b/charts/sentry/templates/sentry/worker/deployment-sentry-worker.yaml similarity index 67% rename from sentry/templates/deployment-sentry-worker.yaml rename to charts/sentry/templates/sentry/worker/deployment-sentry-worker.yaml index 569cf29fa..fda9446e5 100644 --- a/sentry/templates/deployment-sentry-worker.yaml +++ b/charts/sentry/templates/sentry/worker/deployment-sentry-worker.yaml @@ -1,3 +1,4 @@ +{{- if .Values.sentry.worker.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -22,7 +23,7 @@ spec: annotations: checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.config" . | sha256sum }} {{- if .Values.sentry.worker.annotations }} {{ toYaml .Values.sentry.worker.annotations | indent 8 }} {{- end }} @@ -41,10 +42,20 @@ spec: {{- if .Values.sentry.worker.nodeSelector }} nodeSelector: {{ toYaml .Values.sentry.worker.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.sentry.worker.tolerations }} tolerations: {{ toYaml .Values.sentry.worker.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.sentry.worker.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.sentry.worker.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.sentry.imagePullSecrets }} imagePullSecrets: @@ -69,28 +80,37 @@ spec: args: - "run" - "worker" + {{- if .Values.sentry.worker.excludeQueues }} + - "--exclude-queues" + - "{{ .Values.sentry.worker.excludeQueues }}" + {{- end }} {{- if .Values.sentry.worker.concurrency }} - "-c" - "{{ .Values.sentry.worker.concurrency }}" {{- end }} + {{- if .Values.sentry.worker.logLevel }} + - "--loglevel" + - "{{ .Values.sentry.worker.logLevel }}" + {{- end }} + {{- if .Values.sentry.worker.logFormat }} + - "--logformat" + - "{{ .Values.sentry.worker.logFormat }}" + {{- end }} + {{- if .Values.sentry.worker.maxTasksPerChild }} + - "--max-tasks-per-child" + - "{{ .Values.sentry.worker.maxTasksPerChild }}" + {{- end }} env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - name: C_FORCE_ROOT value: "true" - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) 
.Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} +{{ include "sentry.env" . | indent 8 }} {{- if .Values.sentry.worker.env }} {{ toYaml .Values.sentry.worker.env | indent 8 }} +{{- end }} +{{- if .Values.sentry.worker.existingSecretEnv }} + envFrom: + - secretRef: + name: {{.Values.sentry.worker.existingSecretEnv}} {{- end }} volumeMounts: - mountPath: /etc/sentry @@ -98,10 +118,17 @@ spec: readOnly: true - mountPath: {{ .Values.filestore.filesystem.path }} name: sentry-data + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + mountPath: {{ .Values.geodata.mountPath }} + {{- end }} {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.sentry.worker.volumeMounts }} +{{ toYaml .Values.sentry.worker.volumeMounts | indent 8 }} +{{- end }} {{- if .Values.sentry.worker.livenessProbe.enabled }} livenessProbe: periodSeconds: {{ .Values.sentry.worker.livenessProbe.periodSeconds }} @@ -113,12 +140,19 @@ spec: - sentry - exec - -c - - 'import celery, os; dest="celery@{}".format(os.environ["HOSTNAME"]); print(celery.task.control.ping(destination=[dest], timeout=5)[0][dest]["ok"])' + - 'from sentry.celery import app; import os; dest="celery@{}".format(os.environ["HOSTNAME"]); print(app.control.ping(destination=[dest], timeout=5)[0][dest]["ok"])' {{- end }} resources: {{ toYaml .Values.sentry.worker.resources | indent 12 }} +{{- if .Values.sentry.worker.containerSecurityContext }} + securityContext: +{{ toYaml .Values.sentry.worker.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.sentry.worker.sidecars }} {{ toYaml .Values.sentry.worker.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-worker @@ -149,4 +183,13 @@ spec: {{- end }} {{- if .Values.sentry.worker.volumes }} {{ toYaml .Values.sentry.worker.volumes | indent 6 }} +{{- end }} + {{- if and .Values.geodata.volumeName .Values.geodata.accountID }} + - name: {{ .Values.geodata.volumeName }} + persistentVolumeClaim: + claimName: {{ .Values.geodata.volumeName }} + {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} {{- end }} diff --git a/charts/sentry/templates/sentry/worker/hpa-worker-events.yaml b/charts/sentry/templates/sentry/worker/hpa-worker-events.yaml new file mode 100644 index 000000000..8456a5d67 --- /dev/null +++ b/charts/sentry/templates/sentry/worker/hpa-worker-events.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.workerEvents.enabled .Values.sentry.workerEvents.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-worker-events +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . 
}}-worker-events + minReplicas: {{ .Values.sentry.workerEvents.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.workerEvents.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) "autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.workerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-worker + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.workerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.workerEvents.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/worker/hpa-worker-transactions.yaml b/charts/sentry/templates/sentry/worker/hpa-worker-transactions.yaml new file mode 100644 index 000000000..f347e9b2e --- /dev/null +++ b/charts/sentry/templates/sentry/worker/hpa-worker-transactions.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.workerTransactions.enabled .Values.sentry.workerTransactions.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-worker-transactions +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-worker-transactions + minReplicas: {{ .Values.sentry.workerTransactions.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.workerTransactions.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) "autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.workerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-worker + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.workerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.workerTransactions.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/worker/hpa-worker.yaml b/charts/sentry/templates/sentry/worker/hpa-worker.yaml new file mode 100644 index 000000000..470167731 --- /dev/null +++ b/charts/sentry/templates/sentry/worker/hpa-worker.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentry.worker.enabled .Values.sentry.worker.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-sentry-worker +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-worker + minReplicas: {{ .Values.sentry.worker.autoscaling.minReplicas }} + maxReplicas: {{ .Values.sentry.worker.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) 
"autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.sentry.worker.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-worker + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.worker.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.sentry.worker.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/charts/sentry/templates/sentry/worker/serviceaccount-sentry-worker.yaml b/charts/sentry/templates/sentry/worker/serviceaccount-sentry-worker.yaml new file mode 100644 index 000000000..c92bd1bb3 --- /dev/null +++ b/charts/sentry/templates/sentry/worker/serviceaccount-sentry-worker.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.serviceAccount.enabled ( or .Values.sentry.worker.enabled .Values.sentry.workerEvents.enabled .Values.sentry.workerTransactions.enabled ) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.serviceAccount.name }}-worker +{{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} +{{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/sentry/templates/snuba/_helper-snuba.tpl b/charts/sentry/templates/snuba/_helper-snuba.tpl new file mode 100644 index 000000000..59b7239da --- /dev/null +++ b/charts/sentry/templates/snuba/_helper-snuba.tpl @@ -0,0 +1,110 @@ +{{- define "sentry.snuba.config" -}} +{{- $redisPass := include "sentry.redis.password" . -}} +{{- $redisSsl := include "sentry.redis.ssl" . -}} +settings.py: | + import os + + from snuba.settings import * + + env = os.environ.get + + DEBUG = env("DEBUG", "0").lower() in ("1", "true") + +{{- if .Values.kafka.enabled -}} + {{ if .Values.kafka.provisioning.enabled }} + + # Set partition counts for provisioning topics from kafka chart. + TOPIC_PARTITION_COUNTS = { + {{- $numPartitions := .Values.kafka.provisioning.numPartitions -}} + {{- range .Values.kafka.provisioning.topics }} + {{ .name | quote }}: {{ default $numPartitions .partitions }}, + {{- end }} + } + {{- end -}} +{{- end }} + + {{- if ((.Values.kafkaTopicOverrides).prefix) }} + SENTRY_CHARTS_KAFKA_TOPIC_PREFIX = {{ .Values.kafkaTopicOverrides.prefix | quote }} + + from snuba.utils.streams.topics import Topic + for topic in Topic: + KAFKA_TOPIC_MAP[topic.value] = f"{SENTRY_CHARTS_KAFKA_TOPIC_PREFIX}{topic.value}" + {{- end }} + + # Clickhouse Options + CLUSTERS = [ + { + "host": env("CLICKHOUSE_HOST", {{ include "sentry.clickhouse.host" . | quote }}), + "port": int({{ include "sentry.clickhouse.port" . }}), + "user": env("CLICKHOUSE_USER", "default"), + "password": env("CLICKHOUSE_PASSWORD", ""), + "max_connections": int(os.environ.get("CLICKHOUSE_MAX_CONNECTIONS", 100)), + "database": env("CLICKHOUSE_DATABASE", "default"), + "http_port": {{ include "sentry.clickhouse.http_port" . 
}}, + "storage_sets": { + "cdc", + "discover", + "events", + "events_ro", + "metrics", + "migrations", + "outcomes", + "querylog", + "sessions", + "transactions", + "profiles", + "functions", + "replays", + "generic_metrics_sets", + "generic_metrics_distributions", + "search_issues", + "generic_metrics_counters", + "spans", + "events_analytics_platform", + "group_attributes", + "generic_metrics_gauges", + "metrics_summaries", + "profile_chunks", + }, + {{- /* + The default clickhouse installation runs in distributed mode, while the external + clickhouse configured can be configured any way you choose + */}} + {{- if and .Values.externalClickhouse.singleNode (not .Values.clickhouse.enabled) }} + "single_node": True, + {{- else }} + "single_node": False, + {{- end }} + {{- if or .Values.clickhouse.enabled (not .Values.externalClickhouse.singleNode) }} + "cluster_name": {{ include "sentry.clickhouse.cluster.name" . | quote }}, + "distributed_cluster_name": {{ include "sentry.clickhouse.cluster.name" . | quote }}, + {{- end }} + }, + ] + + # Redis Options + REDIS_HOST = {{ include "sentry.redis.host" . | quote }} + REDIS_PORT = {{ include "sentry.redis.port" . }} + {{- if or (not ($redisPass)) (.Values.externalRedis.existingSecret) (.Values.redis.auth.existingSecret) }} + REDIS_PASSWORD = env("REDIS_PASSWORD", "") + {{- else if $redisPass }} + REDIS_PASSWORD = {{ $redisPass | quote }} + {{- end }} + + {{- if .Values.redis.enabled }} + REDIS_DB = int(env("REDIS_DB", {{ default 1 .Values.redis.db }})) + {{- else }} + REDIS_DB = int(env("REDIS_DB", {{ default 1 .Values.externalRedis.db }})) + {{- end }} + + {{- if eq $redisSsl "true" }} + REDIS_SSL = True + {{- end }} + +{{- if .Values.metrics.enabled }} + DOGSTATSD_HOST = "{{ template "sentry.fullname" . }}-metrics" + DOGSTATSD_PORT = 9125 +{{- end }} + + {{ .Values.config.snubaSettingsPy | nindent 2 }} +{{- end -}} diff --git a/charts/sentry/templates/snuba/configmap-snuba.yaml b/charts/sentry/templates/snuba/configmap-snuba.yaml new file mode 100644 index 000000000..6dd59861f --- /dev/null +++ b/charts/sentry/templates/snuba/configmap-snuba.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "sentry.fullname" . }}-snuba + labels: + app: sentry + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + {{ include "sentry.snuba.config" . | nindent 2 }} diff --git a/sentry/templates/deployment-snuba-api.yaml b/charts/sentry/templates/snuba/deployment-snuba-api.yaml similarity index 79% rename from sentry/templates/deployment-snuba-api.yaml rename to charts/sentry/templates/snuba/deployment-snuba-api.yaml index 948509dfa..d3b8bf5c9 100644 --- a/sentry/templates/deployment-snuba-api.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-api.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.api.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -21,7 +22,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.api.annotations }} {{ toYaml .Values.snuba.api.annotations | indent 8 }} {{- end }} @@ -40,10 +41,20 @@ spec: {{- if .Values.snuba.api.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.api.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.api.tolerations }} tolerations: {{ toYaml .Values.snuba.api.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.api.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.api.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -82,10 +93,13 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if .Values.snuba.api.volumeMounts }} +{{ toYaml .Values.snuba.api.volumeMounts | indent 8 }} +{{- end }} livenessProbe: failureThreshold: 5 httpGet: - path: / + path: /health port: {{ template "snuba.port" }} scheme: HTTP initialDelaySeconds: {{ .Values.snuba.api.probeInitialDelaySeconds }} @@ -95,7 +109,7 @@ spec: readinessProbe: failureThreshold: 10 httpGet: - path: / + path: /health port: {{ template "snuba.port" }} scheme: HTTP initialDelaySeconds: {{ .Values.snuba.api.probeInitialDelaySeconds }} @@ -104,8 +118,15 @@ spec: timeoutSeconds: {{ .Values.snuba.api.readiness.timeoutSeconds }} resources: {{ toYaml .Values.snuba.api.resources | indent 12 }} +{{- if .Values.snuba.api.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.api.containerSecurityContext | indent 12 }} +{{- end }} {{- if .Values.snuba.api.sidecars }} {{ toYaml .Values.snuba.api.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} {{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba @@ -117,3 +138,7 @@ spec: {{- if .Values.snuba.api.volumes }} {{ toYaml .Values.snuba.api.volumes | indent 8 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-consumer.yaml similarity index 72% rename from sentry/templates/deployment-snuba-consumer.yaml rename to charts/sentry/templates/snuba/deployment-snuba-consumer.yaml index 89b30a290..136b9d283 100644 --- a/sentry/templates/deployment-snuba-consumer.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-consumer.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.consumer.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.consumer.annotations }} {{ toYaml .Values.snuba.consumer.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.consumer.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.consumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.consumer.tolerations }} tolerations: {{ toYaml .Values.snuba.consumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.consumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.consumer.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -73,13 +84,15 @@ spec: imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} command: - "snuba" - - "consumer" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} - "--storage" - "errors" + - "--consumer-group" + - "snuba-consumers" + {{- if .Values.snuba.consumer.autoOffsetReset }} - "--auto-offset-reset" - "{{ .Values.snuba.consumer.autoOffsetReset }}" - - "--max-batch-time-ms" - - "750" + {{- end }} {{- if .Values.snuba.consumer.maxBatchSize }} - "--max-batch-size" - "{{ .Values.snuba.consumer.maxBatchSize }}" @@ -108,6 +121,22 @@ spec: - "--queued-min-messages" - "{{ .Values.snuba.consumer.queuedMinMessages }}" {{- end }} + {{- if .Values.snuba.consumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.consumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.consumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.consumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.consumer.livenessProbe.periodSeconds }} + {{- end }} ports: - containerPort: {{ template "snuba.port" }} env: @@ -127,6 +156,16 @@ spec: {{- end }} resources: {{ toYaml .Values.snuba.consumer.resources | indent 12 }} +{{- if .Values.snuba.consumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.consumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.consumer.sidecars }} +{{ toYaml .Values.snuba.consumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} @@ -137,3 +176,7 @@ spec: {{- if .Values.snuba.consumer.volumes }} {{ toYaml .Values.snuba.consumer.volumes | indent 8 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-counters-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-counters-consumer.yaml new file mode 100644 index 000000000..d262a2813 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-counters-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.genericMetricsCountersConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . 
}}-snuba-generic-metrics-counters-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-counters-consumer + replicas: {{ .Values.snuba.genericMetricsCountersConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.genericMetricsCountersConsumer.annotations }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-counters-consumer + {{- if .Values.snuba.genericMetricsCountersConsumer.podLabels }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.genericMetricsCountersConsumer.affinity }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "generic_metrics_counters_raw" + - "--consumer-group" + - "snuba-gen-metrics-counters-consumers" + {{- if .Values.snuba.genericMetricsCountersConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.genericMetricsCountersConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.genericMetricsCountersConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.genericMetricsCountersConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.genericMetricsCountersConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.genericMetricsCountersConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.genericMetricsCountersConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.genericMetricsCountersConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.genericMetricsCountersConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.genericMetricsCountersConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.genericMetricsCountersConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.genericMetricsCountersConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.genericMetricsCountersConsumer.env }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . 
}}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.genericMetricsCountersConsumer.volumeMounts }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.resources | indent 12 }} +{{- if .Values.snuba.genericMetricsCountersConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.genericMetricsCountersConsumer.sidecars }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.genericMetricsCountersConsumer.volumes }} +{{ toYaml .Values.snuba.genericMetricsCountersConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-distributions-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-distributions-consumer.yaml new file mode 100644 index 000000000..66063a0ce --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-distributions-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.genericMetricsDistributionConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-generic-metrics-distributions-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-distributions-consumer + replicas: {{ .Values.snuba.genericMetricsDistributionConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.annotations }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-distributions-consumer + {{- if .Values.snuba.genericMetricsDistributionConsumer.podLabels }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.genericMetricsDistributionConsumer.affinity }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "generic_metrics_distributions_raw" + - "--consumer-group" + - "snuba-gen-metrics-distributions-consumers" + {{- if .Values.snuba.genericMetricsDistributionConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.genericMetricsDistributionConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.genericMetricsDistributionConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.genericMetricsDistributionConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.genericMetricsDistributionConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.genericMetricsDistributionConsumer.env }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . 
}}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.genericMetricsDistributionConsumer.volumeMounts }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.resources | indent 12 }} +{{- if .Values.snuba.genericMetricsDistributionConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.genericMetricsDistributionConsumer.sidecars }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.genericMetricsDistributionConsumer.volumes }} +{{ toYaml .Values.snuba.genericMetricsDistributionConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-sets-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-sets-consumer.yaml new file mode 100644 index 000000000..01848226e --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-generic-metrics-sets-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.genericMetricsSetsConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-generic-metrics-sets-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-sets-consumer + replicas: {{ .Values.snuba.genericMetricsSetsConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.genericMetricsSetsConsumer.annotations }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-generic-metrics-sets-consumer + {{- if .Values.snuba.genericMetricsSetsConsumer.podLabels }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.genericMetricsSetsConsumer.affinity }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "generic_metrics_sets_raw" + - "--consumer-group" + - "snuba-gen-metrics-sets-consumers" + {{- if .Values.snuba.genericMetricsSetsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.genericMetricsSetsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.genericMetricsSetsConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.genericMetricsSetsConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.genericMetricsSetsConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.genericMetricsSetsConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.genericMetricsSetsConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.genericMetricsSetsConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.genericMetricsSetsConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.genericMetricsSetsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.genericMetricsSetsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.genericMetricsSetsConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.genericMetricsSetsConsumer.env }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . 
}}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.genericMetricsSetsConsumer.volumeMounts }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.resources | indent 12 }} +{{- if .Values.snuba.genericMetricsSetsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.genericMetricsSetsConsumer.sidecars }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.genericMetricsSetsConsumer.volumes }} +{{ toYaml .Values.snuba.genericMetricsSetsConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-group-attributes-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-group-attributes-consumer.yaml new file mode 100644 index 000000000..7b68ab7f2 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-group-attributes-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.groupAttributesConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-group-attributes-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-group-attributes-consumer + replicas: {{ .Values.snuba.groupAttributesConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.groupAttributesConsumer.annotations }} +{{ toYaml .Values.snuba.groupAttributesConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-group-attributes-consumer + {{- if .Values.snuba.groupAttributesConsumer.podLabels }} +{{ toYaml .Values.snuba.groupAttributesConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.groupAttributesConsumer.affinity }} +{{ toYaml .Values.snuba.groupAttributesConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.groupAttributesConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.groupAttributesConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.groupAttributesConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.groupAttributesConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "group_attributes" + - "--consumer-group" + - "snuba-group-attributes-group" + {{- if .Values.snuba.groupAttributesConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.groupAttributesConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.groupAttributesConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.groupAttributesConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.groupAttributesConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.groupAttributesConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.groupAttributesConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.groupAttributesConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.groupAttributesConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if 
.Values.snuba.groupAttributesConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.groupAttributesConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.groupAttributesConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.groupAttributesConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.groupAttributesConsumer.env }} +{{ toYaml .Values.snuba.groupAttributesConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.groupAttributesConsumer.volumeMounts }} +{{ toYaml .Values.snuba.groupAttributesConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.groupAttributesConsumer.resources | indent 12 }} +{{- if .Values.snuba.groupAttributesConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.groupAttributesConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.groupAttributesConsumer.sidecars }} +{{ toYaml .Values.snuba.groupAttributesConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.groupAttributesConsumer.volumes }} +{{ toYaml .Values.snuba.groupAttributesConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-issue-occurrence-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-issue-occurrence-consumer.yaml new file mode 100644 index 000000000..b7d74cb22 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-issue-occurrence-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.issueOccurrenceConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-issue-occurrence-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "16" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: issue-occurrence-consumer + replicas: {{ .Values.snuba.issueOccurrenceConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} + {{- if .Values.snuba.issueOccurrenceConsumer.annotations }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: issue-occurrence-consumer + {{- if .Values.snuba.issueOccurrenceConsumer.podLabels }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.issueOccurrenceConsumer.affinity }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "search_issues" + - "--consumer-group" + - "generic_events_group" + {{- if .Values.snuba.issueOccurrenceConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.issueOccurrenceConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.issueOccurrenceConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.issueOccurrenceConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.issueOccurrenceConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.issueOccurrenceConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.issueOccurrenceConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.issueOccurrenceConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.issueOccurrenceConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.issueOccurrenceConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.issueOccurrenceConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.issueOccurrenceConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.issueOccurrenceConsumer.env }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.issueOccurrenceConsumer.volumeMounts }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.resources | indent 12 }} +{{- if .Values.snuba.issueOccurrenceConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.issueOccurrenceConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.issueOccurrenceConsumer.sidecars }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-snuba +{{- if .Values.snuba.issueOccurrenceConsumer.volumes }} +{{ toYaml .Values.snuba.issueOccurrenceConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-metrics-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-metrics-consumer.yaml new file mode 100644 index 000000000..12b16de41 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-metrics-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.metricsConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-metrics-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-metrics-consumer + replicas: {{ .Values.snuba.metricsConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.metricsConsumer.annotations }} +{{ toYaml .Values.snuba.metricsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-metrics-consumer + {{- if .Values.snuba.metricsConsumer.podLabels }} +{{ toYaml .Values.snuba.metricsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.metricsConsumer.affinity }} +{{ toYaml .Values.snuba.metricsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.metricsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.metricsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.metricsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.metricsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.metricsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.metricsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.metricsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.metricsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "metrics_raw" + - "--consumer-group" + - "snuba-metrics-consumers" + {{- if .Values.snuba.metricsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.metricsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.metricsConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.metricsConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.metricsConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.metricsConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.metricsConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.metricsConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.metricsConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.metricsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.metricsConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.metricsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ 
.Values.snuba.metricsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.metricsConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.metricsConsumer.env }} +{{ toYaml .Values.snuba.metricsConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.metricsConsumer.volumeMounts }} +{{ toYaml .Values.snuba.metricsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.metricsConsumer.resources | indent 12 }} +{{- if .Values.snuba.metricsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.metricsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.metricsConsumer.sidecars }} +{{ toYaml .Values.snuba.metricsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.metricsConsumer.volumes }} +{{ toYaml .Values.snuba.metricsConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-outcomes-billing-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-outcomes-billing-consumer.yaml new file mode 100644 index 000000000..b28e133df --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-outcomes-billing-consumer.yaml @@ -0,0 +1,183 @@ +{{- if .Values.snuba.outcomesBillingConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-outcomes-billing-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-outcomes-billing-consumer + replicas: {{ .Values.snuba.outcomesBillingConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.outcomesBillingConsumer.annotations }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-outcomes-billing-consumer + {{- if .Values.snuba.outcomesBillingConsumer.podLabels }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.outcomesBillingConsumer.affinity }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.outcomesBillingConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.outcomesBillingConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.outcomesBillingConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.outcomesBillingConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "outcomes_raw" + - "--consumer-group" + - "snuba-consumers" + {{- if .Values.snuba.outcomesBillingConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.outcomesBillingConsumer.autoOffsetReset }}" + {{- end }} + - "--raw-events-topic={{ default "" ((.Values.kafkaTopicOverrides).prefix) }}outcomes-billing" + {{- if .Values.snuba.outcomesBillingConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.outcomesBillingConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.outcomesBillingConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.outcomesBillingConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.outcomesBillingConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.outcomesBillingConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.outcomesBillingConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.outcomesBillingConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.noStrictOffsetReset }} 
+ - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.outcomesBillingConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.outcomesBillingConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.outcomesBillingConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.outcomesBillingConsumer.env }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.outcomesBillingConsumer.volumeMounts }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.outcomesBillingConsumer.resources | indent 12 }} +{{- if .Values.snuba.outcomesBillingConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.outcomesBillingConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.outcomesBillingConsumer.sidecars }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.outcomesBillingConsumer.volumes }} +{{ toYaml .Values.snuba.outcomesBillingConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-outcomes-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-outcomes-consumer.yaml similarity index 72% rename from sentry/templates/deployment-snuba-outcomes-consumer.yaml rename to charts/sentry/templates/snuba/deployment-snuba-outcomes-consumer.yaml index d9d775536..2944a5ec2 100644 --- a/sentry/templates/deployment-snuba-outcomes-consumer.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-outcomes-consumer.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.outcomesConsumer.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.outcomesConsumer.annotations }} {{ toYaml .Values.snuba.outcomesConsumer.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.outcomesConsumer.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.outcomesConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.outcomesConsumer.tolerations }} tolerations: {{ toYaml .Values.snuba.outcomesConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.outcomesConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.outcomesConsumer.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -73,11 +84,15 @@ spec: imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} command: - "snuba" - - "consumer" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} - "--storage" - "outcomes_raw" + - "--consumer-group" + - "snuba-consumers" + {{- if .Values.snuba.outcomesConsumer.autoOffsetReset }} - "--auto-offset-reset" - "{{ .Values.snuba.outcomesConsumer.autoOffsetReset }}" + {{- end }} - "--max-batch-size" - "{{ default "3" .Values.snuba.outcomesConsumer.maxBatchSize }}" {{- if .Values.snuba.outcomesConsumer.processes }} @@ -104,6 +119,22 @@ spec: - "--queued-min-messages" - "{{ .Values.snuba.outcomesConsumer.queuedMinMessages }}" {{- end }} + {{- if .Values.snuba.outcomesConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.outcomesConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.outcomesConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.outcomesConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.outcomesConsumer.livenessProbe.periodSeconds }} + {{- end }} ports: - containerPort: {{ template "snuba.port" }} env: @@ -123,6 +154,16 @@ spec: {{- end }} resources: {{ toYaml .Values.snuba.outcomesConsumer.resources | indent 12 }} +{{- if .Values.snuba.outcomesConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.outcomesConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.outcomesConsumer.sidecars }} +{{ toYaml .Values.snuba.outcomesConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} @@ -133,3 +174,7 @@ spec: {{- if .Values.snuba.outcomesConsumer.volumes }} {{ toYaml .Values.snuba.outcomesConsumer.volumes | indent 8 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-profiling-functions-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-profiling-functions-consumer.yaml new file mode 100644 index 000000000..c9bd375e7 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-profiling-functions-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.sentry.features.enableProfiling }} 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-profiling-functions-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-profiling-functions-consumer + replicas: {{ .Values.snuba.profilingFunctionsConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.profilingFunctionsConsumer.annotations }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-profiling-functions-consumer + {{- if .Values.snuba.profilingFunctionsConsumer.podLabels }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.profilingFunctionsConsumer.affinity }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "functions_raw" + - "--consumer-group" + - "functions_raw_group" + {{- if .Values.snuba.profilingFunctionsConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.profilingFunctionsConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.profilingFunctionsConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.profilingFunctionsConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.profilingFunctionsConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.profilingFunctionsConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.profilingFunctionsConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.profilingFunctionsConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.profilingFunctionsConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.profilingFunctionsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.profilingFunctionsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.profilingFunctionsConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.profilingFunctionsConsumer.env }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . 
}}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.profilingFunctionsConsumer.volumeMounts }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.resources | indent 12 }} +{{- if .Values.snuba.profilingFunctionsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.profilingFunctionsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.profilingFunctionsConsumer.sidecars }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.profilingFunctionsConsumer.volumes }} +{{ toYaml .Values.snuba.profilingFunctionsConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-profiling-profiles-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-profiling-profiles-consumer.yaml new file mode 100644 index 000000000..f8e017ca6 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-profiling-profiles-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.sentry.features.enableProfiling }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-profiling-profiles-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-profiling-profiles-consumer + replicas: {{ .Values.snuba.profilingProfilesConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.profilingProfilesConsumer.annotations }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-profiling-profiles-consumer + {{- if .Values.snuba.profilingProfilesConsumer.podLabels }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.profilingProfilesConsumer.affinity }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.profilingProfilesConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.profilingProfilesConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.profilingProfilesConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.profilingProfilesConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "profiles" + - "--consumer-group" + - "profiles_group" + {{- if .Values.snuba.profilingProfilesConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.profilingProfilesConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.profilingProfilesConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.profilingProfilesConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.profilingProfilesConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.profilingProfilesConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.profilingProfilesConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.profilingProfilesConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.profilingProfilesConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end 
}} + {{- if .Values.snuba.profilingProfilesConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.profilingProfilesConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.profilingProfilesConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.profilingProfilesConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.profilingProfilesConsumer.env }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.profilingProfilesConsumer.volumeMounts }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.profilingProfilesConsumer.resources | indent 12 }} +{{- if .Values.snuba.profilingProfilesConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.profilingProfilesConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.profilingProfilesConsumer.sidecars }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.profilingProfilesConsumer.volumes }} +{{ toYaml .Values.snuba.profilingProfilesConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-replacer.yaml b/charts/sentry/templates/snuba/deployment-snuba-replacer.yaml similarity index 70% rename from sentry/templates/deployment-snuba-replacer.yaml rename to charts/sentry/templates/snuba/deployment-snuba-replacer.yaml index bf5fdd939..81739e2b9 100644 --- a/sentry/templates/deployment-snuba-replacer.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-replacer.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.replacer.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.replacer.annotations }} {{ toYaml .Values.snuba.replacer.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.replacer.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.replacer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.replacer.tolerations }} tolerations: {{ toYaml .Values.snuba.replacer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.replacer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.replacer.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -76,10 +87,13 @@ spec: - "replacer" - "--storage" - "errors" + {{- if .Values.snuba.replacer.autoOffsetReset }} - "--auto-offset-reset" - "{{ .Values.snuba.replacer.autoOffsetReset }}" - - "--max-batch-size" - - "{{ default "3" .Values.snuba.replacer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.replacer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} {{- if .Values.snuba.replacer.maxBatchTimeMs }} - "--max-batch-time-ms" - "{{ .Values.snuba.replacer.maxBatchTimeMs }}" @@ -91,6 +105,9 @@ spec: {{- if .Values.snuba.replacer.queuedMinMessages }} - "--queued-min-messages" - "{{ .Values.snuba.replacer.queuedMinMessages }}" + {{- if .Values.snuba.replacer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} {{- end }} ports: - containerPort: {{ template "snuba.port" }} @@ -106,12 +123,32 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if .Values.snuba.replacer.volumeMounts }} +{{ toYaml .Values.snuba.replacer.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.snuba.replacer.resources | indent 12 }} +{{- if .Values.snuba.replacer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.replacer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.replacer.sidecars }} +{{ toYaml .Values.snuba.replacer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.replacer.volumes }} +{{ toYaml .Values.snuba.replacer.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-replays-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-replays-consumer.yaml new file mode 100644 index 000000000..7afceeef3 --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-replays-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.replaysConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-replays-consumer + labels: + app: {{ template "sentry.fullname" . 
}} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-replays-consumer + replicas: {{ .Values.snuba.replaysConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.replaysConsumer.annotations }} +{{ toYaml .Values.snuba.replaysConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-replays-consumer + {{- if .Values.snuba.replaysConsumer.podLabels }} +{{ toYaml .Values.snuba.replaysConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.replaysConsumer.affinity }} +{{ toYaml .Values.snuba.replaysConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.replaysConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.replaysConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.replaysConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.replaysConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.replaysConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.replaysConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.replaysConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.replaysConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "replays" + - "--consumer-group" + - "replays_group" + {{- if .Values.snuba.replaysConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.replaysConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.replaysConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.replaysConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.replaysConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.replaysConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.replaysConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.replaysConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.replaysConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.replaysConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.replaysConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.replaysConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.replaysConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.replaysConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.replaysConsumer.env }} +{{ toYaml .Values.snuba.replaysConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.replaysConsumer.volumeMounts }} +{{ toYaml .Values.snuba.replaysConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.replaysConsumer.resources | indent 12 }} +{{- if .Values.snuba.replaysConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.replaysConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.replaysConsumer.sidecars }} +{{ toYaml .Values.snuba.replaysConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-snuba +{{- if .Values.snuba.replaysConsumer.volumes }} +{{ toYaml .Values.snuba.replaysConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-spans-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-spans-consumer.yaml new file mode 100644 index 000000000..ea7769aab --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-spans-consumer.yaml @@ -0,0 +1,182 @@ +{{- if .Values.snuba.spansConsumer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-spans-consumer + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "12" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-spans-consumer + replicas: {{ .Values.snuba.spansConsumer.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.spansConsumer.annotations }} +{{ toYaml .Values.snuba.spansConsumer.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-spans-consumer + {{- if .Values.snuba.spansConsumer.podLabels }} +{{ toYaml .Values.snuba.spansConsumer.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.spansConsumer.affinity }} +{{ toYaml .Values.snuba.spansConsumer.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.spansConsumer.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.spansConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.spansConsumer.tolerations }} + tolerations: +{{ toYaml .Values.snuba.spansConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.spansConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.spansConsumer.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.snuba.spansConsumer.securityContext }} + securityContext: +{{ toYaml .Values.snuba.spansConsumer.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} + - "--storage" + - "spans" + - "--consumer-group" + - "snuba-spans-group" + {{- if .Values.snuba.spansConsumer.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.spansConsumer.autoOffsetReset }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.maxBatchSize }} + - "--max-batch-size" + - "{{ .Values.snuba.spansConsumer.maxBatchSize }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.processes }} + - "--processes" + - "{{ .Values.snuba.spansConsumer.processes }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.inputBlockSize }} + - "--input-block-size" + - "{{ .Values.snuba.spansConsumer.inputBlockSize }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.outputBlockSize }} + - "--output-block-size" + - "{{ .Values.snuba.spansConsumer.outputBlockSize }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.maxBatchTimeMs }} + - "--max-batch-time-ms" + - "{{ .Values.snuba.spansConsumer.maxBatchTimeMs }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.queuedMaxMessagesKbytes }} + - "--queued-max-messages-kbytes" + - "{{ .Values.snuba.spansConsumer.queuedMaxMessagesKbytes }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.queuedMinMessages }} + - "--queued-min-messages" + - "{{ .Values.snuba.spansConsumer.queuedMinMessages }}" + {{- end }} + {{- if .Values.snuba.spansConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.spansConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.spansConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.spansConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.spansConsumer.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.spansConsumer.env }} +{{ toYaml .Values.snuba.spansConsumer.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . }}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.spansConsumer.volumeMounts }} +{{ toYaml .Values.snuba.spansConsumer.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.spansConsumer.resources | indent 12 }} +{{- if .Values.snuba.spansConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.spansConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.spansConsumer.sidecars }} +{{ toYaml .Values.snuba.spansConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-snuba +{{- if .Values.snuba.spansConsumer.volumes }} +{{ toYaml .Values.snuba.spansConsumer.volumes | indent 8 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-subscription-consumer-events.yaml b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-events.yaml similarity index 60% rename from sentry/templates/deployment-snuba-subscription-consumer-events.yaml rename to charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-events.yaml index d567dbfaa..08b045f86 100644 --- a/sentry/templates/deployment-snuba-subscription-consumer-events.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-events.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.subscriptionConsumerEvents.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} {{- if .Values.snuba.subscriptionConsumerEvents.annotations }} {{ toYaml .Values.snuba.subscriptionConsumerEvents.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.subscriptionConsumerEvents.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.subscriptionConsumerEvents.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.subscriptionConsumerEvents.tolerations }} tolerations: {{ toYaml .Values.snuba.subscriptionConsumerEvents.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerEvents.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.subscriptionConsumerEvents.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -67,15 +78,32 @@ spec: command: - "snuba" - "subscriptions-scheduler-executor" - - "--auto-offset-reset={{ .Values.snuba.subscriptionConsumerEvents.autoOffsetReset }}" + {{- if .Values.snuba.subscriptionConsumerEvents.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.subscriptionConsumerEvents.autoOffsetReset }}" + {{- end }} - "--dataset=events" - "--entity=events" + {{- if .Values.snuba.subscriptionConsumerEvents.noStrictOffsetReset }} - "--no-strict-offset-reset" + {{- end }} - "--consumer-group=snuba-events-subscriptions-consumers" - "--followed-consumer-group=snuba-consumers" - - "--delay-seconds=60" - "--schedule-ttl=60" - "--stale-threshold-seconds=900" + {{- if .Values.snuba.subscriptionConsumerEvents.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.subscriptionConsumerEvents.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.subscriptionConsumerEvents.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.subscriptionConsumerEvents.livenessProbe.periodSeconds }} + {{- end }} ports: - containerPort: {{ template "snuba.port" }} env: @@ -90,12 +118,32 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if 
.Values.snuba.subscriptionConsumerEvents.volumeMounts }} +{{ toYaml .Values.snuba.subscriptionConsumerEvents.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.snuba.subscriptionConsumerEvents.resources | indent 12 }} +{{- if .Values.snuba.subscriptionConsumerEvents.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.subscriptionConsumerEvents.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.subscriptionConsumerEvents.sidecars }} +{{ toYaml .Values.snuba.subscriptionConsumerEvents.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.subscriptionConsumerEvents.volumes }} +{{ toYaml .Values.snuba.subscriptionConsumerEvents.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-metrics.yaml b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-metrics.yaml new file mode 100644 index 000000000..572177c4c --- /dev/null +++ b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-metrics.yaml @@ -0,0 +1,150 @@ +{{- if .Values.snuba.subscriptionConsumerMetrics.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "sentry.fullname" . }}-snuba-subscription-consumer-metrics + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + app.kubernetes.io/managed-by: "Helm" + {{- if .Values.asHook }} + {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} + annotations: + meta.helm.sh/release-name: "{{ .Release.Name }}" + meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" + "helm.sh/hook": "post-install,post-upgrade" + "helm.sh/hook-weight": "18" + {{- end }} +spec: + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: snuba-subscription-consumer-metrics + replicas: {{ .Values.snuba.subscriptionConsumerMetrics.replicas }} + template: + metadata: + annotations: + checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} + {{- if .Values.snuba.subscriptionConsumerMetrics.annotations }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . 
}} + release: "{{ .Release.Name }}" + role: snuba-subscription-consumer-metrics + {{- if .Values.snuba.subscriptionConsumerMetrics.podLabels }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.podLabels | indent 8 }} + {{- end }} + spec: + affinity: + {{- if .Values.snuba.subscriptionConsumerMetrics.affinity }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.affinity | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerMetrics.nodeSelector }} + nodeSelector: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerMetrics.tolerations }} + tolerations: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerMetrics.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.snuba.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerMetrics.securityContext }} + securityContext: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-snuba + image: "{{ template "snuba.image" . }}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} + command: + - "snuba" + - "subscriptions-scheduler-executor" + {{- if .Values.snuba.subscriptionConsumerMetrics.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.subscriptionConsumerMetrics.autoOffsetReset }}" + {{- end }} + - "--dataset=metrics" + - "--entity=metrics_sets" + - "--entity=metrics_counters" + {{- if .Values.snuba.subscriptionConsumerMetrics.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + - "--consumer-group=snuba-metrics-subscriptions-consumers" + - "--followed-consumer-group=snuba-metrics-consumers" + - "--schedule-ttl=60" + - "--stale-threshold-seconds=900" + {{- if .Values.snuba.subscriptionConsumerMetrics.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.subscriptionConsumerMetrics.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.subscriptionConsumerMetrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.subscriptionConsumerMetrics.livenessProbe.periodSeconds }} + {{- end }} + ports: + - containerPort: {{ template "snuba.port" }} + env: +{{ include "sentry.snuba.env" . | indent 8 }} +{{- if .Values.snuba.subscriptionConsumerMetrics.env }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.env | indent 8 }} +{{- end }} + envFrom: + - secretRef: + name: {{ template "sentry.fullname" . 
}}-snuba-env + volumeMounts: + - mountPath: /etc/snuba + name: config + readOnly: true +{{- if .Values.snuba.subscriptionConsumerMetrics.volumeMounts }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.volumeMounts | indent 8 }} +{{- end }} + resources: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.resources | indent 12 }} +{{- if .Values.snuba.subscriptionConsumerMetrics.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.subscriptionConsumerMetrics.sidecars }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-snuba + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . }}-snuba +{{- if .Values.snuba.subscriptionConsumerMetrics.volumes }} +{{ toYaml .Values.snuba.subscriptionConsumerMetrics.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-subscription-consumer-transactions.yaml b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-transactions.yaml similarity index 60% rename from sentry/templates/deployment-snuba-subscription-consumer-transactions.yaml rename to charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-transactions.yaml index a8ca9e18e..f174b5d8c 100644 --- a/sentry/templates/deployment-snuba-subscription-consumer-transactions.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-subscription-consumer-transactions.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.subscriptionConsumerTransactions.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . 
| sha256sum }} {{- if .Values.snuba.subscriptionConsumerTransactions.annotations }} {{ toYaml .Values.snuba.subscriptionConsumerTransactions.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.subscriptionConsumerTransactions.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.subscriptionConsumerTransactions.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.subscriptionConsumerTransactions.tolerations }} tolerations: {{ toYaml .Values.snuba.subscriptionConsumerTransactions.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.subscriptionConsumerTransactions.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.subscriptionConsumerTransactions.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -67,15 +78,32 @@ spec: command: - "snuba" - "subscriptions-scheduler-executor" - - "--auto-offset-reset={{ .Values.snuba.subscriptionConsumerTransactions.autoOffsetReset }}" + {{- if .Values.snuba.subscriptionConsumerTransactions.autoOffsetReset }} + - "--auto-offset-reset" + - "{{ .Values.snuba.subscriptionConsumerTransactions.autoOffsetReset }}" + {{- end }} - "--dataset=transactions" - "--entity=transactions" + {{- if .Values.snuba.subscriptionConsumerTransactions.noStrictOffsetReset }} - "--no-strict-offset-reset" + {{- end }} - "--consumer-group=snuba-transactions-subscriptions-consumers" - "--followed-consumer-group=transactions_group" - - "--delay-seconds=60" - "--schedule-ttl=60" - "--stale-threshold-seconds=900" + {{- if .Values.snuba.subscriptionConsumerTransactions.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.subscriptionConsumerTransactions.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.subscriptionConsumerTransactions.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.subscriptionConsumerTransactions.livenessProbe.periodSeconds }} + {{- end }} ports: - containerPort: {{ template "snuba.port" }} env: @@ -90,12 +118,32 @@ spec: - mountPath: /etc/snuba name: config readOnly: true +{{- if .Values.snuba.subscriptionConsumerTransactions.volumeMounts }} +{{ toYaml .Values.snuba.subscriptionConsumerTransactions.volumeMounts | indent 8 }} +{{- end }} resources: {{ toYaml .Values.snuba.subscriptionConsumerTransactions.resources | indent 12 }} +{{- if .Values.snuba.subscriptionConsumerTransactions.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.subscriptionConsumerTransactions.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.subscriptionConsumerTransactions.sidecars }} +{{ toYaml .Values.snuba.subscriptionConsumerTransactions.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-snuba +{{- if .Values.snuba.subscriptionConsumerTransactions.volumes }} +{{ toYaml .Values.snuba.subscriptionConsumerTransactions.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} +{{- end }} diff --git a/sentry/templates/deployment-snuba-transactions-consumer.yaml b/charts/sentry/templates/snuba/deployment-snuba-transactions-consumer.yaml similarity index 73% rename from sentry/templates/deployment-snuba-transactions-consumer.yaml rename to charts/sentry/templates/snuba/deployment-snuba-transactions-consumer.yaml index 487d222ac..6dc1889cc 100644 --- a/sentry/templates/deployment-snuba-transactions-consumer.yaml +++ b/charts/sentry/templates/snuba/deployment-snuba-transactions-consumer.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.transactionsConsumer.enabled }} apiVersion: apps/v1 kind: Deployment metadata: @@ -28,7 +29,7 @@ spec: metadata: annotations: checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.snuba.config" . | sha256sum }} {{- if .Values.snuba.transactionsConsumer.annotations }} {{ toYaml .Values.snuba.transactionsConsumer.annotations | indent 8 }} {{- end }} @@ -47,10 +48,20 @@ spec: {{- if .Values.snuba.transactionsConsumer.nodeSelector }} nodeSelector: {{ toYaml .Values.snuba.transactionsConsumer.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.snuba.transactionsConsumer.tolerations }} tolerations: {{ toYaml .Values.snuba.transactionsConsumer.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.snuba.transactionsConsumer.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.snuba.transactionsConsumer.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.snuba.imagePullSecrets }} imagePullSecrets: @@ -73,15 +84,15 @@ spec: imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} command: - "snuba" - - "consumer" + - {{ if .Values.snuba.rustConsumer -}}"rust-consumer"{{- else -}}"consumer"{{- end }} - "--storage" - "transactions" - "--consumer-group" - "transactions_group" + {{- if .Values.snuba.transactionsConsumer.autoOffsetReset }} - "--auto-offset-reset" - "{{ .Values.snuba.transactionsConsumer.autoOffsetReset }}" - - "--max-batch-time-ms" - - "750" + {{- end }} {{- if .Values.snuba.transactionsConsumer.maxBatchSize }} - "--max-batch-size" - "{{ .Values.snuba.transactionsConsumer.maxBatchSize }}" @@ -110,6 +121,22 @@ spec: - "--queued-min-messages" - "{{ .Values.snuba.transactionsConsumer.queuedMinMessages }}" {{- end }} + {{- if .Values.snuba.transactionsConsumer.noStrictOffsetReset }} + - "--no-strict-offset-reset" + {{- end }} + {{- if .Values.snuba.transactionsConsumer.livenessProbe.enabled }} + - "--health-check-file" + - "/tmp/health.txt" + {{- end }} + {{- if .Values.snuba.transactionsConsumer.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - rm + - /tmp/health.txt + initialDelaySeconds: {{ .Values.snuba.transactionsConsumer.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.snuba.transactionsConsumer.livenessProbe.periodSeconds }} + {{- end }} ports: - containerPort: {{ template "snuba.port" }} env: @@ 
-129,6 +156,16 @@ spec: {{- end }} resources: {{ toYaml .Values.snuba.transactionsConsumer.resources | indent 12 }} +{{- if .Values.snuba.transactionsConsumer.containerSecurityContext }} + securityContext: +{{ toYaml .Values.snuba.transactionsConsumer.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.snuba.transactionsConsumer.sidecars }} +{{ toYaml .Values.snuba.transactionsConsumer.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-snuba {{- end }} @@ -139,3 +176,7 @@ spec: {{- if .Values.snuba.transactionsConsumer.volumes }} {{ toYaml .Values.snuba.transactionsConsumer.volumes | indent 8 }} {{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/sentry/templates/snuba/hpa-snuba-api.yaml b/charts/sentry/templates/snuba/hpa-snuba-api.yaml new file mode 100644 index 000000000..24dc67210 --- /dev/null +++ b/charts/sentry/templates/snuba/hpa-snuba-api.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.snuba.api.enabled .Values.snuba.api.autoscaling.enabled }} +apiVersion: {{ template "sentry.autoscaling.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "sentry.fullname" . }}-snuba-api +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "sentry.fullname" . }}-snuba-api + minReplicas: {{ .Values.snuba.api.autoscaling.minReplicas }} + maxReplicas: {{ .Values.snuba.api.autoscaling.maxReplicas }} + {{- if eq (include "sentry.autoscaling.apiVersion" .) "autoscaling/v1" }} + targetCPUUtilizationPercentage: {{ .Values.snuba.api.autoscaling.targetCPUUtilizationPercentage }} + {{- else if semverCompare ">=1.27-0" .Capabilities.KubeVersion.GitVersion }} + metrics: + - type: ContainerResource + containerResource: + container: {{ .Chart.Name }}-snuba + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.snuba.api.autoscaling.targetCPUUtilizationPercentage }} + {{- else }} + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.snuba.api.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/sentry/templates/secret-snuba-env.yaml b/charts/sentry/templates/snuba/secret-snuba-env.yaml similarity index 78% rename from sentry/templates/secret-snuba-env.yaml rename to charts/sentry/templates/snuba/secret-snuba-env.yaml index ac578bd03..1b40bca69 100644 --- a/sentry/templates/secret-snuba-env.yaml +++ b/charts/sentry/templates/snuba/secret-snuba-env.yaml @@ -9,6 +9,9 @@ metadata: heritage: "{{ .Release.Service }}" type: Opaque data: + CLICKHOUSE_PORT: {{ include "sentry.clickhouse.port" . | b64enc | quote }} CLICKHOUSE_DATABASE: {{ include "sentry.clickhouse.database" . | b64enc | quote }} CLICKHOUSE_USER: {{ include "sentry.clickhouse.username" . | b64enc | quote }} +{{- if not .Values.externalClickhouse.existingSecret }} CLICKHOUSE_PASSWORD: {{ include "sentry.clickhouse.password" . 
| b64enc | quote }} +{{- end }} diff --git a/sentry/templates/service-snuba.yaml b/charts/sentry/templates/snuba/service-snuba.yaml similarity index 96% rename from sentry/templates/service-snuba.yaml rename to charts/sentry/templates/snuba/service-snuba.yaml index 14b7d58a7..f7641f129 100644 --- a/sentry/templates/service-snuba.yaml +++ b/charts/sentry/templates/snuba/service-snuba.yaml @@ -1,3 +1,4 @@ +{{- if .Values.snuba.api.enabled }} apiVersion: v1 kind: Service metadata: @@ -35,3 +36,4 @@ spec: loadBalancerSourceRanges: {{- toYaml . | nindent 4 }} {{- end }} +{{- end }} diff --git a/sentry/templates/serviceaccount-snuba.yaml b/charts/sentry/templates/snuba/serviceaccount-snuba.yaml similarity index 82% rename from sentry/templates/serviceaccount-snuba.yaml rename to charts/sentry/templates/snuba/serviceaccount-snuba.yaml index c8a389697..8140edb94 100644 --- a/sentry/templates/serviceaccount-snuba.yaml +++ b/charts/sentry/templates/snuba/serviceaccount-snuba.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccount.enabled }} +{{- if and .Values.snuba.api.enabled .Values.serviceAccount.enabled }} apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/sentry/templates/symbolicator/_helper-symbolicator.tpl b/charts/sentry/templates/symbolicator/_helper-symbolicator.tpl new file mode 100644 index 000000000..52e96b4a9 --- /dev/null +++ b/charts/sentry/templates/symbolicator/_helper-symbolicator.tpl @@ -0,0 +1,3 @@ +{{- define "sentry.symbolicator.config" -}} +config.yml: {{ toYaml .Values.symbolicator.api.config }} +{{- end -}} diff --git a/sentry/templates/configmap-symbolicator.yaml b/charts/sentry/templates/symbolicator/configmap-symbolicator.yaml similarity index 82% rename from sentry/templates/configmap-symbolicator.yaml rename to charts/sentry/templates/symbolicator/configmap-symbolicator.yaml index 2b73cfd51..8867ad48a 100644 --- a/sentry/templates/configmap-symbolicator.yaml +++ b/charts/sentry/templates/symbolicator/configmap-symbolicator.yaml @@ -9,5 +9,5 @@ metadata: release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" data: - config.yml: {{ toYaml .Values.symbolicator.api.config | indent 2 }} + {{ include "sentry.symbolicator.config" . | nindent 2 }} {{- end }} diff --git a/sentry/templates/deployment-symbolicator.yaml b/charts/sentry/templates/symbolicator/deployment-symbolicator.yaml similarity index 78% rename from sentry/templates/deployment-symbolicator.yaml rename to charts/sentry/templates/symbolicator/deployment-symbolicator.yaml index 5f57c2399..e6f8884fe 100644 --- a/sentry/templates/deployment-symbolicator.yaml +++ b/charts/sentry/templates/symbolicator/deployment-symbolicator.yaml @@ -1,4 +1,5 @@ {{- if .Values.symbolicator.enabled }} +{{- if .Values.symbolicator.api.usedeployment }} apiVersion: apps/v1 kind: Deployment metadata: @@ -21,7 +22,7 @@ spec: template: metadata: annotations: - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-symbolicator.yaml") . | sha256sum }} + checksum/config.yaml: {{ include "sentry.symbolicator.config" . 
| sha256sum }} {{- if .Values.symbolicator.api.annotations }} {{ toYaml .Values.symbolicator.api.annotations | indent 8 }} {{- end }} @@ -40,10 +41,20 @@ spec: {{- if .Values.symbolicator.api.nodeSelector }} nodeSelector: {{ toYaml .Values.symbolicator.api.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} {{- end }} {{- if .Values.symbolicator.api.tolerations }} tolerations: {{ toYaml .Values.symbolicator.api.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.symbolicator.api.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.symbolicator.api.topologySpreadConstraints | indent 8 }} {{- end }} {{- if .Values.images.symbolicator.imagePullSecrets }} imagePullSecrets: @@ -85,6 +96,9 @@ spec: - name: sentry-google-cloud-key mountPath: /var/run/secrets/google {{ end }} +{{- if .Values.symbolicator.api.volumeMounts }} +{{ toYaml .Values.symbolicator.api.volumeMounts | indent 8 }} +{{- end }} livenessProbe: failureThreshold: 5 httpGet: @@ -107,6 +121,16 @@ spec: timeoutSeconds: 2 resources: {{ toYaml .Values.symbolicator.api.resources | indent 12 }} +{{- if .Values.symbolicator.api.containerSecurityContext }} + securityContext: +{{ toYaml .Values.symbolicator.api.containerSecurityContext | indent 12 }} +{{- end }} +{{- if .Values.symbolicator.api.sidecars }} +{{ toYaml .Values.symbolicator.api.sidecars | indent 6 }} +{{- end }} +{{- if .Values.global.sidecars }} +{{ toYaml .Values.global.sidecars | indent 6 }} +{{- end }} {{- if .Values.serviceAccount.enabled }} serviceAccountName: {{ .Values.serviceAccount.name }}-symbolicator-api {{- end }} @@ -121,7 +145,14 @@ spec: secret: secretName: {{ .Values.filestore.gcs.secretName }} {{ end }} +{{- if .Values.symbolicator.api.volumes }} +{{ toYaml .Values.symbolicator.api.volumes | indent 6 }} +{{- end }} +{{- if .Values.global.volumes }} +{{ toYaml .Values.global.volumes | indent 6 }} +{{- end }} {{- if .Values.symbolicator.api.priorityClassName }} priorityClassName: "{{ .Values.symbolicator.api.priorityClassName }}" {{- end }} {{- end }} +{{- end }} diff --git a/sentry/templates/service-symbolicator.yaml b/charts/sentry/templates/symbolicator/service-symbolicator.yaml similarity index 100% rename from sentry/templates/service-symbolicator.yaml rename to charts/sentry/templates/symbolicator/service-symbolicator.yaml diff --git a/sentry/templates/serviceaccount-symbolicator.yaml b/charts/sentry/templates/symbolicator/serviceaccount-symbolicator.yaml similarity index 97% rename from sentry/templates/serviceaccount-symbolicator.yaml rename to charts/sentry/templates/symbolicator/serviceaccount-symbolicator.yaml index a9e16b6e3..974b22716 100644 --- a/sentry/templates/serviceaccount-symbolicator.yaml +++ b/charts/sentry/templates/symbolicator/serviceaccount-symbolicator.yaml @@ -7,4 +7,4 @@ metadata: annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} {{- end }} automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/sentry/templates/symbolicator/statefulset-symbolicator.yaml b/charts/sentry/templates/symbolicator/statefulset-symbolicator.yaml new file mode 100644 index 000000000..59d1ea316 --- /dev/null +++ b/charts/sentry/templates/symbolicator/statefulset-symbolicator.yaml @@ -0,0 +1,167 @@ +{{- if 
.Values.symbolicator.enabled }} +{{- if not .Values.symbolicator.api.usedeployment }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "sentry.fullname" . }}-symbolicator-api + labels: + app: {{ template "sentry.fullname" . }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + serviceName: {{ template "sentry.fullname" . }}-symbolicator-api + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: symbolicator-api +{{- if not .Values.symbolicator.api.autoscaling.enabled }} + replicas: {{ .Values.symbolicator.api.replicas }} +{{- end }} + template: + metadata: + annotations: + checksum/config.yaml: {{ include "sentry.symbolicator.config" . | sha256sum }} + {{- if .Values.symbolicator.api.annotations }} +{{ toYaml .Values.symbolicator.api.annotations | indent 8 }} + {{- end }} + labels: + app: {{ template "sentry.fullname" . }} + release: "{{ .Release.Name }}" + role: symbolicator-api + {{- if .Values.symbolicator.api.podLabels }} +{{ toYaml .Values.symbolicator.api.podLabels | indent 8 }} + {{- end }} + spec: + {{- if .Values.symbolicator.api.affinity }} + affinity: +{{ toYaml .Values.symbolicator.api.affinity | indent 8 }} + {{- end }} + {{- if .Values.symbolicator.api.nodeSelector }} + nodeSelector: +{{ toYaml .Values.symbolicator.api.nodeSelector | indent 8 }} + {{- else if .Values.global.nodeSelector }} + nodeSelector: +{{ toYaml .Values.global.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.symbolicator.api.tolerations }} + tolerations: +{{ toYaml .Values.symbolicator.api.tolerations | indent 8 }} + {{- else if .Values.global.tolerations }} + tolerations: +{{ toYaml .Values.global.tolerations | indent 8 }} + {{- end }} + {{- if .Values.symbolicator.api.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.symbolicator.api.topologySpreadConstraints | indent 8 }} + {{- end }} + {{- if .Values.images.symbolicator.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.images.symbolicator.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy | quote }} + {{- end }} + {{- if .Values.dnsConfig }} + dnsConfig: +{{ toYaml .Values.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.symbolicator.api.securityContext }} + securityContext: +{{ toYaml .Values.symbolicator.api.securityContext | indent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-symbolicator + image: "{{ template "symbolicator.image" . 
}}" + imagePullPolicy: {{ default "IfNotPresent" .Values.images.symbolicator.pullPolicy }} + args: ["run", "-c", "/etc/symbolicator/config.yml"] + ports: + - containerPort: {{ template "symbolicator.port" }} + env: + {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} + {{ end }} +{{- if .Values.symbolicator.api.env }} +{{ toYaml .Values.symbolicator.api.env | indent 8 }} +{{- end }} + volumeMounts: + - mountPath: /etc/symbolicator + name: config + readOnly: true + - mountPath: /data + name: symbolicator-data + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + mountPath: /var/run/secrets/google + {{ end }} +{{- if .Values.symbolicator.api.volumeMounts }} +{{ toYaml .Values.symbolicator.api.volumeMounts | indent 8 }} +{{- end }} + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthcheck + port: {{ template "symbolicator.port" }} + scheme: HTTP + initialDelaySeconds: {{ .Values.symbolicator.api.probeInitialDelaySeconds }} + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + readinessProbe: + failureThreshold: 10 + httpGet: + path: /healthcheck + port: {{ template "symbolicator.port" }} + scheme: HTTP + initialDelaySeconds: {{ .Values.symbolicator.api.probeInitialDelaySeconds }} + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 2 + resources: +{{ toYaml .Values.symbolicator.api.resources | indent 12 }} +{{- if .Values.symbolicator.api.containerSecurityContext }} + securityContext: +{{ toYaml .Values.symbolicator.api.containerSecurityContext | indent 12 }} +{{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ .Values.serviceAccount.name }}-symbolicator-api + {{- end }} + volumes: + - name: config + configMap: + name: {{ template "sentry.fullname" . 
}}-symbolicator + {{- if not .Values.symbolicator.api.persistence.enabled }} + - name: symbolicator-data + emptyDir: {} + {{- end }} + {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} + - name: sentry-google-cloud-key + secret: + secretName: {{ .Values.filestore.gcs.secretName }} + {{ end }} +{{- if .Values.symbolicator.api.volumes }} +{{ toYaml .Values.symbolicator.api.volumes | indent 6 }} +{{- end }} + {{- if .Values.symbolicator.api.priorityClassName }} + priorityClassName: "{{ .Values.symbolicator.api.priorityClassName }}" + {{- end }} + {{- if .Values.symbolicator.api.persistence.enabled }} + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: symbolicator-data + spec: + accessModes: {{ .Values.symbolicator.api.persistence.accessModes }} + {{- if hasKey .Values.symbolicator.api.persistence "storageClassName" }} + storageClassName: {{ .Values.symbolicator.api.persistence.storageClassName | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.symbolicator.api.persistence.size }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/sentry/values.yaml b/charts/sentry/values.yaml new file mode 100644 index 000000000..311f2e1a3 --- /dev/null +++ b/charts/sentry/values.yaml @@ -0,0 +1,2586 @@ +prefix: + +# Set this to true to support IPV6 networks +ipv6: false + +global: + + # Set SAMPLED_DEFAULT_RATE parameter for all projects + # sampledDefaultRate: 1.0 + + nodeSelector: {} + tolerations: [] + sidecars: [] + volumes: [] + +user: + create: true + email: admin@sentry.local + password: aaaa + + ## set this value to an existingSecret name to create the admin user with the password in the secret + # existingSecret: sentry-admin-password + + ## set this value to an existingSecretKey which holds the password to be used for sentry admin user default key is `admin-password` + # existingSecretKey: admin-password + +# this is required on the first installation, as sentry has to be initialized first +# recommended to set false for updating the helm chart afterwards, +# as you will have some downtime on each update if it's a hook +# deploys relay & snuba consumers as post hooks +asHook: true + +images: + sentry: + # repository: getsentry/sentry + # tag: Chart.AppVersion + # pullPolicy: IfNotPresent + imagePullSecrets: [] + snuba: + # repository: getsentry/snuba + # tag: Chart.AppVersion + # pullPolicy: IfNotPresent + imagePullSecrets: [] + relay: + # repository: getsentry/relay + # tag: Chart.AppVersion + # pullPolicy: IfNotPresent + imagePullSecrets: [] + symbolicator: + # repository: getsentry/symbolicator + # tag: Chart.AppVersion + # pullPolicy: IfNotPresent + imagePullSecrets: [] + vroom: + # repository: getsentry/vroom + # tag: Chart.AppVersion + # pullPolicy: IfNotPresent + imagePullSecrets: [] + +serviceAccount: + # serviceAccount.annotations -- Additional Service Account annotations. + annotations: {} + # serviceAccount.enabled -- If `true`, a custom Service Account will be used. + enabled: false + # serviceAccount.name -- The base name of the ServiceAccount to use. Will be appended with e.g. `snuba-api` or `web` for the pods accordingly. + name: "sentry" + # serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. 
+ automountServiceAccountToken: true + +vroom: + # annotations: {} + # args: [] + replicas: 1 + env: [] + probeFailureThreshold: 5 + probeInitialDelaySeconds: 10 + probePeriodSeconds: 10 + probeSuccessThreshold: 1 + probeTimeoutSeconds: 2 + resources: {} + # requests: + # cpu: 100m + # memory: 700Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # priorityClassName: "" + service: + annotations: {} + # tolerations: [] + # podLabels: {} + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + sidecars: [] + # topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + +relay: + enabled: true + # annotations: {} + replicas: 1 + # args: [] + mode: managed + env: [] + probeFailureThreshold: 5 + probeInitialDelaySeconds: 10 + probePeriodSeconds: 10 + probeSuccessThreshold: 1 + probeTimeoutSeconds: 2 + resources: {} + # requests: + # cpu: 100m + # memory: 700Mi + affinity: {} + nodeSelector: {} + # healthCheck: + # readinessRequestPath: "" + securityContext: {} + # if you are using GKE Ingress controller use 'securityPolicy' to add Google Cloud Armor Ingress policy + securityPolicy: "" + # if you are using GKE Ingress controller use 'customResponseHeaders' to add custom response header + customResponseHeaders: [] + containerSecurityContext: {} + service: + annotations: {} + # tolerations: [] + # podLabels: {} + # priorityClassName: "" + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + init: + resources: {} + # additionalArgs: [] + # credentialsSubcommand: "" + # env: [] + # volumes: [] + # volumeMounts: [] + # cache: + # envelopeBufferSize: 1000 + # logging: + # level: info + # format: json + processing: + kafkaConfig: + messageMaxBytes: 50000000 + # messageTimeoutMs: + # requestTimeoutMs: + # deliveryTimeoutMs: + # apiVersionRequestTimeoutMs: + + # additionalKafkaConfig: + # - name: compression.type + # value: "lz4" + +# Override custom Kafka topic names +# WARNING: If you update this and you are also using the Kafka subchart, you need to update the provisioned Topic names in this values as well! +# kafkaTopicOverrides: +# prefix: "" + +# enable and reference the volume +geodata: + accountID: "" + licenseKey: "" + editionIDs: "" + persistence: + ## If defined, storageClassName: + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + # storageClass: "" # for example: csi-s3 + size: 1Gi + volumeName: "" # for example: data-sentry-geoip + # mountPath of the volume containing the database + mountPath: "" # for example: /usr/share/GeoIP + # path to the geoip database inside the volumemount + path: "" # for example: /usr/share/GeoIP/GeoLite2-City.mmdb + +sentry: + # to not generate a sentry-secret, use these 2 values to reference an existing secret + # existingSecret: "my-secret" + # existingSecretKey: "my-secret-key" + singleOrganization: true + web: + enabled: true + # if using filestore backend filesystem with RWO access, set strategyType to Recreate + strategyType: RollingUpdate + replicas: 1 + env: [] + existingSecretEnv: "" + probeFailureThreshold: 5 + probeInitialDelaySeconds: 10 + probePeriodSeconds: 10 + probeSuccessThreshold: 1 + probeTimeoutSeconds: 2 + resources: {} + # requests: + # cpu: 200m + # memory: 850Mi + affinity: {} + nodeSelector: {} + securityContext: {} + # if you are using GKE Ingress controller use 'securityPolicy' to add Google Cloud Armor Ingress policy + securityPolicy: "" + # if you are using GKE Ingress controller use 'customResponseHeaders' to add custom response header + customResponseHeaders: [] + containerSecurityContext: {} + service: + annotations: {} + # tolerations: [] + # podLabels: {} + # Mount and use custom CA + # customCA: + # secretName: custom-ca + # item: ca.crt + # logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL + # logFormat: "human" # human|machine + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + # workers: 3 + + features: + orgSubdomains: false + vstsLimitedScopes: true + enableProfiling: false + enableSessionReplay: true + enableFeedback: false + enableSpan: false + + # example customFeature to enable Metrics(beta) https://docs.sentry.io/product/metrics/ + # customFeatures: + # - organizations:custom-metric + # - organizations:custom-metrics-experimental + # - organizations:derive-code-mappings + + worker: + enabled: true + # annotations: {} + replicas: 1 + # concurrency: 4 + env: [] + existingSecretEnv: "" + resources: {} + # requests: + # cpu: 1000m + # memory: 1100Mi + affinity: {} + nodeSelector: {} + # tolerations: [] + # podLabels: {} + # logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL + # logFormat: "machine" # human|machine + # excludeQueues: "" + # maxTasksPerChild: 1000 + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + livenessProbe: + enabled: true + periodSeconds: 60 + timeoutSeconds: 10 + failureThreshold: 3 + sidecars: [] + # securityContext: {} + # containerSecurityContext: {} + # priorityClassName: "" + topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + + # allows to dedicate some workers to specific queues + workerEvents: + ## If the number of exceptions increases, it is recommended to enable workerEvents + enabled: false + # annotations: {} + queues: "events.save_event,post_process_errors" + ## When increasing the number of exceptions and enabling workerEvents, it is recommended to increase the number of their replicas + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + # tolerations: [] + # podLabels: {} + # logLevel: "WARNING" # 
DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL + # logFormat: "machine" # human|machine + # maxTasksPerChild: 1000 + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + livenessProbe: + enabled: false + periodSeconds: 60 + timeoutSeconds: 10 + failureThreshold: 3 + sidecars: [] + # securityContext: {} + # containerSecurityContext: {} + # priorityClassName: "" + topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + + # allows to dedicate some workers to specific queues + workerTransactions: + enabled: false + # annotations: {} + queues: "events.save_event_transaction,post_process_transactions" + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + # tolerations: [] + # podLabels: {} + # logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL + # logFormat: "machine" # human|machine + # maxTasksPerChild: 1000 + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + livenessProbe: + enabled: false + periodSeconds: 60 + timeoutSeconds: 10 + failureThreshold: 3 + sidecars: [] + # securityContext: {} + # containerSecurityContext: {} + # priorityClassName: "" + topologySpreadConstraints: [] + volumes: [] + volumeMounts: [] + + ingestConsumerAttachments: + enabled: true + replicas: 1 + # concurrency: 4 + # maxBatchTimeMs: 20000 + # maxPollIntervalMs: 30000 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 700Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # maxBatchSize: "" + # logLevel: info + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestConsumerEvents: + enabled: true + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + # requests: + # cpu: 300m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # maxBatchSize: "" + # logLevel: "info" + # inputBlockSize: "" + # maxBatchTimeMs: "" + + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestConsumerTransactions: + enabled: true + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # maxBatchSize: "" + # logLevel: "info" + # inputBlockSize: "" + # maxBatchTimeMs: "" + + # it's better to use prometheus adapter and scale 
based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestReplayRecordings: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 100m + # memory: 250Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestProfiles: + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestOccurrences: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 100m + # memory: 250Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestFeedback: + enabled: false + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 100m + # memory: 250Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + ingestMonitors: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 100m + # memory: 250Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + 
enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + billingMetricsConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 100m + # memory: 250Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + genericMetricsConsumer: + enabled: true + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # maxPollIntervalMs: "" + # logLevel: "info" + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + metricsConsumer: + enabled: true + replicas: 1 + # concurrency: 4 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # logLevel: "info" + # maxPollIntervalMs: "" + # it's better to use prometheus adapter and scale based on + # the size of the rabbitmq queue + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + cron: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + # logLevel: "WARNING" # DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL + # logFormat: "machine" # human|machine + + subscriptionConsumerEvents: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + # volumeMounts: [] + + subscriptionConsumerSessions: + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + 
securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + # volumeMounts: [] + + subscriptionConsumerTransactions: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + # volumeMounts: [] + + postProcessForwardErrors: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 150m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + postProcessForwardTransactions: + enabled: true + replicas: 1 + # processes: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumeMounts: [] + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + postProcessForwardIssuePlatform: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 300m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + subscriptionConsumerGenericMetrics: + enabled: true + replicas: 1 + # concurrency: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + subscriptionConsumerMetrics: + enabled: true + replicas: 1 + # concurrency: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + cleanup: + successfulJobsHistoryLimit: 5 + failedJobsHistoryLimit: 5 + activeDeadlineSeconds: 100 + concurrencyPolicy: Allow + concurrency: 1 + enabled: true + schedule: "0 0 * * *" + days: 90 + # logLevel: INFO + logLevel: '' + # securityContext: {} + # containerSecurityContext: {} + sidecars: [] + volumes: [] + # volumeMounts: [] + 
serviceAccount: {} + + # Sentry settings of connections to Kafka + kafka: + message: + max: + bytes: 50000000 + compression: + type: # 'gzip', 'snappy', 'lz4', 'zstd' + socket: + timeout: + ms: 1000 + +snuba: + api: + enabled: true + replicas: 1 + # set command to ["snuba","api"] if securityContext.runAsUser > 0 + # see: https://github.com/getsentry/snuba/issues/956 + command: [] + # - snuba + # - api + env: [] + probeInitialDelaySeconds: 10 + liveness: + timeoutSeconds: 2 + readiness: + timeoutSeconds: 2 + resources: {} + # requests: + # cpu: 100m + # memory: 150Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + service: + annotations: {} + # tolerations: [] + # podLabels: {} + + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + sidecars: [] + topologySpreadConstraints: [] + volumes: [] + # volumeMounts: [] + + consumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # noStrictOffsetReset: false + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + outcomesConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + maxBatchSize: "3" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + # maxBatchTimeMs: "" + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + outcomesBillingConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + maxBatchSize: "3" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + replacer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + # maxBatchTimeMs: "" + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # volumes: [] + # volumeMounts: [] + # noStrictOffsetReset: false + + metricsConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # 
autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + subscriptionConsumerEvents: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + genericMetricsCountersConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + genericMetricsDistributionConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + genericMetricsSetsConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + subscriptionConsumerMetrics: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # volumes: [] + # volumeMounts: [] + + subscriptionConsumerTransactions: + enabled: true + replicas: 1 + env: [] + resources: {} + # requests: + # cpu: 200m + # memory: 500Mi + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # volumes: [] + # volumeMounts: [] + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + + subscriptionConsumerSessions: + replicas: 1 + env: [] + resources: {} + affinity: {} + 
nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # commitBatchSize: 1 + # autoOffsetReset: "earliest" + sidecars: [] + volumes: [] + # noStrictOffsetReset: false + # volumeMounts: [] + + replaysConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + sessionsConsumer: + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + # noStrictOffsetReset: false + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + # maxBatchTimeMs: "" + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + transactionsConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + profilingProfilesConsumer: + replicas: 1 + env: [] + resources: {} + affinity: {} + sidecars: [] + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + profilingFunctionsConsumer: + replicas: 1 + env: [] + resources: {} + affinity: {} + sidecars: [] + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + 
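The worker and ingest-consumer blocks above repeatedly note that it is better to use prometheus-adapter and scale on the size of the RabbitMQ queue rather than on CPU. A minimal sketch of what that could look like with an `autoscaling/v2` HorizontalPodAutoscaler follows; the metric name, queue label and Deployment name are assumptions that depend on your RabbitMQ exporter and prometheus-adapter rules, not values rendered by this chart.

```yaml
# Illustrative only: assumes prometheus-adapter exposes rabbitmq_queue_messages_ready
# as an external metric; adjust names to your own exporter and adapter configuration.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: sentry-worker-queue-hpa        # hypothetical name
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: sentry-worker                # hypothetical Deployment name
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: External
      external:
        metric:
          name: rabbitmq_queue_messages_ready
          selector:
            matchLabels:
              queue: events.save_event    # scale on the busiest queue
        target:
          type: AverageValue
          averageValue: "500"             # ~500 ready messages per worker pod
```

If you manage an HPA like this yourself, leave the chart's built-in CPU-based `autoscaling` blocks disabled so the two controllers do not fight over the replica count.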
issueOccurrenceConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + spansConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + groupAttributesConsumer: + enabled: true + replicas: 1 + env: [] + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # autoOffsetReset: "earliest" + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 320 + # maxBatchSize: "" + # processes: "" + # inputBlockSize: "" + # outputBlockSize: "" + maxBatchTimeMs: 750 + # queuedMaxMessagesKbytes: "" + # queuedMinMessages: "" + # noStrictOffsetReset: false + + # volumeMounts: + # - mountPath: /dev/shm + # name: dshm + # volumes: + # - name: dshm + # emptyDir: + # medium: Memory + + dbInitJob: + env: [] + + migrateJob: + env: [] + + clickhouse: + maxConnections: 100 + + rustConsumer: false + +hooks: + enabled: true + preUpgrade: false + removeOnSuccess: true + activeDeadlineSeconds: 600 + shareProcessNamespace: false + dbCheck: + enabled: true + image: + # repository: subfuzion/netcat + # tag: latest + # pullPolicy: IfNotPresent + imagePullSecrets: [] + env: [] + # podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 64Mi + requests: + cpu: 100m + memory: 64Mi + affinity: {} + nodeSelector: {} + securityContext: {} + containerSecurityContext: {} + # tolerations: [] + # volumes: [] + # volumeMounts: [] + dbInit: + enabled: true + env: [] + # podLabels: {} + podAnnotations: {} + resources: + limits: + memory: 2048Mi + requests: + cpu: 300m + memory: 2048Mi + sidecars: [] + volumes: [] + affinity: {} + nodeSelector: {} + # tolerations: [] + # volumes: [] + # volumeMounts: [] + snubaInit: + enabled: true + # As snubaInit doesn't support configuring partition and replication factor, you can disable snubaInit's kafka topic creation by setting `kafka.enabled` to `false`, + # and create the topics using `kafka.provisioning.topics` with the desired partition and replication factor. + # Note that when you set `kafka.enabled` to `false`, snuba component might fail to start if newly added topics are not created by `kafka.provisioning`. 
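As a concrete illustration of the note above, a values override that turns off topic creation in the snuba-init hook and instead provisions topics with explicit partition and replication settings might look like the sketch below. The numbers are placeholders, and the full default topic list still has to be carried over, otherwise the snuba components may fail to start.

```yaml
# Sketch of a values.yaml override, not a tested configuration.
hooks:
  snubaInit:
    kafka:
      enabled: false          # skip topic auto-creation in the snuba-init job
kafka:
  provisioning:
    enabled: true
    replicationFactor: 3      # placeholder; match your broker count
    topics:
      - name: ingest-events
        partitions: 6         # placeholder partition count
      # ...repeat for every topic in the chart's default list
```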
+ kafka: + enabled: true + # podLabels: {} + podAnnotations: {} + resources: + limits: + cpu: 2000m + memory: 1Gi + requests: + cpu: 700m + memory: 1Gi + affinity: {} + nodeSelector: {} + # tolerations: [] + # volumes: [] + # volumeMounts: [] + snubaMigrate: + enabled: true + # podLabels: {} + # volumes: [] + # volumeMounts: [] + +system: + ## be sure to include the scheme on the url, for example: "https://sentry.example.com" + url: "" + adminEmail: "" + ## This should only be used if you’re installing Sentry behind your company’s firewall. + public: false + ## This will generate one for you (it's must be given upon updates) + # secretKey: "xx" + +mail: + # For example: smtp + backend: dummy + useTls: false + useSsl: false + username: "" + password: "" + # existingSecret: secret-name + ## set existingSecretKey if key name inside existingSecret is different from 'mail-password' + # existingSecretKey: secret-key-name + port: 25 + host: "" + from: "" + +symbolicator: + enabled: false + api: + usedeployment: true # Set true to use Deployment, false for StatefulSet + persistence: + enabled: true # Set true for using PersistentVolumeClaim, false for emptyDir + accessModes: ["ReadWriteOnce"] + # storageClassName: standard + size: "10Gi" + replicas: 1 + env: [] + probeInitialDelaySeconds: 10 + resources: {} + affinity: {} + nodeSelector: {} + securityContext: {} + topologySpreadConstraints: [] + containerSecurityContext: {} + # tolerations: [] + # podLabels: {} + # priorityClassName: "xxx" + config: |- + # See: https://getsentry.github.io/symbolicator/#configuration + cache_dir: "/data" + bind: "0.0.0.0:3021" + logging: + level: "warn" + metrics: + statsd: null + prefix: "symbolicator" + sentry_dsn: null + connect_to_reserved_ips: true + # caches: + # downloaded: + # max_unused_for: 1w + # retry_misses_after: 5m + # retry_malformed_after: 5m + # derived: + # max_unused_for: 1w + # retry_misses_after: 5m + # retry_malformed_after: 5m + # diagnostics: + # retention: 1w + + # TODO autoscaling in not yet implemented + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 5 + targetCPUUtilizationPercentage: 50 + + # volumes: [] + # volumeMounts: [] + # sidecars: [] + + # TODO The cleanup cronjob is not yet implemented + cleanup: + enabled: false + # podLabels: {} + # affinity: {} + # env: [] + # volumes: [] + # sidecars: [] + +auth: + register: true + +service: + name: sentry + type: ClusterIP + externalPort: 9000 + annotations: {} + # externalIPs: + # - 192.168.0.1 + # loadBalancerSourceRanges: [] + +# https://github.com/settings/apps (Create a Github App) +github: {} +# github: +# appId: "xxxx" +# appName: MyAppName +# clientId: "xxxxx" +# clientSecret: "xxxxx" +# privateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpA" !!!! Don't forget a trailing \n +# webhookSecret: "xxxxx" +# +# Note: if you use `existingSecret`, all above `clientId`, `clientSecret`, `privateKey`, `webhookSecret` +# params would be ignored, because chart will suppose that they are stored in `existingSecret`. 
So you +# must define all required keys and set it at least to empty strings if they are not needed in `existingSecret` +# secret (client-id, client-secret, webhook-secret, private-key) +# +# existingSecret: "xxxxx" +# existingSecretPrivateKeyKey: "" # by default "private-key" +# existingSecretWebhookSecretKey: "" # by default "webhook-secret" +# existingSecretClientIdKey: "" # by default "client-id" +# existingSecretClientSecretKey: "" # by default "client-secret" +# +# Reference -> https://docs.sentry.io/product/integrations/source-code-mgmt/github/ + +# https://developers.google.com/identity/sign-in/web/server-side-flow#step_1_create_a_client_id_and_client_secret +google: {} +# google: +# clientId: "" +# clientSecret: "" +# existingSecret: "" +# existingSecretClientIdKey: "" # by default "client-id" +# existingSecretClientSecretKey: "" # by default "client-secret" + +slack: {} +# slack: +# clientId: +# clientSecret: +# signingSecret: +# existingSecret: +# Reference -> https://develop.sentry.dev/integrations/slack/ + +discord: {} +# discord: +# applicationId: +# publicKey: +# clientSecret: +# botToken: +# existingSecret: +# Reference -> https://develop.sentry.dev/integrations/discord/ + +openai: {} +# existingSecret: "xxxxx" +# existingSecretKey: "" # by default "api-token" + +nginx: + enabled: true # true, if Safari compatibility is needed + containerPort: 8080 + existingServerBlockConfigmap: '{{ template "sentry.fullname" . }}' + resources: {} + replicaCount: 1 + nodeSelector: {} + # tolerations: [] + service: + type: ClusterIP + ports: + http: 80 + extraLocationSnippet: false + customReadinessProbe: + tcpSocket: + port: http + initialDelaySeconds: 5 + timeoutSeconds: 3 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + # extraLocationSnippet: | + # location /admin { + # allow 1.2.3.4; # VPN network + # deny all; + # proxy_pass http://sentry; + # } + # Use this to enable an extra service account + # serviceAccount: + # create: false + # name: nginx + metrics: + serviceMonitor: {} + +ingress: + enabled: true + # If you are using traefik ingress controller, switch this to 'traefik' + # if you are using AWS ALB Ingress controller, switch this to 'aws-alb' + # if you are using GKE Ingress controller, switch this to 'gke' + regexPathStyle: nginx + # ingressClassName: nginx + # If you are using AWS ALB Ingress controller, switch to true if you want activate the http to https redirection. + alb: + httpRedirect: false + # annotations: + # If you are using nginx ingress controller, please use at least those 2 annotations + # kubernetes.io/ingress.class: nginx + # nginx.ingress.kubernetes.io/use-regex: "true" + # https://github.com/getsentry/self-hosted/issues/1927 + # nginx.ingress.kubernetes.io/proxy-buffers-number: "16" + # nginx.ingress.kubernetes.io/proxy-buffer-size: "32k" + # + # hostname: + # additionalHostNames: [] + # + # tls: + # - secretName: + # hosts: + +filestore: + # Set to one of filesystem, gcs or s3 as supported by Sentry. + backend: filesystem + + filesystem: + path: /var/lib/sentry/files + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## database data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce # Set ReadWriteMany for work Replays + size: 10Gi + + ## Whether to mount the persistent volume to the Sentry worker and + ## cron deployments. This setting needs to be enabled for some advanced + ## Sentry features, such as private source maps. If you disable this + ## setting, the Sentry workers will not have access to artifacts you upload + ## through the web deployment. + ## Please note that you may need to change your accessMode to ReadWriteMany + ## if you plan on having the web, worker and cron deployments run on + ## different nodes. + persistentWorkers: false + + ## If existingClaim is specified, no PVC will be created and this claim will + ## be used + existingClaim: "" + + gcs: {} + ## Point this at a pre-configured secret containing a service account. The resulting + ## secret will be mounted at /var/run/secrets/google + # secretName: + # credentialsFile: credentials.json + # bucketName: + + ## Currently unconfigured and changing this has no impact on the template configuration. + ## Note that you can use a secret with default references "s3-access-key-id" and "s3-secret-access-key". + ## Otherwise, you can use custom secret references, or use plain text values. + s3: {} + # existingSecret: + # accessKeyIdRef: + # secretAccessKeyRef: + # accessKey: + # secretKey: + # bucketName: + # endpointUrl: + # signature_version: + # region_name: + # default_acl: + +config: + # No YAML Extension Config Given + configYml: {} + sentryConfPy: | + # No Python Extension Config Given + snubaSettingsPy: | + # No Python Extension Config Given + relay: | + # No YAML relay config given + web: + httpKeepalive: 15 + maxRequests: 100000 + maxRequestsDelta: 500 + maxWorkerLifetime: 86400 + +clickhouse: + enabled: true + nodeSelector: {} + # tolerations: [] + clickhouse: + replicas: "1" + configmap: + remote_servers: + internal_replication: true + replica: + backup: + enabled: false + zookeeper_servers: + enabled: true + config: + - index: "clickhouse" + hostTemplate: "{{ .Release.Name }}-zookeeper-clickhouse" + port: "2181" + users: + enabled: false + user: + # the first user will be used if enabled + - name: default + config: + password: "" + networks: + - ::/0 + profile: default + quota: default + + persistentVolumeClaim: + enabled: true + dataPersistentVolume: + enabled: true + accessModes: + - "ReadWriteOnce" + storage: "30Gi" + + ## Use this to enable an extra service account + # serviceAccount: + # annotations: {} + # enabled: false + # name: "sentry-clickhouse" + # automountServiceAccountToken: true + +## This value is only used when clickhouse.enabled is set to false +## +externalClickhouse: + ## Hostname or ip address of external clickhouse + ## + host: "clickhouse" + tcpPort: 9000 + httpPort: 8123 + username: default + password: "" + database: default + singleNode: true + # existingSecret: secret-name + ## set existingSecretKey if key name inside existingSecret is different from 'postgres-password' + # existingSecretKey: secret-key-name + ## Cluster name, can be found in config + ## (https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers) + ## or by executing `select * from system.clusters` + ## + # clusterName: test_shard_localhost + +# Settings for Zookeeper. 
+# See https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +zookeeper: + enabled: true + nameOverride: zookeeper-clickhouse + replicaCount: 1 + nodeSelector: {} + # tolerations: [] + ## When increasing the number of exceptions, you need to increase persistence.size + # persistence: + # size: 8Gi + +# Settings for Kafka. +# See https://github.com/bitnami/charts/tree/master/bitnami/kafka +kafka: + enabled: true + provisioning: + ## Increasing the replicationFactor enhances data reliability during Kafka pod failures by replicating data across multiple brokers. + # replicationFactor: 1 + enabled: true + # Topic list is based on files below. + # - https://github.com/getsentry/snuba/blob/master/snuba/utils/streams/topics.py + # - https://github.com/getsentry/self-hosted/blob/master/install/create-kafka-topics.sh#L6 + ## Default number of partitions for topics when unspecified + ## + # numPartitions: 1 + # Note that snuba component might fail if you set `hooks.snubaInit.kafka.enabled` to `false` and remove the topics from this default topic list. + topics: + - name: events + ## Number of partitions for this topic + # partitions: 1 + config: + "message.timestamp.type": LogAppendTime + - name: event-replacements + - name: snuba-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: cdc + - name: transactions + config: + "message.timestamp.type": LogAppendTime + - name: snuba-transactions-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: snuba-metrics + config: + "message.timestamp.type": LogAppendTime + - name: outcomes + - name: outcomes-dlq + - name: outcomes-billing + - name: outcomes-billing-dlq + - name: ingest-sessions + - name: snuba-sessions-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: snuba-metrics-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: scheduled-subscriptions-events + - name: scheduled-subscriptions-transactions + - name: scheduled-subscriptions-sessions + - name: scheduled-subscriptions-metrics + - name: scheduled-subscriptions-generic-metrics-sets + - name: scheduled-subscriptions-generic-metrics-distributions + - name: scheduled-subscriptions-generic-metrics-counters + - name: scheduled-subscriptions-generic-metrics-gauges + - name: events-subscription-results + - name: transactions-subscription-results + - name: sessions-subscription-results + - name: metrics-subscription-results + - name: generic-metrics-subscription-results + - name: snuba-queries + config: + "message.timestamp.type": LogAppendTime + - name: processed-profiles + config: + "message.timestamp.type": LogAppendTime + - name: profiles-call-tree + - name: ingest-replay-events + config: + "message.timestamp.type": LogAppendTime + "max.message.bytes": "15000000" + - name: snuba-generic-metrics + config: + "message.timestamp.type": LogAppendTime + - name: snuba-generic-metrics-sets-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: snuba-generic-metrics-distributions-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: snuba-generic-metrics-counters-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: snuba-generic-metrics-gauges-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - 
name: generic-events + config: + "message.timestamp.type": LogAppendTime + - name: snuba-generic-events-commit-log + config: + "cleanup.policy": "compact,delete" + "min.compaction.lag.ms": "3600000" + - name: group-attributes + config: + "message.timestamp.type": LogAppendTime + - name: snuba-attribution + - name: snuba-dead-letter-metrics + - name: snuba-dead-letter-sessions + - name: snuba-dead-letter-generic-metrics + - name: snuba-dead-letter-replays + - name: snuba-dead-letter-generic-events + - name: snuba-dead-letter-querylog + - name: snuba-dead-letter-group-attributes + - name: ingest-attachments + - name: ingest-attachments-dlq + - name: ingest-transactions + - name: ingest-transactions-dlq + - name: ingest-events-dlq + - name: ingest-events + ## If the number of exceptions increases, it is recommended to increase the number of partitions for ingest-events + # partitions: 1 + - name: ingest-replay-recordings + - name: ingest-metrics + - name: ingest-metrics-dlq + - name: ingest-performance-metrics + - name: ingest-feedback-events + - name: ingest-feedback-events-dlq + - name: ingest-monitors + - name: monitors-clock-tasks + - name: monitors-clock-tick + - name: profiles + - name: ingest-occurrences + - name: snuba-spans + - name: snuba-eap-spans-commit-log + - name: scheduled-subscriptions-eap-spans + - name: eap-spans-subscription-results + - name: snuba-eap-mutations + - name: shared-resources-usage + - name: snuba-metrics-summaries + - name: snuba-profile-chunks + - name: buffered-segments + - name: buffered-segments-dlq + - name: uptime-configs + - name: uptime-results + listeners: + client: + protocol: "PLAINTEXT" + controller: + protocol: "PLAINTEXT" + interbroker: + protocol: "PLAINTEXT" + external: + protocol: "PLAINTEXT" + zookeeper: + enabled: false + kraft: + enabled: true + controller: + replicaCount: 3 + nodeSelector: {} + # tolerations: [] + ## if the load on the kafka controller increases, resourcesPreset must be increased + # resourcesPreset: small # small, medium, large, xlarge, 2xlarge + ## if the load on the kafka controller increases, persistence.size must be increased + # persistence: + # size: 8Gi + ## Use this to enable an extra service account + # serviceAccount: + # create: false + # name: kafka + + ## Use this to enable an extra service account + # zookeeper: + # serviceAccount: + # create: false + # name: zookeeper + + # sasl: + # ## Credentials for client communications. + # ## @param sasl.client.users ist of usernames for client communications when SASL is enabled + # ## @param sasl.client.passwords list of passwords for client communications when SASL is enabled, must match the number of client.users + # ## First user and password will be used if enabled + # client: + # users: + # - sentry + # passwords: + # - password + # ## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` + # enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512 + # listeners: + # ## @param listeners.client.protocol Security protocol for the Kafka client listener. 
Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + # client: + # protocol: SASL_PLAINTEXT + +## This value is only used when kafka.enabled is set to false +## +externalKafka: + ## Multi hosts and ports of external kafka + ## + # cluster: + # - host: "233.5.100.28" + # port: 9092 + # - host: "kafka-confluent-2" + # port: 9093 + # - host: "kafka-confluent-3" + # port: 9094 + ## Or Hostname (ip address) of external kafka + # host: "kafka-confluent" + ## and port of external kafka + # port: 9092 + compression: + type: # 'gzip', 'snappy', 'lz4', 'zstd' + message: + max: + bytes: 50000000 + sasl: + mechanism: None # PLAIN,SCRAM-256,SCRAM-512 + username: None + password: None + security: + protocol: plaintext # 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' + socket: + timeout: + ms: 1000 + +sourcemaps: + enabled: false + +redis: + enabled: true + replica: + replicaCount: 1 + nodeSelector: {} + # tolerations: [] + auth: + enabled: false + sentinel: false + ## Just omit the password field if your redis cluster doesn't use password + # password: redis + # existingSecret: secret-name + ## set existingSecretPasswordKey if key name inside existingSecret is different from redis-password' + # existingSecretPasswordKey: secret-key-name + nameOverride: sentry-redis + master: + persistence: + enabled: true + nodeSelector: {} + # tolerations: [] + ## Use this to enable an extra service account + # serviceAccount: + # create: false + # name: sentry-redis + +## This value is only used when redis.enabled is set to false +## +externalRedis: + ## Hostname or ip address of external redis cluster + ## + # host: "redis" + port: 6379 + ## Just omit the password field if your redis cluster doesn't use password + # password: redis + # existingSecret: secret-name + ## set existingSecretKey if key name inside existingSecret is different from redis-password' + # existingSecretKey: secret-key-name + ## Integer database number to use for redis (This is an integer) + # db: 0 + ## Use ssl for the connection to Redis (True/False) + # ssl: false + +postgresql: + enabled: true + nameOverride: sentry-postgresql + auth: + database: sentry + replication: + enabled: false + readReplicas: 2 + synchronousCommit: "on" + numSynchronousReplicas: 1 + applicationName: sentry + ## Use this to enable an extra service account + # serviceAccount: + # enabled: false + ## Default connection max age is 0 (unlimited connections) + ## Set to a higher number to close connections after a period of time in seconds + connMaxAge: 0 + ## If you are increasing the number of replicas, you need to increase max_connections + # primary: + # extendedConfiguration: | + # max_connections=100 + # nodeSelector: {} + # tolerations: [] + ## When increasing the number of exceptions, you need to increase persistence.size + # primary: + # persistence: + # size: 8Gi + +## This value is only used when postgresql.enabled is set to false +## Set either externalPostgresql.password or externalPostgresql.existingSecret to configure password +externalPostgresql: + # host: postgres + port: 5432 + username: postgres + # password: postgres + # existingSecret: secret-name + # set existingSecretKeys in a secret, if not specified, value from the secret won't be used + # if externalPostgresql.existingSecret is used, externalPostgresql.existingSecretKeys.password must be specified. + existingSecretKeys: {} + # password: postgresql-password # Required if existingSecret is used. Key in existingSecret. 
+ # username: username + # database: database + # port: port + # host: host + database: sentry + # sslMode: require + ## Default connection max age is 0 (unlimited connections) + ## Set to a higher number to close connections after a period of time in seconds + connMaxAge: 0 + +rabbitmq: + ## If disabled, Redis will be used instead as the broker. + enabled: true + vhost: / + clustering: + forceBoot: true + rebalance: true + replicaCount: 1 + auth: + erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA + username: guest + password: guest + nameOverride: "" + pdb: + create: true + persistence: + enabled: true + resources: {} + memoryHighWatermark: {} + # enabled: true + # type: relative + # value: 0.4 + extraSecrets: + load-definition: + load_definition.json: | + { + "users": [ + { + "name": "{{ .Values.auth.username }}", + "password": "{{ .Values.auth.password }}", + "tags": "administrator" + } + ], + "permissions": [{ + "user": "{{ .Values.auth.username }}", + "vhost": "/", + "configure": ".*", + "write": ".*", + "read": ".*" + }], + "policies": [ + { + "name": "ha-all", + "pattern": ".*", + "vhost": "/", + "definition": { + "ha-mode": "all", + "ha-sync-mode": "automatic", + "ha-sync-batch-size": 1 + } + } + ], + "vhosts": [ + { + "name": "/" + } + ] + } + loadDefinition: + enabled: true + existingSecret: load-definition + extraConfiguration: | + load_definitions = /app/load_definition.json + ## Use this to enable an extra service account + # serviceAccount: + # create: false + # name: rabbitmq + metrics: + enabled: false + serviceMonitor: + enabled: false + path: "/metrics/per-object" # https://www.rabbitmq.com/docs/prometheus + labels: + release: "prometheus-operator" # helm release of kube-prometheus-stack + +memcached: + memoryLimit: "2048" + maxItemSize: "26214400" + args: + - "memcached" + - "-u memcached" + - "-p 11211" + - "-v" + - "-m $(MEMCACHED_MEMORY_LIMIT)" + - "-I $(MEMCACHED_MAX_ITEM_SIZE)" + extraEnvVarsCM: "sentry-memcached" + nodeSelector: {} + # tolerations: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + podAnnotations: {} + + ## Configure extra options for liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + failureThreshold: 3 + successThreshold: 1 + readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + failureThreshold: 3 + successThreshold: 1 + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + # requests: + # cpu: 100m + # memory: 100Mi + + nodeSelector: {} + tolerations: [] + affinity: {} + securityContext: {} + containerSecurityContext: {} + + volumes: [] + sidecars: [] + + # schedulerName: + # Optional extra labels for pod, i.e. 
redis-client: "true" + # podLabels: {} + service: + type: ClusterIP + labels: {} + + image: + repository: prom/statsd-exporter + tag: v0.17.0 + pullPolicy: IfNotPresent + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + relabelings: [] + metricRelabelings: [] + +revisionHistoryLimit: 10 + +# dnsPolicy: "ClusterFirst" +# dnsConfig: +# nameservers: [] +# searches: [] +# options: [] + +extraManifests: [] + +pgbouncer: + enabled: false + postgres: + cp_max: 10 + cp_min: 5 + host: '' + dbname: '' + user: '' + password: '' + image: + repository: "bitnami/pgbouncer" + tag: "1.23.1-debian-12-r5" + pullPolicy: IfNotPresent + replicas: 2 + podDisruptionBudget: + enabled: true + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 + authType: "md5" + maxClientConn: "8192" + poolSize: "50" + poolMode: "transaction" + resources: {} + nodeSelector: {} + tolerations: [] + affinity: {} + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + maxSurge: 25% + priorityClassName: '' + topologySpreadConstraints: [] diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 000000000..d29ff6e48 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,17 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "packages": { + "charts/clickhouse": { + "release-type": "helm", + "changelog-path": "CHANGELOG.md" + }, + "charts/sentry": { + "release-type": "helm", + "changelog-path": "CHANGELOG.md" + }, + "charts/sentry-kubernetes": { + "release-type": "helm", + "changelog-path": "CHANGELOG.md" + } + } +} diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..cee1803ca --- /dev/null +++ b/renovate.json @@ -0,0 +1,23 @@ +{ + "extends": ["config:recommended", ":rebaseStalePrs", "docker:disable"], + "enabled": true, + "prConcurrentLimit": 30, + "enabledManagers": ["helmv3", "github-actions"], + "schedule": [ + "before 5am on Monday" + ], + "packageRules": [ + { + "updateTypes": ["patch", "minor"], + "schedule": [ + "before 5am on Monday" + ] + }, + { + "updateTypes": ["major"], + "schedule": [ + "before 5am on the first day of the month" + ] + } + ] +} diff --git a/sentry-kubernetes/Chart.yaml b/sentry-kubernetes/Chart.yaml deleted file mode 100644 index de14f1188..000000000 --- a/sentry-kubernetes/Chart.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v2 -name: sentry-kubernetes -description: A Helm chart for sentry-kubernetes (https://github.com/getsentry/sentry-kubernetes) -type: application -version: 0.3.2 -appVersion: latest -home: https://github.com/getsentry/sentry-kubernetes -icon: https://sentry-brand.storage.googleapis.com/sentry-glyph-white.png -keywords: -- sentry -- report kubernetes events -sources: -- https://github.com/getsentry/sentry-kubernetes -- https://github.com/sentry-kubernetes/charts diff --git a/sentry-kubernetes/README.md b/sentry-kubernetes/README.md deleted file mode 100644 index a79f63b37..000000000 --- a/sentry-kubernetes/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# sentry-kubernetes - 
-[sentry-kubernetes](https://github.com/getsentry/sentry-kubernetes) is a utility that pushes Kubernetes events to [Sentry](https://sentry.io). - -# Installation: - -```console -$ helm install sentry/sentry-kubernetes --name my-release --set sentry.dsn= -``` - -## Configuration - -The following table lists the configurable parameters of the sentry-kubernetes chart and their default values. - -| Parameter | Description | Default | -| ----------------------- | --------------------------------------------------------------------------------------------------------------------------- | ----------------------------- | -| `sentry.dsn` | Sentry dsn | Empty | -| `existingSecret` | Existing secret to read DSN from | Empty | -| `sentry.environment` | Sentry environment | Empty | -| `sentry.release` | Sentry release | Empty | -| `sentry.logLevel` | Sentry log level | Empty | -| `image.repository` | Container image name | `getsentry/sentry-kubernetes` | -| `image.tag` | Container image tag | `latest` | -| `rbac.create` | If `true`, create and use RBAC resources | `true` | -| `serviceAccount.name` | Service account to be used. If not set and serviceAccount.create is `true`, a name is generated using the fullname template | `` | -| `serviceAccount.create` | If true, create a new service account | `true` | -| `priorityClassName` | pod priorityClassName | Empty | diff --git a/sentry-kubernetes/templates/deployment.yaml b/sentry-kubernetes/templates/deployment.yaml deleted file mode 100644 index 189e6ccbd..000000000 --- a/sentry-kubernetes/templates/deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: {{ include "sentry-kubernetes.labels" . | indent 4 }} - name: {{ template "sentry-kubernetes.fullname" . }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - app: {{ template "sentry-kubernetes.name" . }} - template: - metadata: - annotations: - checksum/secrets: {{ include (print .Template.BasePath "/secret.yaml") . | sha256sum }} - {{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | indent 8 }} - {{- end }} - labels: - app: {{ template "sentry-kubernetes.name" . }} - release: {{.Release.Name }} - {{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 8 }} - {{- end }} - spec: - {{- if .Values.priorityClassName }} - priorityClassName: "{{ .Values.priorityClassName }}" - {{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - env: - - name: DSN - valueFrom: - secretKeyRef: - name: {{ template "sentry-kubernetes.secretName" . }} - key: sentry.dsn - {{ if .Values.sentry.environment }} - - name: ENVIRONMENT - value: {{ .Values.sentry.environment }} - {{ end }} - {{ if .Values.sentry.release }} - - name: RELEASE - value: {{ .Values.sentry.release }} - {{ end }} - {{ if .Values.sentry.logLevel }} - - name: LOG_LEVEL - value: {{ .Values.sentry.logLevel }} - {{ end }} - resources: -{{ toYaml .Values.resources | indent 10 }} - {{- if .Values.nodeSelector }} - nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.tolerations }} - tolerations: -{{ toYaml .Values.tolerations | indent 8 }} - {{- end }} - serviceAccountName: {{ template "sentry-kubernetes.serviceAccountName" . 
}} diff --git a/sentry-kubernetes/values.yaml b/sentry-kubernetes/values.yaml deleted file mode 100644 index cfc6fa955..000000000 --- a/sentry-kubernetes/values.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Default values for sentry-kubernetes. - -sentry: - dsn: - logLevel: ~ -# Sentry DSN config using an existing secret: -# existingSecret: -image: - repository: getsentry/sentry-kubernetes - tag: latest - pullPolicy: Always -resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -serviceAccount: - # Specifies whether a ServiceAccount should be created - create: true - # The name of the ServiceAccount to use. - # If not set and create is true, a name is generated using the fullname template - name: - -rbac: - # Specifies whether RBAC resources should be created - create: true - -# Set priorityCLassName in deployment -# priorityClassName: "" - -podLabels: {} -podAnnotations: {} diff --git a/sentry/Chart.lock b/sentry/Chart.lock deleted file mode 100644 index 2cec84585..000000000 --- a/sentry/Chart.lock +++ /dev/null @@ -1,27 +0,0 @@ -dependencies: -- name: memcached - repository: https://charts.bitnami.com/bitnami - version: 6.1.5 -- name: redis - repository: https://charts.bitnami.com/bitnami - version: 16.12.1 -- name: kafka - repository: https://charts.bitnami.com/bitnami - version: 16.3.2 -- name: clickhouse - repository: https://sentry-kubernetes.github.io/charts - version: 3.1.2 -- name: zookeeper - repository: https://charts.bitnami.com/bitnami - version: 9.0.0 -- name: rabbitmq - repository: https://charts.bitnami.com/bitnami - version: 8.32.2 -- name: postgresql - repository: https://charts.bitnami.com/bitnami - version: 10.16.2 -- name: nginx - repository: https://charts.bitnami.com/bitnami - version: 12.0.4 -digest: sha256:5aa28a73d7a983a60887a63b85cf5ae3b35cede1b25e19c184d912a705cdced1 -generated: "2022-07-16T21:41:22.831533442+02:00" diff --git a/sentry/README.md b/sentry/README.md deleted file mode 100644 index 0495ca5a2..000000000 --- a/sentry/README.md +++ /dev/null @@ -1,127 +0,0 @@ -# Install - -## Add repo - -``` -helm repo add sentry https://sentry-kubernetes.github.io/charts -``` - -## Without overrides - -``` -helm install sentry sentry/sentry -``` - -## With your own values file - -``` -helm install sentry sentry/sentry -f values.yaml -``` - -# Upgrade - -Read the upgrade guide before upgrading to major versions of the chart. -[Upgrade Guide](docs/UPGRADE.md) - -## Configuration - -The following table lists the configurable parameters of the Sentry chart and their default values. - -Note: this table is incomplete, so have a look at the values.yaml in case you miss something - -| Parameter | Description | Default | -| :-------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :----------------------------- | -| `user.create` | if `true`, creates a default admin user defined from `email` and `password` | `true` | -| `user.email` | Admin user email | `admin@sentry.local` | -| `user.password` | Admin user password | `aaaa` | -| `ingress.enabled` | Enabling Ingress | `false` | -| `ingress.regexPathStyle` | Allows setting the style the regex paths are rendered in the ingress for the ingress controller in use. 
Possible values are `nginx`, `aws-alb`, `gke` and `traefik` | `nginx` | -| `nginx.enabled` | Enabling NGINX | `true` | -| `metrics.enabled` | if `true`, enable Prometheus metrics | `false` | -| `metrics.image.repository` | Metrics exporter image repository | `prom/statsd-exporter` | -| `metrics.image.tag` | Metrics exporter image tag | `v0.10.5` | -| `metrics.image.PullPolicy` | Metrics exporter image pull policy | `IfNotPresent` | -| `metrics.nodeSelector` | Node labels for metrics pod assignment | `{}` | -| `metrics.tolerations` | Toleration labels for metrics pod assignment | `[]` | -| `metrics.affinity` | Affinity settings for metrics | `{}` | -| `metrics.resources` | Metrics resource requests/limit | `{}` | -| `metrics.service.annotations` | annotations for Prometheus metrics service | `{}` | -| `metrics.service.clusterIP` | cluster IP address to assign to service (set to `"-"` to pass an empty value) | `nil` | -| `metrics.service.omitClusterIP` | (Deprecated) To omit the `clusterIP` from the metrics service | `false` | -| `metrics.service.externalIPs` | Prometheus metrics service external IP addresses | `[]` | -| `metrics.service.additionalLabels` | labels for metrics service | `{}` | -| `metrics.service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | -| `metrics.service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to load balancer (if supported) | `[]` | -| `metrics.service.servicePort` | Prometheus metrics service port | `9913` | -| `metrics.service.type` | type of Prometheus metrics service to create | `ClusterIP` | -| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | -| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | -| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` | -| `metrics.serviceMonitor.namespace` | namespace where servicemonitor resource should be created | `the same namespace as sentry` | -| `metrics.serviceMonitor.scrapeInterval` | interval between Prometheus scraping | `30s` | -| `serviceAccount.annotations` | Additional Service Account annotations. | `{}` | -| `serviceAccount.enabled` | If `true`, a custom Service Account will be used. | `false` | -| `serviceAccount.name` | The base name of the ServiceAccount to use. Will be appended with e.g. `snuba` or `web` for the pods accordingly. | `"sentry"` | -| `serviceAccount.automountServiceAccountToken` | Automount API credentials for a Service Account. | `true` | -| `system.secretKey` | secret key for the session cookie ([documentation](https://develop.sentry.dev/config/#general)) | `nil` | -| `sentry.features.vstsLimitedScopes` | Disables the azdo-integrations with limited scopes that is the cause of so much pain | `true` | -| `sentry.web.customCA.secretName` | Allows mounting a custom CA secret | `nil` | -| `sentry.web.customCA.item` | Key of CA cert object within the secret | `ca.crt` | -| `symbolicator.api.enabled` | Enable Symbolicator | `false` | -| `symbolicator.api.config` | Config file for Symbolicator, see [its docs](https://getsentry.github.io/symbolicator/#configuration) | see values.yaml | - -## NGINX and/or Ingress - -By default, NGINX is enabled to allow sending the incoming requests to [Sentry Relay](https://getsentry.github.io/relay/) or the Django backend depending on the path. 
When Sentry is meant to be exposed outside of the Kubernetes cluster, it is recommended to disable NGINX and let the Ingress handle this routing instead. The go-to ingress controller is [NGINX Ingress](https://kubernetes.github.io/ingress-nginx/), but others should work as well.
-
-## Sentry secret key
-
-For your security, the [`system.secret-key`](https://develop.sentry.dev/config/#general) is generated for you on the first installation. Unless you provide it explicitly, a new one is generated on each upgrade, invalidating all current sessions. The value is stored in the `sentry-sentry` configmap.
-
-```
-helm upgrade ... --set system.secretKey=xx
-```
-
-## Symbolicator and/or JavaScript source maps
-
-To get native stacktraces and minidumps symbolicated with debug symbols (e.g. iOS/Android), you need to enable Symbolicator via
-
-```yaml
-symbolicator:
-  enabled: true
-```
-
-However, you also need to share the data between sentry-worker and sentry-web. This can be done in different ways:
-
-- Using cloud storage like GCP GCS or AWS S3, see `filestore.backend` in `values.yaml`
-- Using a shared filesystem, for example:
-
-```yaml
-filestore:
-  filesystem:
-    persistence:
-      persistentWorkers: true
-      # storageClass: 'efs-storage' # see note below
-```
-
-Note: If you need to run (or cannot avoid running) sentry-worker and sentry-web on different cluster nodes, you need to set `filestore.filesystem.persistence.accessMode: ReadWriteMany`, or you may run into problems. HOWEVER, [not all volume drivers support it](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes), e.g. AWS EBS or GCP disks.
-In that case, create and use a `StorageClass` backed by a volume driver that does support it, such as [AWS EFS](https://github.com/kubernetes-sigs/aws-efs-csi-driver).
-
-It's also important to have `connect_to_reserved_ips: true` in the Symbolicator config file, which this chart sets by default.
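
A minimal sketch of the shared-filesystem values described in the note above, assuming a ReadWriteMany-capable `StorageClass` named `efs-storage` already exists in the cluster (the class name is illustrative, not a chart default):

```yaml
filestore:
  backend: filesystem
  filesystem:
    persistence:
      enabled: true
      persistentWorkers: true
      accessMode: ReadWriteMany   # needed when sentry-web and sentry-worker run on different nodes
      storageClass: "efs-storage" # assumed to be backed by a driver that supports ReadWriteMany, e.g. AWS EFS
```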
- -#### Source Maps - -To get javascript source map processing working, you need to activate sourcemaps, which in turn activates the memcached dependency: - -```yaml -sourcemaps: - enabled: true -``` - -For details on the background see this blog post: https://engblog.yext.com/post/sentry-js-source-maps - - -# Usage - -- [AWS + Terraform](docs/usage-aws-terraform.md) -- [DigitalOcean](docs/usage-digitalocean.md) \ No newline at end of file diff --git a/sentry/charts/clickhouse-3.1.2.tgz b/sentry/charts/clickhouse-3.1.2.tgz deleted file mode 100644 index 773e31ead..000000000 Binary files a/sentry/charts/clickhouse-3.1.2.tgz and /dev/null differ diff --git a/sentry/charts/kafka-16.3.2.tgz b/sentry/charts/kafka-16.3.2.tgz deleted file mode 100644 index c1af0bd17..000000000 Binary files a/sentry/charts/kafka-16.3.2.tgz and /dev/null differ diff --git a/sentry/charts/memcached-6.1.5.tgz b/sentry/charts/memcached-6.1.5.tgz deleted file mode 100644 index 52d48e079..000000000 Binary files a/sentry/charts/memcached-6.1.5.tgz and /dev/null differ diff --git a/sentry/charts/nginx-12.0.4.tgz b/sentry/charts/nginx-12.0.4.tgz deleted file mode 100644 index 1ec3530d5..000000000 Binary files a/sentry/charts/nginx-12.0.4.tgz and /dev/null differ diff --git a/sentry/charts/postgresql-10.16.2.tgz b/sentry/charts/postgresql-10.16.2.tgz deleted file mode 100644 index d3a6d0f8f..000000000 Binary files a/sentry/charts/postgresql-10.16.2.tgz and /dev/null differ diff --git a/sentry/charts/rabbitmq-8.32.2.tgz b/sentry/charts/rabbitmq-8.32.2.tgz deleted file mode 100644 index bf27e3d5d..000000000 Binary files a/sentry/charts/rabbitmq-8.32.2.tgz and /dev/null differ diff --git a/sentry/charts/redis-16.12.1.tgz b/sentry/charts/redis-16.12.1.tgz deleted file mode 100644 index 8be70f610..000000000 Binary files a/sentry/charts/redis-16.12.1.tgz and /dev/null differ diff --git a/sentry/charts/zookeeper-9.0.0.tgz b/sentry/charts/zookeeper-9.0.0.tgz deleted file mode 100644 index f1126b9db..000000000 Binary files a/sentry/charts/zookeeper-9.0.0.tgz and /dev/null differ diff --git a/sentry/templates/_helper.tpl b/sentry/templates/_helper.tpl deleted file mode 100644 index 94332c7ec..000000000 --- a/sentry/templates/_helper.tpl +++ /dev/null @@ -1,437 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{- define "sentry.prefix" -}} - {{- if .Values.prefix -}} - {{.Values.prefix}}- - {{- else -}} - {{- end -}} -{{- end -}} - -{{- define "nginx.port" -}}{{ default "8080" .Values.nginx.containerPort }}{{- end -}} -{{- define "relay.port" -}}3000{{- end -}} -{{- define "relay.healthCheck.readinessRequestPath" -}}/api/relay/healthcheck/ready/{{- end -}} -{{- define "relay.healthCheck.livenessRequestPath" -}}/api/relay/healthcheck/live/{{- end -}} -{{- define "sentry.port" -}}9000{{- end -}} -{{- define "sentry.healthCheck.requestPath" -}}/_health/{{- end -}} -{{- define "relay.healthCheck.requestPath" -}}/api/relay/healthcheck/live/{{- end -}} -{{- define "snuba.port" -}}1218{{- end -}} -{{- define "symbolicator.port" -}}3021{{- end -}} - -{{- define "relay.image" -}} -{{- default "getsentry/relay" .Values.images.relay.repository -}} -: -{{- default .Chart.AppVersion .Values.images.relay.tag -}} -{{- end -}} -{{- define "sentry.image" -}} -{{- default "getsentry/sentry" .Values.images.sentry.repository -}} -: -{{- default .Chart.AppVersion .Values.images.sentry.tag -}} -{{- end -}} -{{- define "snuba.image" -}} -{{- default "getsentry/snuba" .Values.images.snuba.repository -}} -: -{{- default .Chart.AppVersion 
.Values.images.snuba.tag -}} -{{- end -}} - -{{- define "symbolicator.image" -}} -{{- default "getsentry/symbolicator" .Values.images.symbolicator.repository -}} -: -{{- .Values.images.symbolicator.tag -}} -{{- end -}} - -{{- define "dbCheck.image" -}} -{{- default "subfuzion/netcat" .Values.hooks.dbCheck.image.repository -}} -: -{{- default "latest" .Values.hooks.dbCheck.image.tag -}} -{{- end -}} - -{{/* -Expand the name of the chart. -*/}} -{{- define "sentry.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "sentry.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - - -{{/* -Get KubeVersion removing pre-release information. -*/}} -{{- define "sentry.kubeVersion" -}} - {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "sentry.ingress.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "sentry.kubeVersion" .)) -}} - {{- print "networking.k8s.io/v1" -}} - {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} - {{- print "networking.k8s.io/v1beta1" -}} - {{- else -}} - {{- print "extensions/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* -Return if ingress is stable. -*/}} -{{- define "sentry.ingress.isStable" -}} - {{- eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1" -}} -{{- end -}} - -{{/* -Return if ingress supports ingressClassName. -*/}} -{{- define "sentry.ingress.supportsIngressClassName" -}} - {{- or (eq (include "sentry.ingress.isStable" .) "true") (and (eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "sentry.kubeVersion" .))) -}} -{{- end -}} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "sentry.ingress.supportsPathType" -}} - {{- or (eq (include "sentry.ingress.isStable" .) "true") (and (eq (include "sentry.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "sentry.kubeVersion" .))) -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "sentry.postgresql.fullname" -}} -{{- if .Values.postgresql.fullnameOverride -}} -{{- .Values.postgresql.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.postgresql.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name "sentry-postgresql" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "sentry.redis.fullname" -}} -{{- if .Values.redis.fullnameOverride -}} -{{- .Values.redis.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.redis.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name "sentry-redis" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{- define "sentry.rabbitmq.fullname" -}} -{{- printf "%s-%s" .Release.Name "rabbitmq" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- define "sentry.clickhouse.fullname" -}} -{{- printf "%s-%s" .Release.Name "clickhouse" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- define "sentry.kafka.fullname" -}} -{{- printf "%s-%s" .Release.Name "kafka" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- define "sentry.zookeeper.fullname" -}} -{{- if .Values.kafka.zookeeper.fullnameOverride -}} -{{- .Values.kafka.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.kafka.zookeeper.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name "zookeeper" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Set postgres host -*/}} -{{- define "sentry.postgresql.host" -}} -{{- if .Values.postgresql.enabled -}} -{{- template "sentry.postgresql.fullname" . -}} -{{- else -}} -{{ required "A valid .Values.externalPostgresql.host is required" .Values.externalPostgresql.host }} -{{- end -}} -{{- end -}} - -{{/* -Set postgres secret -*/}} -{{- define "sentry.postgresql.secret" -}} -{{- if .Values.postgresql.enabled -}} -{{- template "sentry.postgresql.fullname" . -}} -{{- else -}} -{{- template "sentry.fullname" . 
-}} -{{- end -}} -{{- end -}} - -{{/* -Set postgres port -*/}} -{{- define "sentry.postgresql.port" -}} -{{- if .Values.postgresql.enabled -}} -{{- default 5432 .Values.postgresql.service.port }} -{{- else -}} -{{- required "A valid .Values.externalPostgresql.port is required" .Values.externalPostgresql.port -}} -{{- end -}} -{{- end -}} - -{{/* -Set postgresql username -*/}} -{{- define "sentry.postgresql.username" -}} -{{- if .Values.postgresql.enabled -}} -{{- default "postgres" .Values.postgresql.postgresqlUsername }} -{{- else -}} -{{ required "A valid .Values.externalPostgresql.username is required" .Values.externalPostgresql.username }} -{{- end -}} -{{- end -}} - -{{/* -Set postgresql password -*/}} -{{- define "sentry.postgresql.password" -}} -{{- if .Values.postgresql.enabled -}} -{{- default "" .Values.postgresql.postgresqlPassword }} -{{- else -}} -{{ required "A valid .Values.externalPostgresql.password is required" .Values.externalPostgresql.password }} -{{- end -}} -{{- end -}} - -{{/* -Set postgresql database -*/}} -{{- define "sentry.postgresql.database" -}} -{{- if .Values.postgresql.enabled -}} -{{- default "sentry" .Values.postgresql.postgresqlDatabase }} -{{- else -}} -{{ required "A valid .Values.externalPostgresql.database is required" .Values.externalPostgresql.database }} -{{- end -}} -{{- end -}} - -{{/* -Set redis host -*/}} -{{- define "sentry.redis.host" -}} -{{- if .Values.redis.enabled -}} -{{- template "sentry.redis.fullname" . -}}-master -{{- else -}} -{{ required "A valid .Values.externalRedis.host is required" .Values.externalRedis.host }} -{{- end -}} -{{- end -}} - -{{/* -Set redis secret -*/}} -{{- define "sentry.redis.secret" -}} -{{- if .Values.redis.enabled -}} -{{- template "sentry.redis.fullname" . -}} -{{- else -}} -{{- template "sentry.fullname" . -}} -{{- end -}} -{{- end -}} - -{{/* -Set redis port -*/}} -{{- define "sentry.redis.port" -}} -{{- if .Values.redis.enabled -}} -{{- default 6379 .Values.redis.redisPort }} -{{- else -}} -{{ required "A valid .Values.externalRedis.port is required" .Values.externalRedis.port }} -{{- end -}} -{{- end -}} - -{{/* -Set redis password -*/}} -{{- define "sentry.redis.password" -}} -{{- if .Values.redis.enabled -}} -{{ .Values.redis.password }} -{{- else -}} -{{ .Values.externalRedis.password }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "sentry.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "sentry.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse host -*/}} -{{- define "sentry.clickhouse.host" -}} -{{- if .Values.clickhouse.enabled -}} -{{- template "sentry.clickhouse.fullname" . 
-}} -{{- else -}} -{{ required "A valid .Values.externalClickhouse.host is required" .Values.externalClickhouse.host }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse port -*/}} -{{- define "sentry.clickhouse.port" -}} -{{- if .Values.clickhouse.enabled -}} -{{- default 9000 .Values.clickhouse.clickhouse.tcp_port }} -{{- else -}} -{{ required "A valid .Values.externalClickhouse.tcpPort is required" .Values.externalClickhouse.tcpPort }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse HTTP port -*/}} -{{- define "sentry.clickhouse.http_port" -}} -{{- if .Values.clickhouse.enabled -}} -{{- default 8123 .Values.clickhouse.clickhouse.http_port }} -{{- else -}} -{{ required "A valid .Values.externalClickhouse.httpPort is required" .Values.externalClickhouse.httpPort }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse Database -*/}} -{{- define "sentry.clickhouse.database" -}} -{{- if .Values.clickhouse.enabled -}} -default -{{- else -}} -{{ required "A valid .Values.externalClickhouse.database is required" .Values.externalClickhouse.database }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse Authorization -*/}} -{{- define "sentry.clickhouse.auth" -}} ---user {{ include "sentry.clickhouse.username" . }} --password {{ include "sentry.clickhouse.password" .| quote }} -{{- end -}} - -{{/* -Set ClickHouse User -*/}} -{{- define "sentry.clickhouse.username" -}} -{{- if .Values.clickhouse.enabled -}} - {{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}} -{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).name }} - {{- else -}} -default - {{- end -}} -{{- else -}} -{{ required "A valid .Values.externalClickhouse.username is required" .Values.externalClickhouse.username }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse Password -*/}} -{{- define "sentry.clickhouse.password" -}} -{{- if .Values.clickhouse.enabled -}} - {{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}} -{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).config.password }} - {{- else -}} - {{- end -}} -{{- else -}} -{{ .Values.externalClickhouse.password }} -{{- end -}} -{{- end -}} - -{{/* -Set ClickHouse cluster name -*/}} -{{- define "sentry.clickhouse.cluster.name" -}} -{{- if .Values.clickhouse.enabled -}} -{{ .Release.Name | printf "%s-clickhouse" }} -{{- else -}} -{{ required "A valid .Values.externalClickhouse.clusterName is required" .Values.externalClickhouse.clusterName }} -{{- end -}} -{{- end -}} - -{{/* -Set Kafka Confluent host -*/}} -{{- define "sentry.kafka.host" -}} -{{- if .Values.kafka.enabled -}} -{{- template "sentry.kafka.fullname" . -}} -{{- else if and (.Values.externalKafka) (not (kindIs "slice" .Values.externalKafka)) -}} -{{ required "A valid .Values.externalKafka.host is required" .Values.externalKafka.host }} -{{- end -}} -{{- end -}} - -{{/* -Set Kafka Confluent port -*/}} -{{- define "sentry.kafka.port" -}} -{{- if and (.Values.kafka.enabled) (.Values.kafka.service.ports.client) -}} -{{- .Values.kafka.service.ports.client }} -{{- else if and (.Values.externalKafka) (not (kindIs "slice" .Values.externalKafka)) -}} -{{ required "A valid .Values.externalKafka.port is required" .Values.externalKafka.port }} -{{- end -}} -{{- end -}} - -{{/* -Set Kafka bootstrap servers string -*/}} -{{- define "sentry.kafka.bootstrap_servers_string" -}} -{{- if or (.Values.kafka.enabled) (not (kindIs "slice" .Values.externalKafka)) -}} -{{ printf "%s:%s" (include "sentry.kafka.host" .) (include "sentry.kafka.port" .) 
}} -{{- else -}} -{{- range $index, $elem := .Values.externalKafka -}} -{{- if $index -}},{{- end -}}{{ printf "%s:%s" $elem.host (toString $elem.port) }} -{{- end -}} -{{- end -}} -{{- end -}} - - -{{/* -Set RabbitMQ host -*/}} -{{- define "sentry.rabbitmq.host" -}} -{{- if .Values.rabbitmq.enabled -}} -{{- default "sentry-rabbitmq-ha" (include "sentry.rabbitmq.fullname" .) -}} -{{- else -}} -{{ .Values.rabbitmq.host }} -{{- end -}} -{{- end -}} - -{{/* -Common Snuba environment variables -*/}} -{{- define "sentry.snuba.env" -}} -- name: SNUBA_SETTINGS - value: /etc/snuba/settings.py -- name: DEFAULT_BROKERS - value: {{ include "sentry.kafka.bootstrap_servers_string" . | quote }} -{{- end -}} diff --git a/sentry/templates/configmap-nginx.yaml b/sentry/templates/configmap-nginx.yaml deleted file mode 100644 index c91464d32..000000000 --- a/sentry/templates/configmap-nginx.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- if .Values.nginx.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "sentry.fullname" . }}-nginx -data: - server-block.conf: | - upstream relay { - server {{ template "sentry.fullname" . }}-relay:{{ template "relay.port" }}; - } - - upstream sentry { - server {{ template "sentry.fullname" . }}-web:{{ template "sentry.port" }}; - } - - server { - listen {{ template "nginx.port" }}; - - proxy_redirect off; - proxy_set_header Host $host; - - location /api/store/ { - proxy_pass http://relay; - } - - location ~ ^/api/[1-9]\d*/ { - proxy_pass http://relay; - } - - location / { - proxy_pass http://sentry; - } - } -{{- end }} diff --git a/sentry/templates/configmap-relay.yaml b/sentry/templates/configmap-relay.yaml deleted file mode 100644 index cfcd0b938..000000000 --- a/sentry/templates/configmap-relay.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- $redisHost := include "sentry.redis.host" . -}} -{{- $redisPort := include "sentry.redis.port" . -}} -{{- $redisPass := include "sentry.redis.password" . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "sentry.fullname" . }}-relay - labels: - app: sentry - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -data: - config.yml: |- - relay: - {{- if .Values.relay.mode }} - mode: {{ .Values.relay.mode }} - {{- end }} - upstream: "http://{{ template "sentry.fullname" . }}-web:{{ .Values.service.externalPort }}/" - host: 0.0.0.0 - port: {{ template "relay.port" }} - - processing: - enabled: true - - kafka_config: - - name: "bootstrap.servers" - value: {{ (include "sentry.kafka.bootstrap_servers_string" .) | quote }} - - name: "message.max.bytes" - value: 50000000 # 50MB or bust - - {{- if $redisPass }} - redis: "redis://:{{ $redisPass }}@{{ $redisHost }}:{{ $redisPort }}" - {{- else }} - redis: "redis://{{ $redisHost }}:{{ $redisPort }}" - {{- end }} - -{{ .Values.config.relay | indent 4 }} diff --git a/sentry/templates/configmap-sentry.yaml b/sentry/templates/configmap-sentry.yaml deleted file mode 100644 index 44fc78f98..000000000 --- a/sentry/templates/configmap-sentry.yaml +++ /dev/null @@ -1,459 +0,0 @@ -{{- $redisHost := include "sentry.redis.host" . -}} -{{- $redisPort := include "sentry.redis.port" . -}} -{{- $redisPass := include "sentry.redis.password" . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "sentry.fullname" . 
}}-sentry - labels: - app: sentry - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -data: - config.yml: |- - {{- if .Values.system.adminEmail }} - system.admin-email: {{ .Values.system.adminEmail | quote }} - {{- end }} - system.secret-key: {{ .Values.system.secretKey | default (randAlphaNum 50) | quote }} - {{- if .Values.system.url }} - system.url-prefix: {{ .Values.system.url | quote }} - {{- end }} - - # This URL will be used to tell Symbolicator where to obtain the Sentry source. - # See https://getsentry.github.io/symbolicator/api/ - system.internal-url-prefix: 'http://{{ template "sentry.fullname" . }}-web:{{ .Values.service.externalPort }}' - symbolicator.enabled: {{ .Values.symbolicator.enabled }} - {{- if .Values.symbolicator.enabled }} - symbolicator.options: - url: "http://{{ template "sentry.fullname" . }}-symbolicator:{{ template "symbolicator.port" }}" - {{- end }} - - ########## - # Github # - ########## - {{- if .Values.github.appId }} - github-app.id: {{ .Values.github.appId }} - {{- end }} - {{- if .Values.github.appName }} - github-app.name: {{ .Values.github.appName | quote }} - {{- end }} - {{- if .Values.github.privateKey }} - github-app.private-key: |- -{{ .Values.github.privateKey | indent 8 }} - {{- end }} - {{- if .Values.github.webhookSecret }} - github-app.webhook-secret: {{ .Values.github.webhookSecret | quote }} - {{- end }} - {{- if .Values.github.clientId }} - github-app.client-id: {{ .Values.github.clientId | quote }} - {{- end }} - {{- if .Values.github.clientSecret }} - github-app.client-secret: {{ .Values.github.clientSecret | quote }} - {{- end }} - - ########## - # Google # - ########## - {{- if .Values.google.clientId }} - auth-google.client-id: {{ .Values.google.clientId | quote }} - auth-google.client-secret: {{ .Values.google.clientSecret | quote }} - {{ end }} - - ######### - # Slack # - ######### - {{- if .Values.slack.clientId }} - slack.client-id: {{ .Values.slack.clientId | quote }} - slack.client-secret: {{ .Values.slack.clientSecret | quote }} - slack.signing-secret: {{ .Values.slack.signingSecret | quote }} - {{ end }} - - ######### - # Redis # - ######### - redis.clusters: - default: - hosts: - 0: - host: {{ $redisHost | quote }} - port: {{ $redisPort }} - {{- if $redisPass }} - password: {{ $redisPass | quote }} - {{- end }} - - ################ - # File storage # - ################ - # Uploaded media uses these `filestore` settings. The available - # backends are either `filesystem` or `s3`. 
- filestore.backend: {{ .Values.filestore.backend | quote }} - {{- if eq .Values.filestore.backend "filesystem" }} - filestore.options: - location: {{ .Values.filestore.filesystem.path | quote }} - {{ end }} - {{- if eq .Values.filestore.backend "gcs" }} - filestore.options: - bucket_name: {{ .Values.filestore.gcs.bucketName | quote }} - {{ end }} - {{- if eq .Values.filestore.backend "s3" }} - filestore.options: - {{- if .Values.filestore.s3.accessKey }} - access_key: {{ .Values.filestore.s3.accessKey | quote }} - {{- end }} - {{- if .Values.filestore.s3.secretKey }} - secret_key: {{ .Values.filestore.s3.secretKey | quote }} - {{- end }} - {{- if .Values.filestore.s3.bucketName }} - bucket_name: {{ .Values.filestore.s3.bucketName | quote }} - {{- end }} - {{- if .Values.filestore.s3.endpointUrl }} - endpoint_url: {{ .Values.filestore.s3.endpointUrl | quote }} - {{- end }} - {{- if .Values.filestore.s3.signature_version }} - signature_version: {{ .Values.filestore.s3.signature_version | quote }} - {{- end }} - {{- if .Values.filestore.s3.region_name }} - region_name: {{ .Values.filestore.s3.region_name | quote }} - {{- end }} - {{- if .Values.filestore.s3.default_acl }} - default_acl: {{ .Values.filestore.s3.default_acl | quote }} - {{- end }} - {{ end }} - - {{- if .Values.config.configYml }} -{{ .Values.config.configYml | toYaml | indent 4 }} - {{- end }} - sentry.conf.py: |- - from sentry.conf.server import * # NOQA - from distutils.util import strtobool - - - {{- if .Values.sourcemaps.enabled }} - CACHES = { - "default": { - "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", - "LOCATION": [ - "sentry-memcached:11211" - ] - } - } - {{- end }} - - DATABASES = { - "default": { - "ENGINE": "sentry.db.postgres", - "NAME": {{ include "sentry.postgresql.database" . | quote }}, - "USER": {{ include "sentry.postgresql.username" . | quote }}, - "PASSWORD": os.environ.get("POSTGRES_PASSWORD", {{ include "sentry.postgresql.password" . | quote }}), - "HOST": {{ include "sentry.postgresql.host" . | quote }}, - "PORT": {{ template "sentry.postgresql.port" . }}, - {{- if .Values.externalPostgresql.sslMode }} - 'OPTIONS': { - 'sslmode': '{{ .Values.externalPostgresql.sslMode }}', - }, - {{- end }} - } - } - - # You should not change this setting after your database has been created - # unless you have altered all schemas first - SENTRY_USE_BIG_INTS = True - - ########### - # General # - ########### - - # Instruct Sentry that this install intends to be run by a single organization - # and thus various UI optimizations should be enabled. - SENTRY_SINGLE_ORGANIZATION = {{ if .Values.sentry.singleOrganization }}True{{ else }}False{{ end }} - - SENTRY_OPTIONS["system.event-retention-days"] = int(env('SENTRY_EVENT_RETENTION_DAYS') or {{ .Values.sentry.cleanup.days | quote }}) - - ######### - # Queue # - ######### - - # See https://docs.getsentry.com/on-premise/server/queue/ for more - # information on configuring your queue broker and workers. Sentry relies - # on a Python framework called Celery to manage queues. - - {{- if or (.Values.rabbitmq.enabled) (.Values.rabbitmq.host) }} - BROKER_URL = os.environ.get("BROKER_URL", "amqp://{{ .Values.rabbitmq.auth.username }}:{{ .Values.rabbitmq.auth.password }}@{{ template "sentry.rabbitmq.host" . 
}}:5672//") - {{- else if $redisPass }} - BROKER_URL = os.environ.get("BROKER_URL", "redis://:{{ $redisPass }}@{{ $redisHost }}:{{ $redisPort }}/0") - {{- else }} - BROKER_URL = os.environ.get("BROKER_URL", "redis://{{ $redisHost }}:{{ $redisPort }}/0") - {{- end }} - - ######### - # Cache # - ######### - - # Sentry currently utilizes two separate mechanisms. While CACHES is not a - # requirement, it will optimize several high throughput patterns. - - # CACHES = { - # "default": { - # "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", - # "LOCATION": ["memcached:11211"], - # "TIMEOUT": 3600, - # } - # } - - # A primary cache is required for things such as processing events - SENTRY_CACHE = "sentry.cache.redis.RedisCache" - - DEFAULT_KAFKA_OPTIONS = { - "bootstrap.servers": {{ (include "sentry.kafka.bootstrap_servers_string" .) | quote }}, - "message.max.bytes": 50000000, - "socket.timeout.ms": 1000, - } - - SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" - SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS} - - KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS - - ############### - # Rate Limits # - ############### - - # Rate limits apply to notification handlers and are enforced per-project - # automatically. - - SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter" - - ################## - # Update Buffers # - ################## - - # Buffers (combined with queueing) act as an intermediate layer between the - # database and the storage API. They will greatly improve efficiency on large - # numbers of the same events being sent to the API in a short amount of time. - # (read: if you send any kind of real data to Sentry, you should enable buffers) - - SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer" - - ########## - # Quotas # - ########## - - # Quotas allow you to rate limit individual projects or the Sentry install as - # a whole. - - SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota" - - ######## - # TSDB # - ######## - - # The TSDB is used for building charts as well as making things like per-rate - # alerts possible. - - SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB" - - ######### - # SNUBA # - ######### - - SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend" - SENTRY_SEARCH_OPTIONS = {} - SENTRY_TAGSTORE_OPTIONS = {} - - ########### - # Digests # - ########### - - # The digest backend powers notification summaries. - - SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" - - ############## - # Web Server # - ############## - - SENTRY_WEB_HOST = "0.0.0.0" - SENTRY_WEB_PORT = {{ template "sentry.port" }} - SENTRY_PUBLIC = {{ .Values.system.public | ternary "True" "False" }} - SENTRY_WEB_OPTIONS = { - "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), - "protocol": "uwsgi", - # This is needed to prevent https://git.io/fj7Lw - "uwsgi-socket": None, - - # These ase for proper HTTP/1.1 support from uWSGI - # Without these it doesn't do keep-alives causing - # issues with Relay's direct requests. - "http-keepalive": True, - "http-chunked-input": True, - # the number of web workers - 'workers': 3, - # Turn off memory reporting - "memory-report": False, - # Some stuff so uwsgi will cycle workers sensibly - 'max-requests': 100000, - 'max-requests-delta': 500, - 'max-worker-lifetime': 86400, - # Duplicate options from sentry default just so we don't get - # bit by sentry changing a default value that we depend on. 
- 'thunder-lock': True, - 'log-x-forwarded-for': False, - 'buffer-size': 32768, - 'limit-post': 209715200, - 'disable-logging': True, - 'reload-on-rss': 600, - 'ignore-sigpipe': True, - 'ignore-write-errors': True, - 'disable-write-exception': True, - } - - ########### - # SSL/TLS # - ########### - - # If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto - # header and enable the settings below - - # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') - # SESSION_COOKIE_SECURE = True - # CSRF_COOKIE_SECURE = True - # SOCIAL_AUTH_REDIRECT_IS_HTTPS = True - - # End of SSL/TLS settings - - ############ - # Features # - ############ - - - SENTRY_FEATURES = { - "auth:register": {{ .Values.auth.register | ternary "True" "False" }} - } - SENTRY_FEATURES["projects:sample-events"] = False - SENTRY_FEATURES.update( - { - feature: True - for feature in ( - {{- if not .Values.sentry.singleOrganization }} - "organizations:create", - {{ end -}} - - {{- if .Values.sentry.features.orgSubdomains }} - "organizations:org-subdomains", - {{ end -}} - - "organizations:advanced-search", - "organizations:android-mappings", - "organizations:api-keys", - "organizations:boolean-search", - "organizations:related-events", - "organizations:alert-filters", - "organizations:custom-symbol-sources", - "organizations:dashboards-basic", - "organizations:dashboards-edit", - "organizations:data-forwarding", - "organizations:discover", - "organizations:discover-basic", - "organizations:discover-query", - "organizations:enterprise-perf", - "organizations:event-attachments", - "organizations:events", - "organizations:global-views", - "organizations:incidents", - "organizations:metric-alert-builder-aggregate", - "organizations:metric-alert-gui-filters", - "organizations:integrations-event-hooks", - "organizations:integrations-issue-basic", - "organizations:integrations-issue-sync", - "organizations:integrations-alert-rule", - "organizations:integrations-chat-unfurl", - "organizations:integrations-incident-management", - "organizations:integrations-ticket-rules", - - {{- if .Values.sentry.features.vstsLimitedScopes }} - "organizations:integrations-vsts-limited-scopes", - {{ end -}} - - "organizations:integrations-stacktrace-link", - "organizations:internal-catchall", - "organizations:invite-members", - "organizations:large-debug-files", - "organizations:monitors", - "organizations:onboarding", - "organizations:org-saved-searches", - "organizations:performance-view", - "organizations:project-detail", - "organizations:relay", - "organizations:release-performance-views", - "organizations:rule-page", - "organizations:set-grouping-config", - "organizations:custom-event-title", - "organizations:slack-migration", - "organizations:sso-basic", - "organizations:sso-rippling", - "organizations:sso-saml2", - "organizations:sso-migration", - "organizations:stacktrace-hover-preview", - "organizations:symbol-sources", - "organizations:transaction-comparison", - "organizations:usage-stats-graph", - "organizations:inbox", - "organizations:unhandled-issue-flag", - "organizations:invite-members-rate-limits", - "organizations:dashboards-v2", - - "projects:alert-filters", - "projects:custom-inbound-filters", - "projects:data-forwarding", - "projects:discard-groups", - "projects:issue-alerts-targeting", - "projects:minidump", - "projects:rate-limits", - "projects:sample-events", - "projects:servicehooks", - "projects:similarity-view", - "projects:similarity-indexing", - "projects:similarity-view-v2", - 
"projects:similarity-indexing-v2", - "projects:reprocessing-v2", - - "projects:plugins", - ) - } - ) - - ####################### - # Email Configuration # - ####################### - SENTRY_OPTIONS['mail.backend'] = os.getenv("SENTRY_EMAIL_BACKEND", {{ .Values.mail.backend | quote }}) - SENTRY_OPTIONS['mail.use-tls'] = bool(strtobool(os.getenv("SENTRY_EMAIL_USE_TLS", {{ .Values.mail.useTls | quote }}))) - SENTRY_OPTIONS['mail.use-ssl'] = bool(strtobool(os.getenv("SENTRY_EMAIL_USE_SSL", {{ .Values.mail.useSsl | quote }}))) - SENTRY_OPTIONS['mail.username'] = os.getenv("SENTRY_EMAIL_USERNAME", {{ .Values.mail.username | quote }}) - SENTRY_OPTIONS['mail.password'] = os.getenv("SENTRY_EMAIL_PASSWORD", {{ .Values.mail.password | quote }}) - SENTRY_OPTIONS['mail.port'] = int(os.getenv("SENTRY_EMAIL_PORT", {{ .Values.mail.port | quote }})) - SENTRY_OPTIONS['mail.host'] = os.getenv("SENTRY_EMAIL_HOST", {{ .Values.mail.host | quote }}) - SENTRY_OPTIONS['mail.from'] = os.getenv("SENTRY_EMAIL_FROM", {{ .Values.mail.from | quote }}) - - ######################### - # Bitbucket Integration # - ######################## - - # BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY' - # BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET' - - ######### - # Relay # - ######### - SENTRY_RELAY_WHITELIST_PK = [] - SENTRY_RELAY_OPEN_REGISTRATION = True - -{{- if .Values.metrics.enabled }} - SENTRY_METRICS_BACKEND = 'sentry.metrics.statsd.StatsdMetricsBackend' - SENTRY_METRICS_OPTIONS = { - 'host': '{{ template "sentry.fullname" . }}-metrics', - 'port': 9125, - } -{{- end }} - -{{ .Values.config.sentryConfPy | indent 4 }} diff --git a/sentry/templates/configmap-snuba.yaml b/sentry/templates/configmap-snuba.yaml deleted file mode 100644 index b41d9c388..000000000 --- a/sentry/templates/configmap-snuba.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{{- $redisPass := include "sentry.redis.password" . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "sentry.fullname" . }}-snuba - labels: - app: sentry - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -data: - settings.py: | - import os - - from snuba.settings import * - - env = os.environ.get - - DEBUG = env("DEBUG", "0").lower() in ("1", "true") - - # Clickhouse Options - CLUSTERS = [ - { - "host": env("CLICKHOUSE_HOST", {{ include "sentry.clickhouse.host" . | quote }}), - "port": int({{ include "sentry.clickhouse.port" . }}), - "user": env("CLICKHOUSE_USER", "default"), - "password": env("CLICKHOUSE_PASSWORD", ""), - "database": env("CLICKHOUSE_DATABASE", "default"), - "http_port": {{ include "sentry.clickhouse.http_port" . }}, - "storage_sets": { - "cdc", - "discover", - "events", - "events_ro", - "metrics", - "migrations", - "outcomes", - "querylog", - "sessions", - "transactions", - "transactions_ro", - "transactions_v2", - "errors_v2", - "errors_v2_ro", - "profiles", - "replays", - "generic_metrics_sets", - }, - {{- /* - The default clickhouse installation runs in distributed mode, while the external - clickhouse configured can be configured any way you choose - */}} - {{- if and .Values.externalClickhouse.singleNode (not .Values.clickhouse.enabled) }} - "single_node": True, - {{- else }} - "single_node": False, - {{- end }} - {{- if or .Values.clickhouse.enabled (not .Values.externalClickhouse.singleNode) }} - "cluster_name": {{ include "sentry.clickhouse.cluster.name" . | quote }}, - "distributed_cluster_name": {{ include "sentry.clickhouse.cluster.name" . 
| quote }}, - {{- end }} - }, - ] - - # Redis Options - REDIS_HOST = {{ include "sentry.redis.host" . | quote }} - REDIS_PORT = {{ include "sentry.redis.port" . }} - {{- if $redisPass }} - REDIS_PASSWORD = {{ $redisPass | quote }} - {{- end }} - REDIS_DB = int(env("REDIS_DB", 1)) - -{{- if .Values.metrics.enabled }} - DOGSTATSD_HOST = "{{ template "sentry.fullname" . }}-metrics" - DOGSTATSD_PORT = 9125 -{{- end }} - -{{ .Values.config.snubaSettingsPy | indent 4 }} diff --git a/sentry/templates/cronjob-snuba-cleanup-errors.yaml b/sentry/templates/cronjob-snuba-cleanup-errors.yaml deleted file mode 100644 index e2a5d447f..000000000 --- a/sentry/templates/cronjob-snuba-cleanup-errors.yaml +++ /dev/null @@ -1,98 +0,0 @@ -{{- if .Values.snuba.cleanupErrors.enabled }} -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: {{ template "sentry.fullname" . }}-snuba-cleanup-errors - labels: - app: {{ template "sentry.fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - schedule: "{{ .Values.snuba.cleanupErrors.schedule }}" - concurrencyPolicy: "{{ .Values.snuba.cleanupErrors.concurrencyPolicy }}" - jobTemplate: - spec: - template: - metadata: - annotations: - checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} - {{- if .Values.snuba.cleanupErrors.annotations }} -{{ toYaml .Values.snuba.cleanupErrors.annotations | indent 12 }} - {{- end }} - labels: - app: {{ template "sentry.fullname" . }} - release: "{{ .Release.Name }}" - {{- if .Values.snuba.cleanupErrors.podLabels }} -{{ toYaml .Values.snuba.cleanupErrors.podLabels | indent 12 }} - {{- end }} - spec: - affinity: - {{- if .Values.snuba.cleanupErrors.affinity }} -{{ toYaml .Values.snuba.cleanupErrors.affinity | indent 12 }} - {{- end }} - {{- if .Values.snuba.cleanupErrors.nodeSelector }} - nodeSelector: -{{ toYaml .Values.snuba.cleanupErrors.nodeSelector | indent 12 }} - {{- end }} - {{- if .Values.snuba.cleanupErrors.tolerations }} - tolerations: -{{ toYaml .Values.snuba.cleanupErrors.tolerations | indent 12 }} - {{- end }} - {{- if .Values.dnsPolicy }} - dnsPolicy: {{ .Values.dnsPolicy | quote }} - {{- end }} - {{- if .Values.dnsConfig }} - dnsConfig: -{{ toYaml .Values.dnsConfig | indent 12 }} - {{- end }} - {{- if .Values.images.snuba.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.images.snuba.imagePullSecrets | indent 12 }} - {{- end }} - containers: - - name: {{ .Chart.Name }}-snuba-cleanup-errors - image: "{{ template "snuba.image" . }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} - command: - - "snuba" - - "cleanup" - - "--storage" - - "errors" - - "--dry-run" - - "False" - - "--clickhouse-host" - - {{ include "sentry.clickhouse.host" . | quote }} - - "--clickhouse-port" - - {{ include "sentry.clickhouse.port" . | quote }} - env: - - name: SNUBA_SETTINGS - value: /etc/snuba/settings.py - {{- if .Values.snuba.cleanupErrors.env }} -{{ toYaml .Values.snuba.cleanupErrors.env | indent 14 }} - {{- end }} - envFrom: - - secretRef: - name: {{ template "sentry.fullname" . 
}}-snuba-env - volumeMounts: - - mountPath: /etc/snuba - name: config - readOnly: true - resources: -{{ toYaml .Values.snuba.cleanupErrors.resources | indent 14 }} -{{- if .Values.snuba.cleanupErrors.sidecars }} -{{ toYaml .Values.snuba.cleanupErrors.sidecars | indent 10 }} -{{- end }} - restartPolicy: Never - volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba -{{- if .Values.snuba.cleanupErrors.volumes }} -{{ toYaml .Values.snuba.cleanupErrors.volumes | indent 10 }} -{{- end }} - {{- if .Values.snuba.cleanupErrors.priorityClassName }} - priorityClassName: "{{ .Values.snuba.cleanupErrors.priorityClassName }}" - {{- end }} -{{- end }} diff --git a/sentry/templates/cronjob-snuba-cleanup-transactions.yaml b/sentry/templates/cronjob-snuba-cleanup-transactions.yaml deleted file mode 100644 index 463933442..000000000 --- a/sentry/templates/cronjob-snuba-cleanup-transactions.yaml +++ /dev/null @@ -1,98 +0,0 @@ -{{- if .Values.snuba.cleanupTransactions.enabled }} -apiVersion: batch/v1beta1 -kind: CronJob -metadata: - name: {{ template "sentry.fullname" . }}-snuba-cleanup-transactions - labels: - app: {{ template "sentry.fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" -spec: - schedule: "{{ .Values.snuba.cleanupTransactions.schedule }}" - concurrencyPolicy: "{{ .Values.snuba.cleanupTransactions.concurrencyPolicy }}" - jobTemplate: - spec: - template: - metadata: - annotations: - checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} - {{- if .Values.snuba.cleanupTransactions.annotations }} -{{ toYaml .Values.snuba.cleanupTransactions.annotations | indent 12 }} - {{- end }} - labels: - app: {{ template "sentry.fullname" . }} - release: "{{ .Release.Name }}" - {{- if .Values.snuba.cleanupTransactions.podLabels }} -{{ toYaml .Values.snuba.cleanupTransactions.podLabels | indent 12 }} - {{- end }} - spec: - affinity: - {{- if .Values.snuba.cleanupTransactions.affinity }} -{{ toYaml .Values.snuba.cleanupTransactions.affinity | indent 12 }} - {{- end }} - {{- if .Values.snuba.cleanupTransactions.nodeSelector }} - nodeSelector: -{{ toYaml .Values.snuba.cleanupTransactions.nodeSelector | indent 12 }} - {{- end }} - {{- if .Values.snuba.cleanupTransactions.tolerations }} - tolerations: -{{ toYaml .Values.snuba.cleanupTransactions.tolerations | indent 12 }} - {{- end }} - {{- if .Values.dnsPolicy }} - dnsPolicy: {{ .Values.dnsPolicy | quote }} - {{- end }} - {{- if .Values.dnsConfig }} - dnsConfig: -{{ toYaml .Values.dnsConfig | indent 12 }} - {{- end }} - {{- if .Values.images.snuba.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.images.snuba.imagePullSecrets | indent 12 }} - {{- end }} - containers: - - name: {{ .Chart.Name }}-snuba-cleanup-errors - image: "{{ template "snuba.image" . }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} - command: - - "snuba" - - "cleanup" - - "--storage" - - "transactions" - - "--dry-run" - - "False" - - "--clickhouse-host" - - {{ include "sentry.clickhouse.host" . | quote }} - - "--clickhouse-port" - - {{ include "sentry.clickhouse.port" . 
| quote }} - env: - - name: SNUBA_SETTINGS - value: /etc/snuba/settings.py - {{- if .Values.snuba.cleanupTransactions.env }} -{{ toYaml .Values.snuba.cleanupTransactions.env | indent 14 }} - {{- end }} - envFrom: - - secretRef: - name: {{ template "sentry.fullname" . }}-snuba-env - volumeMounts: - - mountPath: /etc/snuba - name: config - readOnly: true - resources: -{{ toYaml .Values.snuba.cleanupTransactions.resources | indent 14 }} -{{- if .Values.snuba.cleanupTransactions.sidecars }} -{{ toYaml .Values.snuba.cleanupTransactions.sidecars | indent 10 }} -{{- end }} - restartPolicy: Never - volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . }}-snuba -{{- if .Values.snuba.cleanupTransactions.volumes }} -{{ toYaml .Values.snuba.cleanupTransactions.volumes | indent 10 }} -{{- end }} - {{- if .Values.snuba.cleanupTransactions.priorityClassName }} - priorityClassName: "{{ .Values.snuba.cleanupTransactions.priorityClassName }}" - {{- end }} -{{- end }} diff --git a/sentry/templates/deployment-sentry-post-process-forwarder.yaml b/sentry/templates/deployment-sentry-post-process-forwarder.yaml deleted file mode 100644 index 9d0fa67ba..000000000 --- a/sentry/templates/deployment-sentry-post-process-forwarder.yaml +++ /dev/null @@ -1,137 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "sentry.fullname" . }}-post-process-forward - labels: - app: sentry - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - app.kubernetes.io/managed-by: "Helm" - {{- if .Values.asHook }} - {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} - annotations: - meta.helm.sh/release-name: "{{ .Release.Name }}" - meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" - "helm.sh/hook": "post-install,post-upgrade" - "helm.sh/hook-weight": "10" - {{- end }} -spec: - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - app: sentry - release: "{{ .Release.Name }}" - role: sentry-post-process-forward - replicas: {{ .Values.sentry.postProcessForward.replicas }} - template: - metadata: - annotations: - checksum/configYml: {{ .Values.config.configYml | toYaml | toString | sha256sum }} - checksum/sentryConfPy: {{ .Values.config.sentryConfPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-sentry.yaml") . 
| sha256sum }} - {{- if .Values.sentry.postProcessForward.annotations }} -{{ toYaml .Values.sentry.postProcessForward.annotations | indent 8 }} - {{- end }} - labels: - app: sentry - release: "{{ .Release.Name }}" - role: sentry-post-process-forward - {{- if .Values.sentry.postProcessForward.podLabels }} -{{ toYaml .Values.sentry.postProcessForward.podLabels | indent 8 }} - {{- end }} - spec: - affinity: - {{- if .Values.sentry.postProcessForward.affinity }} -{{ toYaml .Values.sentry.postProcessForward.affinity | indent 8 }} - {{- end }} - {{- if .Values.sentry.postProcessForward.nodeSelector }} - nodeSelector: -{{ toYaml .Values.sentry.postProcessForward.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.sentry.postProcessForward.tolerations }} - tolerations: -{{ toYaml .Values.sentry.postProcessForward.tolerations | indent 8 }} - {{- end }} - {{- if .Values.images.sentry.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.images.sentry.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.dnsPolicy }} - dnsPolicy: {{ .Values.dnsPolicy | quote }} - {{- end }} - {{- if .Values.dnsConfig }} - dnsConfig: -{{ toYaml .Values.dnsConfig | indent 8 }} - {{- end }} - {{- if .Values.sentry.postProcessForward.securityContext }} - securityContext: -{{ toYaml .Values.sentry.postProcessForward.securityContext | indent 8 }} - {{- end }} - containers: - - name: {{ .Chart.Name }}-post-process-forward - image: "{{ template "sentry.image" . }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.images.sentry.pullPolicy }} - command: ["sentry", "run", "post-process-forwarder", "--commit-batch-size", "{{ default "1" .Values.sentry.postProcessForward.commitBatchSize }}"] - env: - - name: SNUBA - value: http://{{ template "sentry.fullname" . }}-snuba:{{ template "snuba.port" }} - {{- if .Values.postgresql.enabled }} - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: {{ default (include "sentry.postgresql.fullname" .) .Values.postgresql.existingSecret }} - key: {{ default "postgresql-password" .Values.postgresql.existingSecretKey }} - {{- end }} - {{ if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: GOOGLE_APPLICATION_CREDENTIALS - value: /var/run/secrets/google/{{ .Values.filestore.gcs.credentialsFile }} - {{ end }} -{{- if .Values.sentry.postProcessForward.env }} -{{ toYaml .Values.sentry.postProcessForward.env | indent 8 }} -{{- end }} - volumeMounts: - - mountPath: /etc/sentry - name: config - readOnly: true - - mountPath: {{ .Values.filestore.filesystem.path }} - name: sentry-data - {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: sentry-google-cloud-key - mountPath: /var/run/secrets/google - {{ end }} - resources: -{{ toYaml .Values.sentry.postProcessForward.resources | indent 12 }} -{{- if .Values.sentry.postProcessForward.sidecars }} -{{ toYaml .Values.sentry.postProcessForward.sidecars | indent 6 }} -{{- end }} - {{- if .Values.serviceAccount.enabled }} - serviceAccountName: {{ .Values.serviceAccount.name }}-post-process-forwarder - {{- end }} - volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . 
}}-sentry - - name: sentry-data - {{- if and (eq .Values.filestore.backend "filesystem") .Values.filestore.filesystem.persistence.enabled (.Values.filestore.filesystem.persistence.persistentWorkers) }} - {{- if .Values.filestore.filesystem.persistence.existingClaim }} - persistentVolumeClaim: - claimName: {{ .Values.filestore.filesystem.persistence.existingClaim }} - {{- else }} - persistentVolumeClaim: - claimName: {{ template "sentry.fullname" . }}-data - {{- end }} - {{- else }} - emptyDir: {} - {{ end }} - {{- if and (eq .Values.filestore.backend "gcs") .Values.filestore.gcs.secretName }} - - name: sentry-google-cloud-key - secret: - secretName: {{ .Values.filestore.gcs.secretName }} - {{ end }} -{{- if .Values.sentry.postProcessForward.volumes }} -{{ toYaml .Values.sentry.postProcessForward.volumes | indent 6 }} -{{- end }} - {{- if .Values.sentry.postProcessForward.priorityClassName }} - priorityClassName: "{{ .Values.sentry.postProcessForward.priorityClassName }}" - {{- end }} diff --git a/sentry/templates/deployment-snuba-sessions-consumer.yaml b/sentry/templates/deployment-snuba-sessions-consumer.yaml deleted file mode 100644 index ee1263d00..000000000 --- a/sentry/templates/deployment-snuba-sessions-consumer.yaml +++ /dev/null @@ -1,139 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "sentry.fullname" . }}-sessions-consumer - labels: - app: {{ template "sentry.fullname" . }} - chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" - release: "{{ .Release.Name }}" - heritage: "{{ .Release.Service }}" - app.kubernetes.io/managed-by: "Helm" - {{- if .Values.asHook }} - {{- /* Add the Helm annotations so that deployment after asHook from true to false works */}} - annotations: - meta.helm.sh/release-name: "{{ .Release.Name }}" - meta.helm.sh/release-namespace: "{{ .Release.Namespace }}" - "helm.sh/hook": "post-install,post-upgrade" - "helm.sh/hook-weight": "16" - {{- end }} -spec: - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - app: {{ template "sentry.fullname" . }} - release: "{{ .Release.Name }}" - role: sessions-consumer - replicas: {{ .Values.snuba.sessionsConsumer.replicas }} - template: - metadata: - annotations: - checksum/snubaSettingsPy: {{ .Values.config.snubaSettingsPy | sha256sum }} - checksum/config.yaml: {{ include (print $.Template.BasePath "/configmap-snuba.yaml") . | sha256sum }} - {{- if .Values.snuba.sessionsConsumer.annotations }} -{{ toYaml .Values.snuba.sessionsConsumer.annotations | indent 8 }} - {{- end }} - labels: - app: {{ template "sentry.fullname" . 
}} - release: "{{ .Release.Name }}" - role: sessions-consumer - {{- if .Values.snuba.sessionsConsumer.podLabels }} -{{ toYaml .Values.snuba.sessionsConsumer.podLabels | indent 8 }} - {{- end }} - spec: - affinity: - {{- if .Values.snuba.sessionsConsumer.affinity }} -{{ toYaml .Values.snuba.sessionsConsumer.affinity | indent 8 }} - {{- end }} - {{- if .Values.snuba.sessionsConsumer.nodeSelector }} - nodeSelector: -{{ toYaml .Values.snuba.sessionsConsumer.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.snuba.sessionsConsumer.tolerations }} - tolerations: -{{ toYaml .Values.snuba.sessionsConsumer.tolerations | indent 8 }} - {{- end }} - {{- if .Values.images.snuba.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.images.snuba.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.dnsPolicy }} - dnsPolicy: {{ .Values.dnsPolicy | quote }} - {{- end }} - {{- if .Values.dnsConfig }} - dnsConfig: -{{ toYaml .Values.dnsConfig | indent 8 }} - {{- end }} - {{- if .Values.snuba.sessionsConsumer.securityContext }} - securityContext: -{{ toYaml .Values.snuba.sessionsConsumer.securityContext | indent 8 }} - {{- end }} - containers: - - name: {{ .Chart.Name }}-snuba - image: "{{ template "snuba.image" . }}" - imagePullPolicy: {{ default "IfNotPresent" .Values.images.snuba.pullPolicy }} - command: - - "snuba" - - "consumer" - - "--storage" - - "sessions_raw" - - "--auto-offset-reset" - - "{{ .Values.snuba.sessionsConsumer.autoOffsetReset }}" - - "--max-batch-time-ms" - - "750" - {{- if .Values.snuba.sessionsConsumer.maxBatchSize }} - - "--max-batch-size" - - "{{ .Values.snuba.sessionsConsumer.maxBatchSize }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.processes }} - - "--processes" - - "{{ .Values.snuba.sessionsConsumer.processes }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.inputBlockSize }} - - "--input-block-size" - - "{{ .Values.snuba.sessionsConsumer.inputBlockSize }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.outputBlockSize }} - - "--output-block-size" - - "{{ .Values.snuba.sessionsConsumer.outputBlockSize }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.maxBatchTimeMs }} - - "--max-batch-time-ms" - - "{{ .Values.snuba.sessionsConsumer.maxBatchTimeMs }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.queuedMaxMessagesKbytes }} - - "--queued-max-messages-kbytes" - - "{{ .Values.snuba.sessionsConsumer.queuedMaxMessagesKbytes }}" - {{- end }} - {{- if .Values.snuba.sessionsConsumer.queuedMinMessages }} - - "--queued-min-messages" - - "{{ .Values.snuba.sessionsConsumer.queuedMinMessages }}" - {{- end }} - ports: - - containerPort: {{ template "snuba.port" }} - env: -{{ include "sentry.snuba.env" . | indent 8 }} -{{- if .Values.snuba.sessionsConsumer.env }} -{{ toYaml .Values.snuba.sessionsConsumer.env | indent 8 }} -{{- end }} - envFrom: - - secretRef: - name: {{ template "sentry.fullname" . }}-snuba-env - volumeMounts: - - mountPath: /etc/snuba - name: config - readOnly: true -{{- if .Values.snuba.sessionsConsumer.volumeMounts }} -{{ toYaml .Values.snuba.sessionsConsumer.volumeMounts | indent 8 }} -{{- end }} - resources: -{{ toYaml .Values.snuba.sessionsConsumer.resources | indent 12 }} - {{- if .Values.serviceAccount.enabled }} - serviceAccountName: {{ .Values.serviceAccount.name }}-snuba - {{- end }} - volumes: - - name: config - configMap: - name: {{ template "sentry.fullname" . 
}}-snuba -{{- if .Values.snuba.sessionsConsumer.volumes }} -{{ toYaml .Values.snuba.sessionsConsumer.volumes | indent 8 }} -{{- end }} diff --git a/sentry/templates/hpa-ingestConsumer.yaml b/sentry/templates/hpa-ingestConsumer.yaml deleted file mode 100644 index 296c974f7..000000000 --- a/sentry/templates/hpa-ingestConsumer.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.sentry.ingestConsumer.autoscaling.enabled }} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "sentry.fullname" . }}-sentry-ingest-consumer -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "sentry.fullname" . }}-ingest-consumer - minReplicas: {{ .Values.sentry.ingestConsumer.autoscaling.minReplicas }} - maxReplicas: {{ .Values.sentry.ingestConsumer.autoscaling.maxReplicas }} - targetCPUUtilizationPercentage: {{ .Values.sentry.ingestConsumer.autoscaling.targetCPUUtilizationPercentage }} -{{- end }} diff --git a/sentry/templates/hpa-relay.yaml b/sentry/templates/hpa-relay.yaml deleted file mode 100644 index 6b3403feb..000000000 --- a/sentry/templates/hpa-relay.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.relay.autoscaling.enabled }} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "sentry.fullname" . }}-relay -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "sentry.fullname" . }}-relay - minReplicas: {{ .Values.relay.autoscaling.minReplicas }} - maxReplicas: {{ .Values.relay.autoscaling.maxReplicas }} - targetCPUUtilizationPercentage: {{ .Values.relay.autoscaling.targetCPUUtilizationPercentage }} -{{- end }} diff --git a/sentry/templates/hpa-snuba-api.yaml b/sentry/templates/hpa-snuba-api.yaml deleted file mode 100644 index 9f6b00524..000000000 --- a/sentry/templates/hpa-snuba-api.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.snuba.api.autoscaling.enabled }} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "sentry.fullname" . }}-snuba-api -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "sentry.fullname" . }}-snuba-api - minReplicas: {{ .Values.snuba.api.autoscaling.minReplicas }} - maxReplicas: {{ .Values.snuba.api.autoscaling.maxReplicas }} - targetCPUUtilizationPercentage: {{ .Values.snuba.api.autoscaling.targetCPUUtilizationPercentage }} -{{- end }} diff --git a/sentry/templates/hpa-web.yaml b/sentry/templates/hpa-web.yaml deleted file mode 100644 index ebdc1be8a..000000000 --- a/sentry/templates/hpa-web.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.sentry.web.autoscaling.enabled }} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "sentry.fullname" . }}-sentry-web -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "sentry.fullname" . }}-web - minReplicas: {{ .Values.sentry.web.autoscaling.minReplicas }} - maxReplicas: {{ .Values.sentry.web.autoscaling.maxReplicas }} - targetCPUUtilizationPercentage: {{ .Values.sentry.web.autoscaling.targetCPUUtilizationPercentage }} -{{- end }} diff --git a/sentry/templates/hpa-worker.yaml b/sentry/templates/hpa-worker.yaml deleted file mode 100644 index 96412a3cb..000000000 --- a/sentry/templates/hpa-worker.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.sentry.worker.autoscaling.enabled }} -apiVersion: autoscaling/v1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ template "sentry.fullname" . 
}}-sentry-worker -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ template "sentry.fullname" . }}-worker - minReplicas: {{ .Values.sentry.worker.autoscaling.minReplicas }} - maxReplicas: {{ .Values.sentry.worker.autoscaling.maxReplicas }} - targetCPUUtilizationPercentage: {{ .Values.sentry.worker.autoscaling.targetCPUUtilizationPercentage }} -{{- end }} diff --git a/sentry/values.yaml b/sentry/values.yaml deleted file mode 100644 index 679f09120..000000000 --- a/sentry/values.yaml +++ /dev/null @@ -1,973 +0,0 @@ -prefix: - -user: - create: true - email: admin@sentry.local - password: aaaa - - ## set this value to an existingSecret name to create the admin user with the password in the secret - # existingSecret: sentry-admin-password - - ## set this value to an existingSecretKey which holds the password to be used for sentry admin user default key is `admin-password` - # existingSecretKey: admin-password - -# this is required on the first installation, as sentry has to be initialized first -# recommended to set false for updating the helm chart afterwards, -# as you will have some downtime on each update if it's a hook -# deploys relay & snuba consumers as post hooks -asHook: true - -images: - sentry: - # repository: getsentry/sentry - # tag: Chart.AppVersion - # pullPolicy: IfNotPresent - imagePullSecrets: [] - snuba: - # repository: getsentry/snuba - # tag: Chart.AppVersion - # pullPolicy: IfNotPresent - imagePullSecrets: [] - relay: - # repository: getsentry/relay - # tag: Chart.AppVersion - # pullPolicy: IfNotPresent - imagePullSecrets: [] - symbolicator: - # repository: getsentry/symbolicator - tag: 0.5.1 - # pullPolicy: IfNotPresent - imagePullSecrets: [] - -serviceAccount: - # serviceAccount.annotations -- Additional Service Account annotations. - annotations: {} - # serviceAccount.enabled -- If `true`, a custom Service Account will be used. - enabled: false - # serviceAccount.name -- The base name of the ServiceAccount to use. Will be appended with e.g. `snuba-api` or `web` for the pods accordingly. - name: "sentry" - # serviceAccount.automountServiceAccountToken -- Automount API credentials for a Service Account. 
- automountServiceAccountToken: true - -relay: - replicas: 1 - mode: managed - env: [] - probeFailureThreshold: 5 - probeInitialDelaySeconds: 10 - probePeriodSeconds: 10 - probeSuccessThreshold: 1 - probeTimeoutSeconds: 2 - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - service: - annotations: {} - # tolerations: [] - # podLabels: [] - - autoscaling: - enabled: false - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 50 - sidecars: [] - volumes: [] - -sentry: - singleOrganization: true - web: - # if using filestore backend filesystem with RWO access, set strategyType to Recreate - strategyType: RollingUpdate - replicas: 1 - env: [] - probeFailureThreshold: 5 - probeInitialDelaySeconds: 10 - probePeriodSeconds: 10 - probeSuccessThreshold: 1 - probeTimeoutSeconds: 2 - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - service: - annotations: {} - # tolerations: [] - # podLabels: [] - # Mount and use custom CA - # customCA: - # secretName: custom-ca - # item: ca.crt - - autoscaling: - enabled: false - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 50 - sidecars: [] - volumes: [] - - features: - orgSubdomains: false - vstsLimitedScopes: true - - worker: - replicas: 3 - # concurrency: 4 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - # tolerations: [] - # podLabels: [] - - # it's better to use prometheus adapter and scale based on - # the size of the rabbitmq queue - autoscaling: - enabled: false - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 50 - livenessProbe: - enabled: false - periodSeconds: 60 - timeoutSeconds: 10 - failureThreshold: 3 - sidecars: [] - volumes: [] - - ingestConsumer: - replicas: 1 - # concurrency: 4 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - # maxBatchSize: "" - - # it's better to use prometheus adapter and scale based on - # the size of the rabbitmq queue - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 3 - targetCPUUtilizationPercentage: 50 - sidecars: [] - volumes: [] - - # volumeMounts: - # - mountPath: /dev/shm - # name: dshm - - cron: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - # tolerations: [] - # podLabels: [] - sidecars: [] - volumes: [] - subscriptionConsumerEvents: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - # commitBatchSize: 1 - sidecars: [] - volumes: [] - subscriptionConsumerTransactions: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - # commitBatchSize: 1 - sidecars: [] - volumes: [] - postProcessForward: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - # commitBatchSize: 1 - sidecars: [] - volumes: [] - cleanup: - concurrencyPolicy: Allow - enabled: true - schedule: "0 0 * * *" - days: 90 - sidecars: [] - volumes: [] - -snuba: - api: - replicas: 1 - # set command to ["snuba","api"] if securityContext.runAsUser > 0 - # see: https://github.com/getsentry/snuba/issues/956 - command: {} - # - snuba - # - api - env: [] - probeInitialDelaySeconds: 10 - liveness: - timeoutSeconds: 2 - readiness: - timeoutSeconds: 2 - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - service: - annotations: {} - # tolerations: [] - # podLabels: [] - - 
autoscaling: - enabled: false - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 50 - sidecars: [] - volumes: [] - - consumer: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - # maxBatchSize: "" - # processes: "" - # inputBlockSize: "" - # outputBlockSize: "" - # maxBatchTimeMs: "" - # queuedMaxMessagesKbytes: "" - # queuedMinMessages: "" - - # volumeMounts: - # - mountPath: /dev/shm - # name: dshm - # volumes: - # - name: dshm - # emptyDir: - # medium: Memory - - outcomesConsumer: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - maxBatchSize: "3" - # processes: "" - # inputBlockSize: "" - # outputBlockSize: "" - # maxBatchTimeMs: "" - # queuedMaxMessagesKbytes: "" - # queuedMinMessages: "" - - # volumeMounts: - # - mountPath: /dev/shm - # name: dshm - # volumes: - # - name: dshm - # emptyDir: - # medium: Memory - - replacer: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - maxBatchSize: "3" - # maxBatchTimeMs: "" - # queuedMaxMessagesKbytes: "" - # queuedMinMessages: "" - - subscriptionConsumerEvents: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - - subscriptionConsumerTransactions: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - - sessionsConsumer: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - # maxBatchSize: "" - # processes: "" - # inputBlockSize: "" - # outputBlockSize: "" - # maxBatchTimeMs: "" - # queuedMaxMessagesKbytes: "" - # queuedMinMessages: "" - - # volumeMounts: - # - mountPath: /dev/shm - # name: dshm - # volumes: - # - name: dshm - # emptyDir: - # medium: Memory - - transactionsConsumer: - replicas: 1 - env: [] - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - autoOffsetReset: "earliest" - # maxBatchSize: "" - # processes: "" - # inputBlockSize: "" - # outputBlockSize: "" - # maxBatchTimeMs: "" - # queuedMaxMessagesKbytes: "" - # queuedMinMessages: "" - - # volumeMounts: - # - mountPath: /dev/shm - # name: dshm - # volumes: - # - name: dshm - # emptyDir: - # medium: Memory - - dbInitJob: - env: [] - - migrateJob: - env: [] - - cleanupErrors: - concurrencyPolicy: Allow - enabled: true - schedule: "0 * * * *" - sidecars: [] - volumes: [] - - cleanupTransactions: - concurrencyPolicy: Allow - enabled: true - schedule: "0 * * * *" - sidecars: [] - volumes: [] - -hooks: - enabled: true - removeOnSuccess: true - dbCheck: - image: - # repository: subfuzion/netcat - # tag: latest - # pullPolicy: IfNotPresent - imagePullSecrets: [] - env: [] - podAnnotations: {} - resources: - limits: - memory: 64Mi - requests: - cpu: 100m - memory: 64Mi - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - dbInit: - env: [] - podAnnotations: {} - resources: - limits: - memory: 2048Mi - requests: - cpu: 300m - memory: 2048Mi - sidecars: [] - volumes: [] - affinity: {} - nodeSelector: {} - # tolerations: 
[] - snubaInit: - podAnnotations: {} - resources: - limits: - cpu: 2000m - memory: 1Gi - requests: - cpu: 700m - memory: 1Gi - affinity: {} - nodeSelector: {} - # tolerations: [] - -system: - ## be sure to include the scheme on the url, for example: "https://sentry.example.com" - url: "" - adminEmail: "" - ## This should only be used if you’re installing Sentry behind your company’s firewall. - public: false - ## This will generate one for you (it's must be given upon updates) - # secretKey: "xx" - -mail: - # For example: smtp - backend: dummy - useTls: false - useSsl: false - username: "" - password: "" - port: 25 - host: "" - from: "" - -symbolicator: - enabled: false - api: - replicas: 1 - env: [] - probeInitialDelaySeconds: 10 - resources: {} - affinity: {} - nodeSelector: {} - securityContext: {} - # tolerations: [] - # podLabels: [] - # priorityClassName: "xxx" - config: |- - # See: https://getsentry.github.io/symbolicator/#configuration - cache_dir: "/data" - bind: "0.0.0.0:3021" - logging: - level: "warn" - metrics: - statsd: null - prefix: "symbolicator" - sentry_dsn: null - connect_to_reserved_ips: true - # caches: - # downloaded: - # max_unused_for: 1w - # retry_misses_after: 5m - # retry_malformed_after: 5m - # derived: - # max_unused_for: 1w - # retry_misses_after: 5m - # retry_malformed_after: 5m - # diagnostics: - # retention: 1w - - # TODO autoscaling in not yet implemented - autoscaling: - enabled: false - minReplicas: 2 - maxReplicas: 5 - targetCPUUtilizationPercentage: 50 - # TODO The cleanup cronjob is not yet implemented - cleanup: - enabled: false - # podLabels: [] - # affinity: {} - # env: [] - -auth: - register: true - -service: - name: sentry - type: ClusterIP - externalPort: 9000 - annotations: {} - # externalIPs: - # - 192.168.0.1 - # loadBalancerSourceRanges: [] - -# https://github.com/settings/apps (Create a Github App) -github: {} -# github: -# appId: "xxxx" -# appName: MyAppName -# clientId: "xxxxx" -# clientSecret: "xxxxx" -# privateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpA" !!!! Don't forget a trailing \n -# webhookSecret: "xxxxx`" - -# https://developers.google.com/identity/sign-in/web/server-side-flow#step_1_create_a_client_id_and_client_secret -google: {} -# google: -# clientId: -# clientSecret: - -slack: {} -# slack: -# clientId: -# clientSecret: -# signingSecret: -# Reference -> https://develop.sentry.dev/integrations/slack/ - -nginx: - enabled: true - containerPort: 8080 - existingServerBlockConfigmap: '{{ template "sentry.fullname" . }}' - resources: {} - replicaCount: 1 - service: - type: ClusterIP - ports: - http: 80 - ## Use this to enable an extra service account - # serviceAccount: - # create: false - # name: nginx - -ingress: - enabled: false - # If you are using traefik ingress controller, switch this to 'traefik' - # if you are using AWS ALB Ingress controller, switch this to 'aws-alb' - # if you are using GKE Ingress controller, switch this to 'gke' - regexPathStyle: nginx - # If you are using AWS ALB Ingress controller, switch to true if you want activate the http to https redirection. - alb: - httpRedirect: false - # annotations: - # If you are using nginx ingress controller, please use at least those 2 annotations - # kubernetes.io/ingress.class: nginx - # nginx.ingress.kubernetes.io/use-regex: "true" - # - # hostname: - # additionalHostNames: [] - # - # tls: - # - secretName: - # hosts: - -filestore: - # Set to one of filesystem, gcs or s3 as supported by Sentry. 
- backend: filesystem - - filesystem: - path: /var/lib/sentry/files - - ## Enable persistence using Persistent Volume Claims - ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - persistence: - enabled: true - ## database data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - accessMode: ReadWriteOnce - size: 10Gi - - ## Whether to mount the persistent volume to the Sentry worker and - ## cron deployments. This setting needs to be enabled for some advanced - ## Sentry features, such as private source maps. If you disable this - ## setting, the Sentry workers will not have access to artifacts you upload - ## through the web deployment. - ## Please note that you may need to change your accessMode to ReadWriteMany - ## if you plan on having the web, worker and cron deployments run on - ## different nodes. - persistentWorkers: false - - ## If existingClaim is specified, no PVC will be created and this claim will - ## be used - existingClaim: "" - - gcs: {} - ## Point this at a pre-configured secret containing a service account. The resulting - ## secret will be mounted at /var/run/secrets/google - # secretName: - # credentialsFile: credentials.json - # bucketName: - - ## Currently unconfigured and changing this has no impact on the template configuration. - s3: {} - # accessKey: - # secretKey: - # bucketName: - # endpointUrl: - # signature_version: - # region_name: - # default_acl: - -config: - # No YAML Extension Config Given - configYml: {} - sentryConfPy: | - # No Python Extension Config Given - snubaSettingsPy: | - # No Python Extension Config Given - relay: | - # No YAML relay config given - -clickhouse: - enabled: true - clickhouse: - imageVersion: "20.8.19.4" - configmap: - remote_servers: - internal_replication: true - replica: - backup: - enabled: false - zookeeper_servers: - enabled: true - config: - - index: "clickhouse" - hostTemplate: "{{ .Release.Name }}-zookeeper-clickhouse" - port: "2181" - users: - enabled: false - user: - # the first user will be used if enabled - - name: default - config: - password: "" - networks: - - ::/0 - profile: default - quota: default - - persistentVolumeClaim: - enabled: true - dataPersistentVolume: - enabled: true - accessModes: - - "ReadWriteOnce" - storage: "30Gi" - - ## Use this to enable an extra service account - # serviceAccount: - # annotations: {} - # enabled: false - # name: "sentry-clickhouse" - # automountServiceAccountToken: true - -## This value is only used when clickhouse.enabled is set to false -## -externalClickhouse: - ## Hostname or ip address of external clickhouse - ## - host: "clickhouse" - tcpPort: 9000 - httpPort: 8123 - username: default - password: "" - database: default - singleNode: true - ## Cluster name, can be found in config - ## (https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-remote-servers) - ## or by executing `select * from system.clusters` - ## - # clusterName: test_shard_localhost - -# Settings for Zookeeper. -# See https://github.com/bitnami/charts/tree/master/bitnami/zookeeper -zookeeper: - enabled: true - nameOverride: zookeeper-clickhouse - replicaCount: 3 - -# Settings for Kafka. 
-# See https://github.com/bitnami/charts/tree/master/bitnami/kafka -kafka: - enabled: true - replicaCount: 3 - allowPlaintextListener: true - defaultReplicationFactor: 3 - offsetsTopicReplicationFactor: 3 - transactionStateLogReplicationFactor: 3 - transactionStateLogMinIsr: 3 - # 50 MB - maxMessageBytes: "50000000" - # 50 MB - socketRequestMaxBytes: "50000000" - - service: - ports: - client: 9092 - - ## Use this to enable an extra service account - # serviceAccount: - # create: false - # name: kafka - - ## Use this to enable an extra service account - # zookeeper: - # serviceAccount: - # create: false - # name: zookeeper - -## This value is only used when kafka.enabled is set to false -## -externalKafka: - ## Hostname or ip address of external kafka - ## - # host: "kafka-confluent" - port: 9092 - -sourcemaps: - enabled: false - -redis: - enabled: true - auth: - enabled: false - sentinel: false - nameOverride: sentry-redis - usePassword: false - ## Just omit the password field if your redis cluster doesn't use password - # password: redis - master: - persistence: - enabled: true - ## Use this to enable an extra service account - # serviceAccount: - # create: false - # name: sentry-redis - -## This value is only used when redis.enabled is set to false -## -externalRedis: - ## Hostname or ip address of external redis cluster - ## - # host: "redis" - port: 6379 - ## Just omit the password field if your redis cluster doesn't use password - # password: redis - -postgresql: - enabled: true - nameOverride: sentry-postgresql - postgresqlUsername: postgres - postgresqlDatabase: sentry - replication: - enabled: false - readReplicas: 2 - synchronousCommit: "on" - numSynchronousReplicas: 1 - applicationName: sentry - ## Use this to enable an extra service account - # serviceAccount: - # enabled: false - -## This value is only used when postgresql.enabled is set to false -## -externalPostgresql: - # host: postgres - port: 5432 - username: postgres - # password: postgres - database: sentry - # sslMode: require - -rabbitmq: - ## If disabled, Redis will be used instead as the broker. 
- enabled: true - clustering: - forceBoot: true - rebalance: true - replicaCount: 3 - auth: - erlangCookie: pHgpy3Q6adTskzAT6bLHCFqFTF7lMxhA - username: guest - password: guest - nameOverride: "" - - pdb: - create: true - persistence: - enabled: true - resources: {} - memoryHighWatermark: {} - # enabled: true - # type: relative - # value: 0.4 - - extraSecrets: - load-definition: - load_definition.json: | - { - "users": [ - { - "name": "{{ .Values.auth.username }}", - "password": "{{ .Values.auth.password }}", - "tags": "administrator" - } - ], - "permissions": [{ - "user": "{{ .Values.auth.username }}", - "vhost": "/", - "configure": ".*", - "write": ".*", - "read": ".*" - }], - "policies": [ - { - "name": "ha-all", - "pattern": ".*", - "vhost": "/", - "definition": { - "ha-mode": "all", - "ha-sync-mode": "automatic", - "ha-sync-batch-size": 1 - } - } - ], - "vhosts": [ - { - "name": "/" - } - ] - } - loadDefinition: - enabled: true - existingSecret: load-definition - extraConfiguration: | - load_definitions = /app/load_definition.json - ## Use this to enable an extra service account - # serviceAccount: - # create: false - # name: rabbitmq - -memcached: - memoryLimit: "2048" - maxItemSize: "26214400" - args: - - "memcached" - - "-u memcached" - - "-p 11211" - - "-v" - - "-m $(MEMCACHED_MEMORY_LIMIT)" - - "-I $(MEMCACHED_MAX_ITEM_SIZE)" - extraEnvVarsCM: "sentry-memcached" - -## Prometheus Exporter / Metrics -## -metrics: - enabled: false - - ## Configure extra options for liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 - readinessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 - - ## Metrics exporter resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: {} - # limits: - # cpu: 100m - # memory: 100Mi - # requests: - # cpu: 100m - # memory: 100Mi - - nodeSelector: {} - tolerations: [] - affinity: {} - securityContext: {} - # schedulerName: - # Optional extra labels for pod, i.e. redis-client: "true" - # podLabels: [] - service: - type: ClusterIP - labels: {} - - image: - repository: prom/statsd-exporter - tag: v0.17.0 - pullPolicy: IfNotPresent - - # Enable this if you're using https://github.com/coreos/prometheus-operator - serviceMonitor: - enabled: false - additionalLabels: {} - namespace: "" - namespaceSelector: {} - # Default: scrape .Release.Namespace only - # To scrape all, use the following: - # namespaceSelector: - # any: true - scrapeInterval: 30s - # honorLabels: true - -revisionHistoryLimit: 10 - -# dnsPolicy: "ClusterFirst" -# dnsConfig: -# nameservers: [] -# searches: [] -# options: []
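
The deleted deployment-snuba-sessions-consumer.yaml above assembles the `snuba consumer --storage sessions_raw` command from optional values (`maxBatchSize`, `processes`, `inputBlockSize`, `outputBlockSize`, `maxBatchTimeMs`, `queuedMaxMessagesKbytes`, `queuedMinMessages`), appending each flag only when the value is set; note that it also always passes a hard-coded `--max-batch-time-ms 750` before the value-driven variant. A minimal override sketch, valid only for chart revisions that still ship this consumer, with every number illustrative rather than recommended:

```yaml
# Illustrative tuning of the (removed) snuba sessions consumer.
snuba:
  sessionsConsumer:
    replicas: 2
    autoOffsetReset: "earliest"
    maxBatchSize: "1000"              # rendered as --max-batch-size "1000"
    processes: "4"                    # rendered as --processes "4"
    queuedMaxMessagesKbytes: "65536"  # rendered as --queued-max-messages-kbytes
    # The commented example in values.yaml mounts an in-memory emptyDir at
    # /dev/shm, which multi-process consumers use for their shared blocks.
    volumeMounts:
      - mountPath: /dev/shm
        name: dshm
    volumes:
      - name: dshm
        emptyDir:
          medium: Memory
    resources:
      requests:
        cpu: 200m
        memory: 1Gi
      limits:
        memory: 1Gi
```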
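
The removed hpa-*.yaml templates (relay, web, worker, snuba-api, ingest-consumer) each rendered an `autoscaling/v1` HorizontalPodAutoscaler against the matching Deployment, gated on that component's `autoscaling.enabled` flag. Based on the defaults in the deleted values.yaml, enabling them looked roughly like the sketch below; the worker and ingest-consumer comments in values.yaml recommend scaling those on RabbitMQ queue depth via prometheus-adapter rather than on CPU.

```yaml
relay:
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 5
    targetCPUUtilizationPercentage: 50
sentry:
  web:
    autoscaling:
      enabled: true
      minReplicas: 2
      maxReplicas: 5
      targetCPUUtilizationPercentage: 50
  worker:
    autoscaling:
      enabled: true   # CPU-based; queue-depth scaling is preferred per the chart comment
      minReplicas: 2
      maxReplicas: 5
      targetCPUUtilizationPercentage: 50
snuba:
  api:
    autoscaling:
      enabled: true
      minReplicas: 2
      maxReplicas: 5
      targetCPUUtilizationPercentage: 50
```

Because `autoscaling/v1` only supports a CPU utilization target, the removed templates exposed nothing beyond min/max replicas and `targetCPUUtilizationPercentage`.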
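
The deleted values.yaml created the initial admin user with a literal password (`aaaa`) by default; its comments point to `user.existingSecret` / `user.existingSecretKey` for sourcing the password from a pre-created Secret instead. A sketch, with the Secret name as a placeholder:

```yaml
user:
  create: true
  email: admin@sentry.local
  existingSecret: sentry-admin-password   # placeholder; a Secret created beforehand
  existingSecretKey: admin-password       # default key name per the chart comment
```

The same file also recommends setting `asHook: false` after the first successful install, since the hook-based bootstrap re-runs on every upgrade and causes some downtime each time.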
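
`filestore.backend` in the deleted values.yaml accepts filesystem, gcs or s3 (the s3 block is explicitly marked as not wired into the templates in this revision). For gcs the chart expects an existing Secret holding a service-account key, which it mounts at /var/run/secrets/google. A sketch with placeholder Secret and bucket names; the Secret could be created with e.g. `kubectl create secret generic sentry-gcs-credentials --from-file=credentials.json`:

```yaml
filestore:
  backend: gcs
  gcs:
    secretName: sentry-gcs-credentials   # placeholder; pre-created Secret with the key file
    credentialsFile: credentials.json    # key inside the Secret
    bucketName: sentry-filestore         # placeholder bucket
```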
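
Each bundled datastore sub-chart in the deleted values.yaml (clickhouse, kafka, redis, postgresql) has an `external*` counterpart that is only read when the sub-chart is disabled. A hedged sketch of pointing the chart at externally managed services; every hostname and credential below is a placeholder:

```yaml
clickhouse:
  enabled: false
externalClickhouse:
  host: "clickhouse.internal.example"
  tcpPort: 9000
  httpPort: 8123
  username: default
  password: ""
  database: default
  singleNode: true        # set false and provide clusterName for a clustered ClickHouse

kafka:
  enabled: false
externalKafka:
  host: "kafka.internal.example"
  port: 9092

redis:
  enabled: false
externalRedis:
  host: "redis.internal.example"
  port: 6379
  # password: redis       # omit when the cluster is unauthenticated, as the comment suggests

postgresql:
  enabled: false
externalPostgresql:
  host: "postgres.internal.example"
  port: 5432
  username: postgres
  # password: postgres
  database: sentry
  # sslMode: require
```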
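
The metrics block in the deleted values.yaml ships a prom/statsd-exporter based metrics exporter that is disabled by default, plus an optional ServiceMonitor for prometheus-operator. Enabling both might look like the sketch below; the `release: prometheus` label is an assumption about how your Prometheus instance selects ServiceMonitors:

```yaml
metrics:
  enabled: true
  serviceMonitor:
    enabled: true             # requires the prometheus-operator CRDs
    additionalLabels:
      release: prometheus     # placeholder; must match your Prometheus serviceMonitorSelector
    scrapeInterval: 30s
    # namespaceSelector:      # default scrapes .Release.Namespace only
    #   any: true
```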