From 861c8ff4b134e357b1beeb8aac684b4809e2d76f Mon Sep 17 00:00:00 2001 From: Carlos Rodriguez Hernandez Date: Fri, 6 Mar 2020 21:12:38 +0000 Subject: [PATCH] [bitnami/rabbitmq] Copy rabbitmq chart from stable (https://github.com/helm/charts/pull/21310) Signed-off-by: Carlos Rodriguez Hernandez --- bitnami/rabbitmq/.helmignore | 2 + bitnami/rabbitmq/Chart.yaml | 17 + bitnami/rabbitmq/README.md | 421 +++++++++++++ .../ci/affinity-toleration-values.yaml | 14 + bitnami/rabbitmq/ci/default-values.yaml | 1 + bitnami/rabbitmq/ci/networkpolicy-values.yaml | 11 + bitnami/rabbitmq/templates/NOTES.txt | 79 +++ bitnami/rabbitmq/templates/_helpers.tpl | 258 ++++++++ bitnami/rabbitmq/templates/certs.yaml | 19 + bitnami/rabbitmq/templates/configuration.yaml | 41 ++ bitnami/rabbitmq/templates/healthchecks.yaml | 32 + bitnami/rabbitmq/templates/ingress.yaml | 41 ++ bitnami/rabbitmq/templates/networkpolicy.yaml | 39 ++ bitnami/rabbitmq/templates/pdb.yaml | 17 + .../rabbitmq/templates/prometheusrule.yaml | 23 + bitnami/rabbitmq/templates/role.yaml | 15 + bitnami/rabbitmq/templates/rolebinding.yaml | 18 + bitnami/rabbitmq/templates/secrets.yaml | 38 ++ .../rabbitmq/templates/serviceaccount.yaml | 11 + .../rabbitmq/templates/servicemonitor.yaml | 36 ++ bitnami/rabbitmq/templates/statefulset.yaml | 372 ++++++++++++ bitnami/rabbitmq/templates/svc-headless.yaml | 32 + bitnami/rabbitmq/templates/svc.yaml | 78 +++ bitnami/rabbitmq/values-production.yaml | 574 ++++++++++++++++++ bitnami/rabbitmq/values.schema.json | 100 +++ bitnami/rabbitmq/values.yaml | 555 +++++++++++++++++ 26 files changed, 2844 insertions(+) create mode 100644 bitnami/rabbitmq/.helmignore create mode 100644 bitnami/rabbitmq/Chart.yaml create mode 100644 bitnami/rabbitmq/README.md create mode 100644 bitnami/rabbitmq/ci/affinity-toleration-values.yaml create mode 100644 bitnami/rabbitmq/ci/default-values.yaml create mode 100644 bitnami/rabbitmq/ci/networkpolicy-values.yaml create mode 100644 
bitnami/rabbitmq/templates/NOTES.txt create mode 100644 bitnami/rabbitmq/templates/_helpers.tpl create mode 100644 bitnami/rabbitmq/templates/certs.yaml create mode 100644 bitnami/rabbitmq/templates/configuration.yaml create mode 100644 bitnami/rabbitmq/templates/healthchecks.yaml create mode 100644 bitnami/rabbitmq/templates/ingress.yaml create mode 100644 bitnami/rabbitmq/templates/networkpolicy.yaml create mode 100644 bitnami/rabbitmq/templates/pdb.yaml create mode 100644 bitnami/rabbitmq/templates/prometheusrule.yaml create mode 100644 bitnami/rabbitmq/templates/role.yaml create mode 100644 bitnami/rabbitmq/templates/rolebinding.yaml create mode 100644 bitnami/rabbitmq/templates/secrets.yaml create mode 100644 bitnami/rabbitmq/templates/serviceaccount.yaml create mode 100644 bitnami/rabbitmq/templates/servicemonitor.yaml create mode 100644 bitnami/rabbitmq/templates/statefulset.yaml create mode 100644 bitnami/rabbitmq/templates/svc-headless.yaml create mode 100644 bitnami/rabbitmq/templates/svc.yaml create mode 100644 bitnami/rabbitmq/values-production.yaml create mode 100644 bitnami/rabbitmq/values.schema.json create mode 100644 bitnami/rabbitmq/values.yaml diff --git a/bitnami/rabbitmq/.helmignore b/bitnami/rabbitmq/.helmignore new file mode 100644 index 00000000000000..acbcabf42a7986 --- /dev/null +++ b/bitnami/rabbitmq/.helmignore @@ -0,0 +1,2 @@ +.git +OWNERS diff --git a/bitnami/rabbitmq/Chart.yaml b/bitnami/rabbitmq/Chart.yaml new file mode 100644 index 00000000000000..f9601f9ca070e6 --- /dev/null +++ b/bitnami/rabbitmq/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +name: rabbitmq +version: 6.18.1 +appVersion: 3.8.2 +description: Open source message broker software that implements the Advanced Message Queuing Protocol (AMQP) +keywords: +- rabbitmq +- message queue +- AMQP +home: https://www.rabbitmq.com +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq 
+maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl diff --git a/bitnami/rabbitmq/README.md b/bitnami/rabbitmq/README.md new file mode 100644 index 00000000000000..49af4b71e409ea --- /dev/null +++ b/bitnami/rabbitmq/README.md @@ -0,0 +1,421 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR; + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RabbitMQ chart and their default values. 
+ +| Parameter | Description | Default | +| -------------------------------------------- | ------------------------------------------------ | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | Rabbitmq Image registry | `docker.io` | +| `image.repository` | Rabbitmq Image name | `bitnami/rabbitmq` | +| `image.tag` | Rabbitmq Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override rabbitmq.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template with a string | `nil` | +| `rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `rabbitmq.username` | RabbitMQ application username | `user` | +| `rabbitmq.password` | RabbitMQ application password | _random 10 character long alphanumeric string_ | +| `rabbitmq.existingPasswordSecret` | Existing secret with RabbitMQ credentials | `nil` | +| `rabbitmq.erlangCookie` | Erlang cookie | _random 32 character long alphanumeric string_ | +| `rabbitmq.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie | `nil` | +| `rabbitmq.plugins` | List of plugins to enable | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `rabbitmq.extraPlugins` | Extra plugings to enable | `nil` | +| `rabbitmq.clustering.address_type` | Switch clustering mode | `ip` or `hostname` | +| `rabbitmq.clustering.k8s_domain` | 
Customize internal k8s cluster domain | `cluster.local` | +| `rabbitmq.clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `rabbitmq.logs` | Value for the RABBITMQ_LOGS environment variable | `-` | +| `rabbitmq.setUlimitNofiles` | Specify if max file descriptor limit should be set | `true` | +| `rabbitmq.ulimitNofiles` | Max File Descriptor limit | `65536` | +| `rabbitmq.maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `2` | +| `rabbitmq.onlineSchedulers` | RabbitMQ online scheduler threads | `1` | +| `rabbitmq.env` | RabbitMQ [environment variables](https://www.rabbitmq.com/configure.html#customise-environment) | `{}` | +| `rabbitmq.configuration` | Required cluster configuration | See values.yaml | +| `rabbitmq.extraConfiguration` | Extra configuration to add to rabbitmq.conf | See values.yaml | +| `rabbitmq.advancedConfiguration` | Extra configuration (in classic format) to add to advanced.config | See values.yaml | +| `rabbitmq.tls.enabled` | Enable TLS support to rabbitmq | `false` | +| `rabbitmq.tls.failIfNoPeerCert` | When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `rabbitmq.tls.sslOptionsVerify` | `verify_peer` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? 
| +| `rabbitmq.tls.caCertificate` | Ca certificate | Certificate Authority (CA) bundle content | +| `rabbitmq.tls.serverCertificate` | Server certificate | Server certificate content | +| `rabbitmq.tls.serverKey` | Server Key | Server private key content | +| `rabbitmq.tls.existingSecret` | Existing secret with certificate content to rabbitmq credentials | `nil` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | LDAP server | `""` | +| `ldap.port` | LDAP port | `389` | +| `ldap.user_dn_pattern` | DN used to bind to LDAP | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | Enable TLS for LDAP connections | `false` (if set to true, check advancedConfiguration parameter in values.yml) | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Amqp port | `5672` | +| `service.loadBalancerIP` | LoadBalancerIP for the service | `nil` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.nodePort` | Node port override, if serviceType NodePort | _random available between 30000-32767_ | +| `service.nodeTlsPort` | Node port override, if serviceType NodePort | _random available between 30000-32767_ | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.extraPorts` | Extra ports to expose in the service | `nil` | +| `service.extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `nil` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `service.annotations` | service annotations | {} | +| `schedulerName` | Name of the k8s service (other than default) | `nil` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.existingClaim` | RabbitMQ data Persistent Volume existing claim name, evaluated as a template | "" | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| 
`persistence.size` | Size of data volume | `8Gi` | +| `persistence.path` | Mount path of the data volume | `/opt/bitnami/rabbitmq/var/lib/rabbitmq` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `resources` | resource needs and limits to apply to the pod | {} | +| `replicas` | Replica count | `1` | +| `priorityClassName` | Pod priority class name | `` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy rules | `nil` | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity settings for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | [] | +| `updateStrategy` | Statefulset update strategy policy | `RollingUpdate` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.hostName` | Hostname to your RabbitMQ installation | `nil` | +| `ingress.path` | Path within the url structure | `/` | +| `ingress.tls` | enable ingress with tls | `false` | +| `ingress.tlsSecret` | tls type secret to be used | `myTlsSecret` | +| `ingress.annotations` | ingress annotations as an array | [] | +| `livenessProbe.enabled` | would you like a livenessProbed to be enabled | `true` | +| `livenessProbe.initialDelaySeconds` | number of seconds | 120 | +| `livenessProbe.timeoutSeconds` | number of seconds | 20 | +| `livenessProbe.periodSeconds` | number of seconds | 30 | +| `livenessProbe.failureThreshold` | number of failures | 6 | +| `livenessProbe.successThreshold` | number of successes | 1 | +| `podDisruptionBudget` | Pod Disruption Budget settings | {} | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| 
`readinessProbe.initialDelaySeconds` | number of seconds | 10 | +| `readinessProbe.timeoutSeconds` | number of seconds | 20 | +| `readinessProbe.periodSeconds` | number of seconds | 30 | +| `readinessProbe.failureThreshold` | number of failures | 3 | +| `readinessProbe.successThreshold` | number of successes | 1 | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image name | `bitnami/rabbitmq-exporter` | +| `metrics.image.tag` | Exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.livenessProbe.enabled` | would you like a livenessProbed to be enabled | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | number of seconds | 15 | +| `metrics.livenessProbe.timeoutSeconds` | number of seconds | 5 | +| `metrics.livenessProbe.periodSeconds` | number of seconds | 30 | +| `metrics.livenessProbe.failureThreshold` | number of failures | 6 | +| `metrics.livenessProbe.successThreshold` | number of successes | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | number of seconds | 5 | +| `metrics.readinessProbe.timeoutSeconds` | number of seconds | 5 | +| `metrics.readinessProbe.periodSeconds` | number of seconds | 30 | +| `metrics.readinessProbe.failureThreshold` | number of failures | 3 | +| `metrics.readinessProbe.successThreshold` | number of successes | 1 | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace where servicemonitor resource should be created | `nil` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the 
scrape is ended | `nil` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `nil` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.release` | Used to pass Labels release that sometimes should be custom for Prometheus Operator | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as rabbitmq | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.port` | Prometheus metrics exporter port | `9419` | +| `metrics.env` | Exporter [configuration environment variables](https://github.com/kbudde/rabbitmq_exporter#configuration) | `{}` | +| `metrics.resources` | Exporter resource requests/limit | `nil` | +| `metrics.capabilities` | Exporter: Comma-separated list of extended [scraping capabilities supported by the target RabbitMQ server](https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities) | `bert,no_sort` | +| `podLabels` | Additional labels for the statefulset pod(s). 
`volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` |
For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Resource needs and limits to apply to the pod: +```diff +- resources: {} ++ resources: ++ requests: ++ memory: 256Mi ++ cpu: 100m +``` + +- Replica count: +```diff +- replicas: 1 ++ replicas: 3 +``` + +- Node labels for pod assignment: +```diff +- nodeSelector: {} ++ nodeSelector: ++ beta.kubernetes.io/arch: amd64 +``` + +- Enable ingress with TLS: +```diff +- ingress.tls: false ++ ingress.tls: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +- Enable init container that changes volume permissions in the data directory: +```diff +- volumePermissions.enabled: false ++ volumePermissions.enabled: true +``` + +To horizontally scale this chart once it has been deployed you have two options: + +- Use `kubectl scale` command + +- Upgrading the chart with the following parameters: + +```console +replicas=3 +rabbitmq.password="$RABBITMQ_PASSWORD" +rabbitmq.erlangCookie="$RABBITMQ_ERLANG_COOKIE" +``` + +> Note: please note it's mandatory to indicate the password and erlangCookie 
that was set the first time the chart was installed to upgrade the chart. Otherwise, new pods won't be able to join the cluster. + +### Load Definitions +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. For example: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: rabbitmq-load-definition +type: Opaque +stringData: + load_definition.json: |- + { + "vhosts": [ + { + "name": "/" + } + ] + } +``` + +Then, specify the `management.load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enable` to `true`. + +Any load definitions specified will be available within in the container at `/app`. + +> Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. For example : + +```yaml +extraSecrets: + load-definition: + load_definition.json: | + { + "vhosts": [ + { + "name": "/" + } + ] + } +rabbitmq: + loadDefinition: + enabled: true + secretName: load-definition + extraConfiguration: | + management.load_definitions = /app/load_definition.json +``` + +### Enabling TLS support + +To enable TLS support you must generate the certificates using RabbitMQ [documentation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). 
+ +You must include in your values.yaml the caCertificate, serverCertificate and serverKey files. + +```yaml + caCertificate: |- + -----BEGIN CERTIFICATE----- + MIIDRTCCAi2gAwIBAgIJAJPh+paO6a3cMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV + ... + -----END CERTIFICATE----- + serverCertificate: |- + -----BEGIN CERTIFICATE----- + MIIDqjCCApKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH + ... + -----END CERTIFICATE----- + serverKey: |- + -----BEGIN RSA PRIVATE KEY----- + MIIEpAIBAAKCAQEA2iX3M4d3LHrRAoVUbeFZN3EaGzKhyBsz7GWwTgETiNj+AL7p + .... + -----END RSA PRIVATE KEY----- +``` + +This will be generate a secret with the certs, but is possible specify an existing secret using `existingSecret: name-of-existing-secret-to-rabbitmq`. The secret is of type `kubernetes.io/tls`. + +Disabling [failIfNoPeerCert](https://www.rabbitmq.com/ssl.html#peer-verification-configuration) allows a TLS connection if client fails to provide a certificate + +[sslOptionsVerify](https://www.rabbitmq.com/ssl.html#peer-verification-configuration): When the sslOptionsVerify option is set to verify_peer, the client does send us a certificate, the node must perform peer verification. When set to verify_none, peer verification will be disabled and certificate exchange won't be performed. + +### LDAP + +LDAP support can be enabled in the chart by specifying the `ldap.` parameters while creating a release. The following parameters should be configured to properly enable the LDAP support in the chart. + +- `ldap.enabled`: Enable LDAP support. Defaults to `false`. +- `ldap.server`: LDAP server host. No defaults. +- `ldap.port`: LDAP server port. `389`. +- `ldap.user_dn_pattern`: DN used to bind to LDAP. `cn=${username},dc=example,dc=org`. +- `ldap.tls.enabled`: Enable TLS for LDAP connections. Defaults to `false`. 
+ +For example: + +```console +ldap.enabled="true" +ldap.server="my-ldap-server" +ldap.port="389" +ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +``` + +If `ldap.tls.enabled` is set to true, consider using `ldap.port=636` and checking the settings in the advancedConfiguration. + +### Common issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. 
+ +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +## Upgrading + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. 
The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` diff --git a/bitnami/rabbitmq/ci/affinity-toleration-values.yaml b/bitnami/rabbitmq/ci/affinity-toleration-values.yaml new file mode 100644 index 00000000000000..6be0ee1d3dba49 --- /dev/null +++ b/bitnami/rabbitmq/ci/affinity-toleration-values.yaml @@ -0,0 +1,14 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: foo + operator: In + values: + - bar diff --git a/bitnami/rabbitmq/ci/default-values.yaml b/bitnami/rabbitmq/ci/default-values.yaml new file mode 100644 index 00000000000000..fc2ba605adaef4 --- /dev/null +++ b/bitnami/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/bitnami/rabbitmq/ci/networkpolicy-values.yaml b/bitnami/rabbitmq/ci/networkpolicy-values.yaml new file mode 100644 index 00000000000000..67ef8d1489bbaf --- /dev/null +++ b/bitnami/rabbitmq/ci/networkpolicy-values.yaml @@ -0,0 +1,11 @@ +networkPolicy: + enable: true + allowExternal: false + additionalRules: + - matchLabels: + - role: foo + - matchExpressions: + - key: role + operator: In + values: + - bar diff --git a/bitnami/rabbitmq/templates/NOTES.txt b/bitnami/rabbitmq/templates/NOTES.txt new file mode 100644 index 00000000000000..3d9faafb1bd38c --- /dev/null +++ b/bitnami/rabbitmq/templates/NOTES.txt @@ -0,0 +1,79 @@ + +** Please be patient while the chart is being deployed ** + +Credentials: + + Username : {{ .Values.rabbitmq.username }} + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . 
}} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +RabbitMQ can be accessed within the cluster on port {{ .Values.service.nodePort }} at {{ template "rabbitmq.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.rabbitmq.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ template "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ template "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ .Values.service.port }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "rabbitmq.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} + echo "URL : amqp://127.0.0.1:{{ .Values.service.port }}/" + +To Access the RabbitMQ Management interface: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus exporter URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}/metrics" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +Then, open the URL obtained in a browser. + +{{- end }} + +{{- include "rabbitmq.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/bitnami/rabbitmq/templates/_helpers.tpl b/bitnami/rabbitmq/templates/_helpers.tpl new file mode 100644 index 00000000000000..23b3edbf58393c --- /dev/null +++ b/bitnami/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,258 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "rabbitmq.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.rabbitmq.plugins | replace " " ", " -}} +{{- if .Values.rabbitmq.extraPlugins -}} +{{- $extraPlugins := .Values.rabbitmq.extraPlugins | replace " " ", " -}} +{{- printf "[%s, %s]." $plugins $extraPlugins | indent 4 -}} +{{- else -}} +{{- printf "[%s]." 
$plugins | indent 4 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "rabbitmq.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. 
+*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.rabbitmq.existingPasswordSecret -}} + {{- printf "%s" .Values.rabbitmq.existingPasswordSecret -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.rabbitmq.existingErlangSecret -}} + {{- printf "%s" .Values.rabbitmq.existingErlangSecret -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "rabbitmq.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}}
+{{- if .Values.global -}}
+    {{- if .Values.global.storageClass -}}
+        {{- if (eq "-" .Values.global.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.global.storageClass -}}
+        {{- end -}}
+    {{- else -}}
+        {{- if .Values.persistence.storageClass -}}
+            {{- if (eq "-" .Values.persistence.storageClass) -}}
+                {{- printf "storageClassName: \"\"" -}}
+            {{- else }}
+                {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+            {{- end -}}
+        {{- end -}}
+    {{- end -}}
+{{- else -}}
+    {{- if .Values.persistence.storageClass -}}
+        {{- if (eq "-" .Values.persistence.storageClass) -}}
+            {{- printf "storageClassName: \"\"" -}}
+        {{- else }}
+            {{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
+        {{- end -}}
+    {{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message, and call fail.
+*/}}
+{{- define "rabbitmq.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of rabbitmq - LDAP support
+*/}}
+{{- define "rabbitmq.validateValues.ldap" -}}
+{{- if .Values.ldap.enabled }}
+{{- if not (and .Values.ldap.server .Values.ldap.port .Values.ldap.user_dn_pattern) }}
+rabbitmq: LDAP
+    Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.server",
+    "ldap.port", and "ldap.user_dn_pattern" are mandatory. Please provide them:
+
+    $ helm install {{ .Release.Name }} bitnami/rabbitmq \
+      --set ldap.enabled=true \
+      --set ldap.server="my-ldap-server" \
+      --set ldap.port="389" \
+      --set ldap.user_dn_pattern="cn=${username},dc=example,dc=org"
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Renders a value that contains template.
+Usage: +{{ include "rabbitmq.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "rabbitmq.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/bitnami/rabbitmq/templates/certs.yaml b/bitnami/rabbitmq/templates/certs.yaml new file mode 100644 index 00000000000000..1aa999be049b8b --- /dev/null +++ b/bitnami/rabbitmq/templates/certs.yaml @@ -0,0 +1,19 @@ +{{- if and (not .Values.rabbitmq.tls.existingSecret) ( .Values.rabbitmq.tls.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "rabbitmq.fullname" . }}-certs + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: kubernetes.io/tls +data: + ca.crt: + {{ required "A valid .Values.rabbitmq.tls.caCertificate entry required!" .Values.rabbitmq.tls.caCertificate | b64enc | quote }} + tls.crt: + {{ required "A valid .Values.rabbitmq.tls.serverCertificate entry required!" .Values.rabbitmq.tls.serverCertificate| b64enc | quote }} + tls.key: + {{ required "A valid .Values.rabbitmq.tls.serverKey entry required!" .Values.rabbitmq.tls.serverKey | b64enc | quote }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/configuration.yaml b/bitnami/rabbitmq/templates/configuration.yaml new file mode 100644 index 00000000000000..acf71bb72b8a3d --- /dev/null +++ b/bitnami/rabbitmq/templates/configuration.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + enabled_plugins: |- +{{ template "rabbitmq.plugins" . 
}} + rabbitmq.conf: |- + ##username and password + default_user={{.Values.rabbitmq.username}} + default_pass=CHANGEME +{{ .Values.rabbitmq.configuration | indent 4 }} +{{ .Values.rabbitmq.extraConfiguration | indent 4 }} +{{- if .Values.rabbitmq.tls.enabled }} + ssl_options.verify={{ .Values.rabbitmq.tls.sslOptionsVerify }} + listeners.ssl.default={{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert={{ .Values.rabbitmq.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem +{{- end }} +{{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + auth_ldap.servers.1 = {{ .Values.ldap.server }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} +{{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true +{{- end }} +{{- end }} + +{{ if .Values.rabbitmq.advancedConfiguration}} + advanced.config: |- +{{ .Values.rabbitmq.advancedConfiguration | indent 4 }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/healthchecks.yaml b/bitnami/rabbitmq/templates/healthchecks.yaml new file mode 100644 index 00000000000000..8b5ed466bc9a0f --- /dev/null +++ b/bitnami/rabbitmq/templates/healthchecks.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-healthchecks + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + rabbitmq-health-check: |- + #!/bin/sh + START_FLAG=/opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + if [ -f ${START_FLAG} ]; then + rabbitmqctl node_health_check + RESULT=$? + if [ $RESULT -ne 0 ]; then + rabbitmqctl status + exit $? 
+ fi + rm -f ${START_FLAG} + exit ${RESULT} + fi + rabbitmq-api-check $1 $2 + rabbitmq-api-check: |- + #!/bin/sh + set -e + URL=$1 + EXPECTED=$2 + ACTUAL=$(curl --silent --show-error --fail "${URL}") + echo "${ACTUAL}" + test "${EXPECTED}" = "${ACTUAL}" \ No newline at end of file diff --git a/bitnami/rabbitmq/templates/ingress.yaml b/bitnami/rabbitmq/templates/ingress.yaml new file mode 100644 index 00000000000000..4fa890d0026dc8 --- /dev/null +++ b/bitnami/rabbitmq/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: "{{ template "rabbitmq.fullname" . }}" + labels: + app: "{{ template "rabbitmq.name" . }}" + chart: "{{ template "rabbitmq.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.ingress.tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- if .Values.ingress.hostName }} + - host: {{ .Values.ingress.hostName }} + http: + {{- else }} + - http: + {{- end }} + paths: + - path: {{ .Values.ingress.path }} + backend: + serviceName: {{ template "rabbitmq.fullname" . 
}} + servicePort: {{ .Values.service.managerPort }} +{{- if .Values.ingress.tls }} + tls: + - hosts: + {{- if .Values.ingress.hostName }} + - {{ .Values.ingress.hostName }} + secretName: {{ .Values.ingress.tlsSecret }} + {{- else}} + - secretName: {{ .Values.ingress.tlsSecret }} + {{- end }} +{{- end }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/networkpolicy.yaml b/bitnami/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 00000000000000..6d0dc5a08086c7 --- /dev/null +++ b/bitnami/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.additionalRules }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- end }} + + # Allow prometheus scrapes + - ports: + - port: {{ .Values.metrics.port }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/pdb.yaml b/bitnami/rabbitmq/templates/pdb.yaml new file mode 100644 index 00000000000000..0e5f5448459b6e --- /dev/null +++ b/bitnami/rabbitmq/templates/pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "rabbitmq.fullname" . 
}} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/bitnami/rabbitmq/templates/prometheusrule.yaml b/bitnami/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 00000000000000..15f05e947e8459 --- /dev/null +++ b/bitnami/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "rabbitmq.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "rabbitmq.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/role.yaml b/bitnami/rabbitmq/templates/role.yaml new file mode 100644 index 00000000000000..f4bea319cca2cf --- /dev/null +++ b/bitnami/rabbitmq/templates/role.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbacEnabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] +{{- end }} diff --git a/bitnami/rabbitmq/templates/rolebinding.yaml b/bitnami/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 00000000000000..bf315b5ea2e6aa --- /dev/null +++ b/bitnami/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbacEnabled }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: +- kind: ServiceAccount + name: {{ template "rabbitmq.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader +{{- end }} diff --git a/bitnami/rabbitmq/templates/secrets.yaml b/bitnami/rabbitmq/templates/secrets.yaml new file mode 100644 index 00000000000000..619215f3d5cabb --- /dev/null +++ b/bitnami/rabbitmq/templates/secrets.yaml @@ -0,0 +1,38 @@ +{{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{ if not .Values.rabbitmq.existingPasswordSecret }}{{ if .Values.rabbitmq.password }} + rabbitmq-password: {{ .Values.rabbitmq.password | b64enc | quote }} + {{ else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{ end }}{{ end }} + {{ if not .Values.rabbitmq.existingErlangSecret }}{{ if .Values.rabbitmq.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.rabbitmq.erlangCookie | b64enc | quote }} + {{ else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{ end }}{{ end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $key }} + labels: + app: {{ template "rabbitmq.name" $ }} + chart: {{ template "rabbitmq.chart" $ }} + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +type: Opaque +stringData: +{{ $value | toYaml | nindent 2 }} +{{- end }} diff --git a/bitnami/rabbitmq/templates/serviceaccount.yaml b/bitnami/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 00000000000000..b4ac6aa917c5c7 --- /dev/null +++ b/bitnami/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.rbacEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end }} diff --git a/bitnami/rabbitmq/templates/servicemonitor.yaml b/bitnami/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 00000000000000..0b556f6e96d007 --- /dev/null +++ b/bitnami/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "rabbitmq.fullname" . 
}} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + heritage: "{{ .Release.Service }}" + release: {{ if .Values.metrics.serviceMonitor.release }}"{{ .Values.metrics.serviceMonitor.release }}"{{ else }}"{{ .Release.Name }}"{{ end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: +{{ toYaml .Values.metrics.serviceMonitor.relabellings | indent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/bitnami/rabbitmq/templates/statefulset.yaml b/bitnami/rabbitmq/templates/statefulset.yaml new file mode 100644 index 00000000000000..08d4364bca7e9f --- /dev/null +++ b/bitnami/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,372 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + serviceName: {{ template "rabbitmq.fullname" . 
}}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicas }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "rabbitmq.chart" . }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: + {{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "rabbitmq.imagePullSecrets" . | indent 6 }} + {{- if .Values.rbacEnabled}} + serviceAccountName: {{ template "rabbitmq.fullname" . }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "rabbitmq.tplValue" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.securityContext.enabled }} + initContainers: + - name: volume-permissions + image: "{{ template "rabbitmq.volumePermissions.image" . 
}}" + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: data + mountPath: "{{ .Values.persistence.path }}" + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - bash + - -ec + - | + mkdir -p /opt/bitnami/rabbitmq/.rabbitmq/ + mkdir -p /opt/bitnami/rabbitmq/etc/rabbitmq/ + touch /opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + #persist the erlang cookie in both places for server and cli tools + echo $RABBITMQ_ERL_COOKIE > /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + cp /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/.rabbitmq/ + #change permission so only the user has access to the cookie file + chmod 600 /opt/bitnami/rabbitmq/.rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + #copy the mounted configuration to both places + cp /opt/bitnami/rabbitmq/conf/* /opt/bitnami/rabbitmq/etc/rabbitmq + # Apply resources limits + {{- if .Values.rabbitmq.setUlimitNofiles }} + ulimit -n "${RABBITMQ_ULIMIT_NOFILES}" + {{- end }} + #replace the default password that is generated + sed -i "/CHANGEME/cdefault_pass=${RABBITMQ_PASSWORD//\\/\\\\}" /opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf + {{- if and .Values.persistence.enabled .Values.forceBoot.enabled }} + if [ -d "{{ .Values.persistence.path }}/mnesia/${RABBITMQ_NODENAME}" ]; then rabbitmqctl force_boot; fi + {{- end }} + exec rabbitmq-server + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /opt/bitnami/rabbitmq/conf + - name: healthchecks 
+ mountPath: /usr/local/sbin/rabbitmq-api-check + subPath: rabbitmq-api-check + - name: healthchecks + mountPath: /usr/local/sbin/rabbitmq-health-check + subPath: rabbitmq-health-check + {{- if .Values.rabbitmq.tls.enabled }} + - name: {{ template "rabbitmq.fullname" . }}-certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + - name: data + mountPath: "{{ .Values.persistence.path }}" + {{- if .Values.rabbitmq.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + ports: + - name: epmd + containerPort: 4369 + - name: amqp + containerPort: {{ .Values.service.port }} + {{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: {{ .Values.service.distPort }} + - name: stats + containerPort: {{ .Values.service.managerPort }} +{{- if .Values.service.extraContainerPorts }} +{{ toYaml .Values.service.extraContainerPorts | indent 8 }} +{{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - sh + - -c + - rabbitmq-api-check "http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node" '{"status":"ok"}' + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - sh + - -c + - rabbitmq-health-check "http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node" '{"status":"ok"}' + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + 
periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- end }} + {{- if and (gt (.Values.replicas | int) 1) ( eq .Values.rabbitmq.clustering.rebalance true) }} + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -c + - until rabbitmqctl cluster_status >/dev/null; do echo Waiting for + cluster readiness...; sleep 5 ; done; rabbitmq-queues rebalance "all" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.rabbitmq.clustering.address_type }} + {{- if (eq "hostname" .Values.rabbitmq.clustering.address_type) }} + - name: RABBITMQ_NODENAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}" + {{- else }} + - name: RABBITMQ_NODENAME + {{- if .Values.rabbitmq.rabbitmqClusterNodeName }} + value: {{ .Values.rabbitmq.rabbitmqClusterNodeName | quote }} + {{- else }} + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_ENABLE + value: "yes" + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: RABBITMQ_LDAP_SERVER_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- 
end }} + - name: RABBITMQ_LOGS + value: {{ .Values.rabbitmq.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.rabbitmq.ulimitNofiles | quote }} + {{- if and .Values.rabbitmq.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.rabbitmq.maxAvailableSchedulers) (toString .Values.rabbitmq.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: rabbitmq-erlang-cookie + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- range $key, $value := .Values.rabbitmq.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "rabbitmq.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + - name: RABBIT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . 
}} + key: rabbitmq-password + - name: RABBIT_URL + value: "http://{{ .Values.metrics.rabbitmqAddress }}:{{ .Values.service.managerPort }}" + - name: RABBIT_USER + value: {{ .Values.rabbitmq.username }} + - name: PUBLISH_PORT + value: "{{ .Values.metrics.port }}" + {{ if .Values.metrics.capabilities }} + - name: RABBIT_CAPABILITIES + value: "{{ .Values.metrics.capabilities }}" + {{- end }} + {{- range $key, $value := .Values.metrics.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- if .Values.securityContext.extra }} + {{- toYaml .Values.securityContext.extra | nindent 8 }} + {{- end }} + {{- end }} + volumes: + {{- if .Values.rabbitmq.tls.enabled }} + - name: {{ template 
"rabbitmq.fullname" . }}-certs
+ secret:
+ secretName: {{ if .Values.rabbitmq.tls.existingSecret }}{{ .Values.rabbitmq.tls.existingSecret }}{{- else }}{{ template "rabbitmq.fullname" . }}-certs{{- end }}
+ items:
+ - key: ca.crt
+ path: ca_certificate.pem
+ - key: tls.crt
+ path: server_certificate.pem
+ - key: tls.key
+ path: server_key.pem
+ {{- end }}
+ - name: config-volume
+ configMap:
+ name: {{ template "rabbitmq.fullname" . }}-config
+ items:
+ - key: rabbitmq.conf
+ path: rabbitmq.conf
+ {{- if .Values.rabbitmq.advancedConfiguration }}
+ - key: advanced.config
+ path: advanced.config
+ {{- end }}
+ - key: enabled_plugins
+ path: enabled_plugins
+ - name: healthchecks
+ configMap:
+ name: {{ template "rabbitmq.fullname" . }}-healthchecks
+ items:
+ - key: rabbitmq-health-check
+ path: rabbitmq-health-check
+ mode: 0755
+ - key: rabbitmq-api-check
+ path: rabbitmq-api-check
+ mode: 0755
+ {{- if .Values.rabbitmq.loadDefinition.enabled }}
+ - name: load-definition-volume
+ secret:
+ secretName: {{ .Values.rabbitmq.loadDefinition.secretName | quote }}
+ {{- end }}
+ {{- if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+ {{- else if .Values.persistence.existingClaim }}
+ - name: data
+ persistentVolumeClaim:
+ {{- with .Values.persistence.existingClaim }}
+ claimName: {{ tpl . $ }}
+ {{- end }}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels:
+ app: {{ template "rabbitmq.name" . }}
+ release: "{{ .Release.Name }}"
+ heritage: "{{ .Release.Service }}"
+ spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+ {{ include "rabbitmq.storageClass" . 
}} + {{- end }} diff --git a/bitnami/rabbitmq/templates/svc-headless.yaml b/bitnami/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 00000000000000..14ad08e8ecc7ae --- /dev/null +++ b/bitnami/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "rabbitmq.fullname" . }}-headless + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: {{ .Values.service.port }} + targetPort: amqp +{{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-tls + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls +{{- end }} + - name: dist + port: {{ .Values.service.distPort }} + targetPort: dist + - name: stats + port: {{ .Values.service.managerPort }} + targetPort: stats + selector: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" diff --git a/bitnami/rabbitmq/templates/svc.yaml b/bitnami/rabbitmq/templates/svc.yaml new file mode 100644 index 00000000000000..f811a329fd6774 --- /dev/null +++ b/bitnami/rabbitmq/templates/svc.yaml @@ -0,0 +1,78 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if or .Values.service.annotations .Values.metrics.enabled }} + annotations: +{{- end }} +{{- if .Values.service.annotations }} +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} +{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} + {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: epmd + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-ssl + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodeTlsPort))) }} + nodePort: {{ .Values.service.nodeTlsPort }} + {{- end }} + {{- end }} + - name: dist + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} + - name: stats + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + port: {{ .Values.metrics.port }} + targetPort: metrics + {{- if (eq .Values.service.type "ClusterIP") 
}} + nodePort: null + {{- end }} +{{- end }} +{{- if .Values.service.extraPorts }} +{{ toYaml .Values.service.extraPorts | indent 2 }} +{{- end }} + selector: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" diff --git a/bitnami/rabbitmq/values-production.yaml b/bitnami/rabbitmq/values-production.yaml new file mode 100644 index 00000000000000..532194796993af --- /dev/null +++ b/bitnami/rabbitmq/values-production.yaml @@ -0,0 +1,574 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.8.2-debian-10-r30 + + ## set to true if you would like to see extra information on logs + ## it turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override rabbitmq.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override rabbitmq.fullname template +## +# fullnameOverride: + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## does your cluster have rbac enabled? assume yes by default +rbacEnabled: true + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. +## ref : https://www.rabbitmq.com/clustering.html#restarting +## +podManagementPolicy: OrderedReady + +## section of specific values for rabbitmq +rabbitmq: + ## RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # password: + # existingPasswordSecret: name-of-existing-secret + + ## Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # erlangCookie: + # existingErlangSecret: name-of-existing-secret + + ## Node name to cluster with. 
e.g.: `clusternode@hostname` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # rabbitmqClusterNodeName: + + ## Value for the RABBITMQ_LOGS environment variable + ## ref: https://www.rabbitmq.com/logging.html#log-file-location + ## + logs: '-' + + ## RabbitMQ Max File Descriptors + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits + ## + setUlimitNofiles: true + ulimitNofiles: '65536' + + ## RabbitMQ maximum available scheduler threads and online scheduler threads + ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads + ## + maxAvailableSchedulers: 2 + onlineSchedulers: 1 + + ## Plugins to enable + plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + + ## Extra plugins to enable + ## Use this instead of `plugins` to add new plugins + extraPlugins: "rabbitmq_auth_backend_ldap" + + ## Clustering settings + clustering: + address_type: hostname + k8s_domain: cluster.local + ## Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false + + loadDefinition: + enabled: false + secretName: load-definition + + ## environment variables to configure rabbitmq + ## ref: https://www.rabbitmq.com/configure.html#customise-environment + env: {} + + ## Configuration file content: required cluster configuration + ## Do not override unless you know what you are doing. 
To add more configuration, use `extraConfiguration` or `advancedConfiguration` instead
+ configuration: |-
+ ## Clustering
+ cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+ cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+ cluster_formation.node_cleanup.interval = 10
+ cluster_formation.node_cleanup.only_log_warning = true
+ cluster_partition_handling = autoheal
+ # queue master locator
+ queue_master_locator=min-masters
+ # enable guest user
+ loopback_users.guest = false
+
+ ## Configuration file content: extra configuration
+ ## Use this instead of `configuration` to add more configuration
+ extraConfiguration: |-
+ #disk_free_limit.absolute = 50MB
+ #management.load_definitions = /app/load_definition.json
+
+ ## Configuration file content: advanced configuration
+ ## Use this as additional configuration in classic config format (Erlang term configuration format)
+ ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines.
+ ## advancedConfiguration: |-
+ ## [{
+ ## rabbitmq_auth_backend_ldap,
+ ## [{
+ ## ssl_options,
+ ## [{
+ ## verify, verify_none
+ ## }, {
+ ## fail_if_no_peer_cert,
+ ## false
+ ## }]
+ ## ]}
+ ## }].
+ ##
+ advancedConfiguration: |-
+
+ ## Enable encryption to rabbitmq
+ ## ref: https://www.rabbitmq.com/ssl.html
+ ##
+ tls:
+ enabled: false
+ failIfNoPeerCert: true
+ sslOptionsVerify: verify_peer
+ caCertificate: |-
+ serverCertificate: |-
+ serverKey: |-
+ # existingSecret: name-of-existing-secret-to-rabbitmq
+
+## LDAP configuration
+##
+ldap:
+ enabled: false
+ server: ""
+ port: "389"
+ user_dn_pattern: cn=${username},dc=example,dc=org
+ tls:
+ # If you enabled TLS/SSL you can set advanced options using the advancedConfiguration parameter.
+ enabled: false + +## Kubernetes service type +service: + type: ClusterIP + ## Node port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # nodePort: 30672 + + ## Set the LoadBalancerIP + ## + # loadBalancerIP: + + ## Node port Tls + ## + # nodeTlsPort: 30671 + + ## Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## Amqp Tls port + ## + tlsPort: 5671 + + ## Dist port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPort: 15672 + + ## Service annotations + annotations: {} + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + ## Extra ports to expose + # extraPorts: + + ## Extra ports to be included in container spec, primarily informational + # extraContainerPorts: + +# Additional pod labels to apply +podLabels: {} + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + extra: {} + +persistence: + ## this enables PVC templates that will create one per pod + enabled: true + + ## rabbitmq data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + + ## Existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + # existingClaim: "" + + # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well. + size: 8Gi + + # persistence directory, maps to the rabbitmq data directory + path: /opt/bitnami/rabbitmq/var/lib/rabbitmq + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 100m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
+ ## + # additionalRules: + # - matchLabels: + # - role: frontend + # - matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +## Replica count, set to 3 to provide a default available cluster +replicas: 3 + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## updateStrategy for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: + beta.kubernetes.io/arch: amd64 +tolerations: [] +affinity: {} + +## affinity: | +## podAntiAffinity: +## requiredDuringSchedulingIgnoredDuringExecution: +## - labelSelector: +## matchLabels: +## app: {{ template "rabbitmq.name" . }} +## release: {{ .Release.Name | quote }} +## topologyKey: kubernetes.io/hostname +## preferredDuringSchedulingIgnoredDuringExecution: +## - weight: 100 +## podAffinityTerm: +## labelSelector: +## matchLabels: +## app: {{ template "rabbitmq.name" . }} +## release: {{ .Release.Name | quote }} +## topologyKey: failure-domain.beta.kubernetes.io/zone + +## annotations for rabbitmq pods +podAnnotations: {} + +## Configure the podDisruptionBudget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +## Configure the ingress resource that allows you to access the +## Wordpress installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array
+ ## hostName: foo.bar.com
+ path: /
+
+ ## Set this to true in order to enable TLS on the ingress record
+ ## A side effect of this will be that the backend rabbitmq service will be connected at port 443
+ tls: true
+
+ ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
+ tlsSecret: myTlsSecret
+
+ ## Ingress annotations done as key:value pairs
+ ## If you're using kube-lego, you will want to add:
+ ## kubernetes.io/tls-acme: true
+ ##
+ ## For a full list of possible ingress annotations, please see
+ ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+ ##
+ ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: true
+
+## The following settings are to configure the frequency of the liveness and readiness probes
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 120
+ timeoutSeconds: 20
+ periodSeconds: 30
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 10
+ timeoutSeconds: 20
+ periodSeconds: 30
+ failureThreshold: 3
+ successThreshold: 1
+
+metrics:
+ enabled: true
+ image:
+ registry: docker.io
+ repository: bitnami/rabbitmq-exporter
+ tag: 0.29.0-debian-10-r28
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## environment variables to configure rabbitmq_exporter + ## ref: https://github.com/kbudde/rabbitmq_exporter#configuration + env: {} + ## Metrics exporter port + port: 9419 + ## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server + ## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities + capabilities: "bert,no_sort" + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9419" + + livenessProbe: + enabled: true + initialDelaySeconds: 15 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + # namespace: "" + ## Specify the interval at which metrics should be scraped + interval: 30s + ## Specify the timeout after which the scrape is ended + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + honorLabels: false + ## Specify the release for ServiceMonitor. 
Sometimes it should be custom for prometheus operator to work + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## List of reules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## Please adapt them to your needs. + ## Make sure to constraint the rules to the current rabbitmq service. + ## Also make sure to escape what looks like helm template. + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + # description: RabbitMQ node down + + # - alert: ClusterDown + # expr: | + # sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + # < {{ .Values.replicas }} + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster + # VALUE = {{ "{{ $value }}" }} + + # - alert: ClusterPartition + # expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . 
}}"} > 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Cluster partition + # VALUE = {{ "{{ $value }}" }} + + # - alert: OutOfMemory + # expr: | + # rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + # / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} + # * 100 > 90 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + # LABELS: {{ "{{ $labels }}" }} + + # - alert: TooManyConnections + # expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + # description: | + # RabbitMQ instance has too many connections (> 1000) + # VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an +## unknown order. +## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot +## +forceBoot: + enabled: false + +## Optionally specify extra secrets to be created by the chart. 
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## +extraSecrets: {} + # load-definition: + # load_definition.json: | + # { + # ... + # } diff --git a/bitnami/rabbitmq/values.schema.json b/bitnami/rabbitmq/values.schema.json new file mode 100644 index 00000000000000..038f577994758a --- /dev/null +++ b/bitnami/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "rabbitmq": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + } + } + }, + "replicas": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "persistence.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in 
the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/bitnami/rabbitmq/values.yaml b/bitnami/rabbitmq/values.yaml new file mode 100644 index 00000000000000..3416294f8c5f66 --- /dev/null +++ b/bitnami/rabbitmq/values.yaml @@ -0,0 +1,555 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.8.2-debian-10-r30 + + ## set to true if you would like to see extra information on logs + ## it turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override rabbitmq.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override rabbitmq.fullname template +## +# fullnameOverride: + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## does your cluster have rbac enabled? assume yes by default +rbacEnabled: true + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. 
+## ref : https://www.rabbitmq.com/clustering.html#restarting +## +podManagementPolicy: OrderedReady + +## section of specific values for rabbitmq +rabbitmq: + ## RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # password: + # existingPasswordSecret: name-of-existing-secret + + ## Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # erlangCookie: + # existingErlangSecret: name-of-existing-secret + + ## Node name to cluster with. e.g.: `clusternode@hostname` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # rabbitmqClusterNodeName: + + ## Value for the RABBITMQ_LOGS environment variable + ## ref: https://www.rabbitmq.com/logging.html#log-file-location + ## + logs: '-' + + ## RabbitMQ Max File Descriptors + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits + ## + setUlimitNofiles: true + ulimitNofiles: '65536' + + ## RabbitMQ maximum available scheduler threads and online scheduler threads + ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads + ## + maxAvailableSchedulers: 2 + onlineSchedulers: 1 + + ## Plugins to enable + plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + + ## Extra plugins to enable + ## Use this instead of `plugins` to add new plugins + extraPlugins: "rabbitmq_auth_backend_ldap" + + ## Clustering settings + clustering: + address_type: hostname + k8s_domain: cluster.local + ## Rebalance master for queues in cluster when new replica is created + ## ref: 
https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false + + loadDefinition: + enabled: false + secretName: load-definition + + ## environment variables to configure rabbitmq + ## ref: https://www.rabbitmq.com/configure.html#customise-environment + env: {} + + ## Configuration file content: required cluster configuration + ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead + configuration: |- + ## Clustering + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + + ## Configuration file content: extra configuration + ## Use this instead of `configuration` to add more configuration + extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json + + ## Configuration file content: advanced configuration + ## Use this as additional configuraton in classic config format (Erlang term configuration format) + ## + ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. + ## advancedConfiguration: |- + ## [{ + ## rabbitmq_auth_backend_ldap, + ## [{ + ## ssl_options, + ## [{ + ## verify, verify_none + ## }, { + ## fail_if_no_peer_cert, + ## false + ## }] + ## ]} + ## }]. 
+ ## + advancedConfiguration: |- + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## + tls: + enabled: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + # existingSecret: name-of-existing-secret-to-rabbitmq + +## LDAP configuration +## +ldap: + enabled: false + server: "" + port: "389" + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + # If you enabled TLS/SSL you can set advanced options using the advancedConfiguration parameter. + enabled: false + +## Kubernetes service type +service: + type: ClusterIP + ## Node port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # nodePort: 30672 + + ## Set the LoadBalancerIP + ## + # loadBalancerIP: + + ## Node port Tls + ## + # nodeTlsPort: 30671 + + ## Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## Amqp Tls port + ## + tlsPort: 5671 + + ## Dist port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPort: 15672 + + ## Service annotations + annotations: {} + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + ## Extra ports to expose + # extraPorts: + + ## Extra ports to be included in container spec, primarily informational + # extraContainerPorts: + +# Additional pod labels to apply +podLabels: {} + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + 
extra: {} + +persistence: + ## this enables PVC templates that will create one per pod + enabled: true + + ## rabbitmq data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + + ## Existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + # existingClaim: "" + + # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well. + size: 8Gi + + # persistence directory, maps to the rabbitmq data directory + path: /opt/bitnami/rabbitmq/var/lib/rabbitmq + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
+ ## + # additionalRules: + # - matchLabels: + # - role: frontend + # - matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +## Replica count, set to 1 to provide a default available cluster +replicas: 1 + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## updateStrategy for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: {} +tolerations: [] +affinity: {} +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 +## annotations for rabbitmq pods +podAnnotations: {} + +## Configure the ingress resource that allows you to access the +## RabbitMQ installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## The list of hostnames to be covered with this ingress record.
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + ## hostName: foo.bar.com + path: / + + ## Set this to true in order to enable TLS on the ingress record + ## A side effect of this will be that the backend rabbitmq service will be connected at port 443 + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: myTlsSecret + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + +## The following settings are to configure the frequency of the liveness and readiness probes +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +metrics: + enabled: false + image: + registry: docker.io + repository: bitnami/rabbitmq-exporter + tag: 0.29.0-debian-10-r28 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## environment variables to configure rabbitmq_exporter + ## ref: https://github.com/kbudde/rabbitmq_exporter#configuration + env: {} + ## Metrics exporter port + port: 9419 + ## RabbitMQ address to connect to (from the same Pod, usually the local loopback address). + ## If your Kubernetes cluster does not support IPv6, you can change to `127.0.0.1` in order to force IPv4. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/#networking + rabbitmqAddress: localhost + ## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server + ## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities + capabilities: "bert,no_sort" + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9419" + + livenessProbe: + enabled: true + initialDelaySeconds: 15 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + # namespace: "" + ## Specify the interval at which metrics should be scraped + interval: 30s + ## Specify the timeout after which the scrape is ended + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + honorLabels: false + ## Specify the release 
for ServiceMonitor. Sometimes it should be custom for prometheus operator to work + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## List of rules, used as template by Helm. + ## These are just example rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## Please adapt them to your needs. + ## Make sure to constrain the rules to the current rabbitmq service. + ## Also make sure to escape what looks like helm template. + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + # description: RabbitMQ node down + + # - alert: ClusterDown + # expr: | + # sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + # < {{ .Values.replicas }} + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster + # VALUE = {{ "{{ $value }}" }} + + # - alert: ClusterPartition + # expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . 
}}"} > 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Cluster partition + # VALUE = {{ "{{ $value }}" }} + + # - alert: OutOfMemory + # expr: | + # rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + # / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} + # * 100 > 90 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + # LABELS: {{ "{{ $labels }}" }} + + # - alert: TooManyConnections + # expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + # description: | + # RabbitMQ instance has too many connections (> 1000) + # VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an +## unknown order. +## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot +## +forceBoot: + enabled: false + +## Optionally specify extra secrets to be created by the chart. 
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## +extraSecrets: {} + # load-definition: + # load_definition.json: | + # { + # ... + # }