From ad8bf535c166ab47e7206cd149f005903d3ca59e Mon Sep 17 00:00:00 2001 From: Philex Date: Wed, 8 May 2024 16:35:19 -0700 Subject: [PATCH 01/27] feat: add chart to backup juno data and upload to cloudflare r2 --- charts/juno-node/Chart.yaml | 2 +- .../templates/juno-data-backup-cronjob.yaml | 215 ++++++++++++++++++ charts/juno-node/values.yaml | 12 + 3 files changed, 228 insertions(+), 1 deletion(-) create mode 100644 charts/juno-node/templates/juno-data-backup-cronjob.yaml diff --git a/charts/juno-node/Chart.yaml b/charts/juno-node/Chart.yaml index 5fab205e1..f043b9566 100644 --- a/charts/juno-node/Chart.yaml +++ b/charts/juno-node/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: juno-chart -version: 0.1.4 +version: 0.1.5 appVersion: "1" description: A Helm chart for deploying Juno service maintainers: diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml new file mode 100644 index 000000000..ae1a4c07a --- /dev/null +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -0,0 +1,215 @@ +{{- if .Values.backupJunoDataJob.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.deployment.projectName }}-backup-junodata-sa + namespace: {{ .Values.deployment.namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ .Values.deployment.projectName }}-backup-junodata-role + namespace: {{ .Values.deployment.namespace }} +rules: + - apiGroups: [""] + resources: ["pods", "persistentvolumeclaims"] + verbs: ["get", "list", "create", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ .Values.deployment.projectName }}-backup-junodata-rolebinding + namespace: {{ .Values.deployment.namespace }} +subjects: + - kind: ServiceAccount + name: {{ .Values.deployment.projectName }}-backup-junodata-sa + namespace: {{ .Values.deployment.namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Values.deployment.projectName }}-backup-junodata-role +--- +apiVersion: v1 +kind: Secret +metadata: + name: rclone-config + namespace: {{ .Values.deployment.namespace }} +stringData: + rclone.conf: | + [R2] + type = s3 + provider = Cloudflare + access_key_id = {{ .Values.backupJunoDataJob.key }} + secret_access_key = {{ .Values.backupJunoDataJob.secret }} + endpoint = {{ .Values.backupJunoDataJob.endpoint }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ .Values.deployment.namespace }}-juno-data-backup-pvc + namespace: {{ .Values.deployment.namespace }} +spec: + accessModes: + - ReadWriteOnce + storageClassName: premium-rwo + resources: + requests: + storage: 200Gi +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloning-disk-manifest + namespace: {{ .Values.deployment.namespace }} +data: + cloning-disk-manifest.yaml: | + kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: {{ .Values.deployment.namespace }}-pv-ssd-snapshot + namespace: {{ .Values.deployment.namespace }} + spec: + dataSource: + name: {{ .Values.backupJunoDataJob.dataSource }} + kind: PersistentVolumeClaim + accessModes: + - ReadWriteOnce + storageClassName: premium-rwo + resources: + requests: + storage: {{ .Values.backupJunoDataJob.strageSize }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: cloning-juno-manifest + namespace: {{ .Values.deployment.namespace }} +data: + cloning-juno-manifest.yaml: | + apiVersion: v1 + kind: Pod + metadata: + name: 
juno-data-archival-pod-0 + namespace: {{ .Values.deployment.namespace }} + spec: + serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + volumes: + - name: juno-data-volume + persistentVolumeClaim: + claimName: {{ .Values.deployment.namespace }}-pv-ssd-snapshot + - name: rclone-config + secret: + secretName: rclone-config + - name: tar-backup-volume + persistentVolumeClaim: + claimName: {{ .Values.deployment.namespace }}-juno-data-backup-pvc + initContainers: + - name: juno-archival-tar + image: ukemzyskywalker/archiver:v2 + command: ["/bin/sh", "-c"] + args: + - | + rm -rf /mnt/juno-tar-backup/*.tar && + rm -rf /mnt/data/*.tar && + tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . && sleep 10 + volumeMounts: + - name: juno-data-volume + mountPath: /mnt/data + - name: tar-backup-volume + mountPath: /mnt/juno-tar-backup + containers: + - name: rclone-upload-container + image: rclone/rclone:latest + command: ["/bin/sh"] + args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network}}"] + volumeMounts: + - name: rclone-config + mountPath: /config/rclone + - name: tar-backup-volume + mountPath: /mnt/juno-tar-backup + restartPolicy: OnFailure +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ .Values.deployment.projectName }}-backup-junodata-cronjob + namespace: {{ .Values.deployment.namespace }} +spec: + schedule: "{{ .Values.backupJunoDataJob.backupSchedule}}" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + completions: 1 + ttlSecondsAfterFinished: 30 + template: + spec: + serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + restartPolicy: Never + initContainers: + - name: copy-disk-kubectl-container + image: bitnami/kubectl:latest + command: ["/bin/sh"] + args: ["-c", "kubectl apply -f /cloning-disk-manifest/cloning-disk-manifest.yaml"] + volumeMounts: + - name: cloning-disk-manifest-volume + mountPath: /cloning-disk-manifest + containers: + - name: clone-juno-kubectl-container + image: bitnami/kubectl:latest + command: ["/bin/sh"] + args: ["-c", "kubectl apply -f /cloning-juno-manifest/cloning-juno-manifest.yaml"] + volumeMounts: + - name: cloning-juno-manifest-volume + mountPath: /cloning-juno-manifest + volumes: + - name: cloning-juno-manifest-volume + configMap: + name: cloning-juno-manifest + - name: cloning-disk-manifest-volume + configMap: + name: cloning-disk-manifest +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: delete-completed-pod-cronjob + namespace: {{ .Values.deployment.namespace }} +spec: + schedule: "{{ .Values.backupJunoDataJob.cleanupSchedule}}" + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 1 + failedJobsHistoryLimit: 1 + jobTemplate: + spec: + completions: 1 + ttlSecondsAfterFinished: 30 + template: + spec: + serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + restartPolicy: OnFailure + containers: + - name: kubectl-container + image: bitnami/kubectl:latest + command: + - "/bin/bash" + - "-c" + - | + kubectl delete pod --field-selector=status.phase==Succeeded + sleep 10 + describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-pv-ssd-snapshot) + if echo "$describe_output" | grep -q "Used By:[[:space:]]*"; then + echo "deleting {{ .Values.deployment.namespace }}-pv-ssd-snapshot....." 
+ kubectl delete pvc {{ .Values.deployment.namespace }}-pv-ssd-snapshot + sleep 30 + fi + describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc) + if echo "$describe_output" | grep -q "Used By:[[:space:]]*"; then + echo "deleting {{ .Values.deployment.namespace }}-juno-data-backup-pvc....." + kubectl delete pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc + sleep 30 + fi + {{- end -}} \ No newline at end of file diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index 39fcffce9..21a9d71de 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -209,3 +209,15 @@ env: data: - name: NETWORK value: "juno" + +### Back up juno data and upload to R2 cloud +backupJunoDataJob: + enabled: true + dataSource: "juno-sepolia-pv-ssd-juno-sepolia-0" + backupSchedule: "*/20 * * * *" + cleanupSchedule: "*/40 * * * *" + network: "sepolia" + strageSize: 200Gi + key: key-1234 + secret: secret-12345 + endpoint: "https://12345543.r2.cloudflarestorage.com" \ No newline at end of file From d7881524959f7550faf90b6222441a9abf716a1b Mon Sep 17 00:00:00 2001 From: Philex Date: Wed, 8 May 2024 16:52:00 -0700 Subject: [PATCH 02/27] feat: add chart to backup juno data and upload to cloudflare r2 - add new line at the end of file --- charts/juno-node/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index 21a9d71de..a3b35c6b2 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -220,4 +220,4 @@ backupJunoDataJob: strageSize: 200Gi key: key-1234 secret: secret-12345 - endpoint: "https://12345543.r2.cloudflarestorage.com" \ No newline at end of file + endpoint: "https://12345543.r2.cloudflarestorage.com" From 21ecd7c635687dd495a7e68e514b0ffaad6cc76c Mon Sep 17 00:00:00 2001 From: Philex Date: Wed, 8 May 2024 17:06:11 -0700 Subject: [PATCH 03/27] feat: add chart to backup juno data and upload to cloudflare r2 - change readme and change typo. 
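The `backupJunoDataJob` block in values.yaml drives everything the new CronJob template renders, so a client-side dry run is a cheap way to catch wiring mistakes such as the `strageSize` typo fixed in this commit. A minimal sketch, assuming a local checkout of this repository and stock `helm`/`kubectl` CLIs (the release name `juno-backup-test` is arbitrary):

```bash
# Render the chart with the backup job enabled, then validate the
# generated manifests client-side without touching a cluster.
helm template juno-backup-test charts/juno-node \
  --set backupJunoDataJob.enabled=true \
  | kubectl apply --dry-run=client -f -
```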
--- charts/juno-node/README.md | 12 +++++++++++- .../templates/juno-data-backup-cronjob.yaml | 2 +- charts/juno-node/values.yaml | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/charts/juno-node/README.md b/charts/juno-node/README.md index 610901c05..917e93033 100644 --- a/charts/juno-node/README.md +++ b/charts/juno-node/README.md @@ -1,6 +1,6 @@ # juno-chart -![Version: 0.1.4](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) A Helm chart for deploying Juno service @@ -147,6 +147,16 @@ A Helm chart for deploying Juno service | svc.wss.port | string | `"6061"` | | | taintsToleration.enabled | bool | `false` | | | taintsToleration.tolerations.network | string | `"juno"` | | +| backupJunoDataJob.enable | bool | `false` | | +| backupJunoDataJob.dataSource | string | `"juno-sepolia-pv-ssd-juno-sepolia-0"` | The juno disk source to be backup | +| backupJunoDataJob.backupSchedule | string | `"*/20 * * * *"` | The schedule to run data backup | +| backupJunoDataJob.cleanupSchedule | string | `"*/40 * * * *"` | The schedule to clean backup data node after execution | +| backupJunoDataJob.network | string | `"sepolia"` | Juno network | +| backupJunoDataJob.storageSize | string | `"200Gi"` | disk storage size | +| backupJunoDataJob.key | string | `"key-1234"` | R2 cloud backup key | +| backupJunoDataJob.secret | string | `"secret-12345"` | R2 cloud backup secret | +| backupJunoDataJob.endpoint | string | `"https://12345543.r2.cloudflarestorage.com"` | R2 cloud backup endpoint url | + ---------------------------------------------- Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index ae1a4c07a..474c44171 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -79,7 +79,7 @@ data: storageClassName: premium-rwo resources: requests: - storage: {{ .Values.backupJunoDataJob.strageSize }} + storage: {{ .Values.backupJunoDataJob.storageSize }} --- apiVersion: v1 kind: ConfigMap diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index a3b35c6b2..0a5752d11 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -217,7 +217,7 @@ backupJunoDataJob: backupSchedule: "*/20 * * * *" cleanupSchedule: "*/40 * * * *" network: "sepolia" - strageSize: 200Gi + storageSize: 200Gi key: key-1234 secret: secret-12345 endpoint: "https://12345543.r2.cloudflarestorage.com" From 2546d2cd12fe78866110aee67d95e3b4917375b4 Mon Sep 17 00:00:00 2001 From: Philex Date: Wed, 8 May 2024 18:03:11 -0700 Subject: [PATCH 04/27] feat: add chart to backup juno data and upload to cloudflare r2 - auto generate README.md and helm-docs v1.13.1 changed. 
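The README diff that follows is machine-generated: helm-docs rebuilds the configuration table from Chart.yaml and the values.yaml comments and sorts the rows alphabetically, which is why the hand-placed `backupJunoDataJob` rows move. A sketch of the regeneration step, assuming helm-docs v1.13.1 is on PATH and the command runs from the repository root (flag name per the norwoodj/helm-docs CLI):

```bash
# Rewrite README.md for every chart found under charts/.
helm-docs --chart-search-root=charts
```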
--- charts/juno-node/README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/charts/juno-node/README.md b/charts/juno-node/README.md index 917e93033..cd718a837 100644 --- a/charts/juno-node/README.md +++ b/charts/juno-node/README.md @@ -1,6 +1,6 @@ # juno-chart -![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.4-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) +![Version: 0.1.5](https://img.shields.io/badge/Version-0.1.5-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) A Helm chart for deploying Juno service @@ -27,6 +27,15 @@ A Helm chart for deploying Juno service | args.--ws | string | `"true"` | | | args.--ws-host | string | `"0.0.0.0"` | | | args.--ws-port | string | `"6061"` | | +| backupJunoDataJob.backupSchedule | string | `"*/20 * * * *"` | | +| backupJunoDataJob.cleanupSchedule | string | `"*/40 * * * *"` | | +| backupJunoDataJob.dataSource | string | `"juno-sepolia-pv-ssd-juno-sepolia-0"` | | +| backupJunoDataJob.enabled | bool | `true` | | +| backupJunoDataJob.endpoint | string | `"https://12345543.r2.cloudflarestorage.com"` | | +| backupJunoDataJob.key | string | `"key-1234"` | | +| backupJunoDataJob.network | string | `"sepolia"` | | +| backupJunoDataJob.secret | string | `"secret-12345"` | | +| backupJunoDataJob.storageSize | string | `"200Gi"` | | | batchjob.enabled | bool | `false` | | | batchjob.schedule | string | `"* */1 * * *"` | | | deployment.healthCheck.enabled | bool | `false` | | @@ -78,6 +87,7 @@ A Helm chart for deploying Juno service | serviceAccount.enabled | bool | `false` | | | serviceAccount.gcpServiceAccount | string | `"monitoring-sa-euw1@juno-prod-nth.iam.gserviceaccount.com"` | | | serviceAccount.name | string | `"juno-pgo"` | | +| svc.externalTrafficPolicy | string | `""` | | | svc.globalStaticInternalIpName | string | `""` | | | svc.globalStaticIpName | string | `""` | | | svc.ingress.enabled | bool | `true` | | @@ -147,16 +157,6 @@ A Helm chart for deploying Juno service | svc.wss.port | string | `"6061"` | | | taintsToleration.enabled | bool | `false` | | | taintsToleration.tolerations.network | string | `"juno"` | | -| backupJunoDataJob.enable | bool | `false` | | -| backupJunoDataJob.dataSource | string | `"juno-sepolia-pv-ssd-juno-sepolia-0"` | The juno disk source to be backup | -| backupJunoDataJob.backupSchedule | string | `"*/20 * * * *"` | The schedule to run data backup | -| backupJunoDataJob.cleanupSchedule | string | `"*/40 * * * *"` | The schedule to clean backup data node after execution | -| backupJunoDataJob.network | string | `"sepolia"` | Juno network | -| backupJunoDataJob.storageSize | string | `"200Gi"` | disk storage size | -| backupJunoDataJob.key | string | `"key-1234"` | R2 cloud backup key | -| backupJunoDataJob.secret | string | `"secret-12345"` | R2 cloud backup secret | -| backupJunoDataJob.endpoint | string | `"https://12345543.r2.cloudflarestorage.com"` | R2 cloud backup endpoint url | - ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) From fb69ab075adecf5990f4f126b091ba99e9270f7f Mon Sep 17 00:00:00 2001 From: Philex Date: Tue, 14 May 2024 17:34:48 -0700 Subject: [PATCH 05/27] Fix: 
change dynamic name --- .../templates/juno-data-backup-cronjob.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 474c44171..eb4e23612 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -33,7 +33,7 @@ roleRef: apiVersion: v1 kind: Secret metadata: - name: rclone-config + name: {{ .Values.deployment.projectName }}-rclone-config namespace: {{ .Values.deployment.namespace }} stringData: rclone.conf: | @@ -61,7 +61,7 @@ spec: apiVersion: v1 kind: ConfigMap metadata: - name: cloning-disk-manifest + name: {{ .Values.deployment.namespace }}-cloning-disk-manifest namespace: {{ .Values.deployment.namespace }} data: cloning-disk-manifest.yaml: | @@ -84,7 +84,7 @@ data: apiVersion: v1 kind: ConfigMap metadata: - name: cloning-juno-manifest + name: {{ .Values.deployment.namespace }}-cloning-juno-manifest namespace: {{ .Values.deployment.namespace }} data: cloning-juno-manifest.yaml: | @@ -99,15 +99,15 @@ data: - name: juno-data-volume persistentVolumeClaim: claimName: {{ .Values.deployment.namespace }}-pv-ssd-snapshot - - name: rclone-config + - name: {{ .Values.deployment.projectName }}-rclone-config secret: - secretName: rclone-config + secretName: {{ .Values.deployment.projectName }}-rclone-config - name: tar-backup-volume persistentVolumeClaim: claimName: {{ .Values.deployment.namespace }}-juno-data-backup-pvc initContainers: - name: juno-archival-tar - image: ukemzyskywalker/archiver:v2 + image: busybox command: ["/bin/sh", "-c"] args: - | @@ -125,7 +125,7 @@ data: command: ["/bin/sh"] args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network}}"] volumeMounts: - - name: rclone-config + - name: {{ .Values.deployment.projectName }}-rclone-config mountPath: /config/rclone - name: tar-backup-volume mountPath: /mnt/juno-tar-backup @@ -168,10 +168,10 @@ spec: volumes: - name: cloning-juno-manifest-volume configMap: - name: cloning-juno-manifest + name: {{ .Values.deployment.namespace }}-cloning-juno-manifest - name: cloning-disk-manifest-volume configMap: - name: cloning-disk-manifest + name: {{ .Values.deployment.namespace }}-cloning-disk-manifest --- apiVersion: batch/v1 kind: CronJob From 0c633a2439bb199980b9bf2c0a4aa379962f447a Mon Sep 17 00:00:00 2001 From: Philex Date: Sun, 26 May 2024 18:18:14 -0700 Subject: [PATCH 06/27] Fix: enhance to replace pod with job. 
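Compared with the bare Pod used so far, a Job gives the archival step retry semantics (`restartPolicy: OnFailure` with backoff) plus automatic cleanup through `ttlSecondsAfterFinished`. One way to confirm a run finished before the cleanup CronJob fires, sketched against the `<namespace>-juno-data-archival-job` name templated below (the namespace value is an assumption; substitute your own):

```bash
# Block until the archival Job reports the Complete condition,
# or give up after two hours.
NS=juno-sepolia   # assumed namespace; matches .Values.deployment.namespace
kubectl -n "$NS" wait --for=condition=complete \
  "job/${NS}-juno-data-archival-job" --timeout=7200s
```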
--- .../templates/juno-data-backup-cronjob.yaml | 124 ++++++++++-------- 1 file changed, 69 insertions(+), 55 deletions(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index eb4e23612..58b69b79c 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -1,21 +1,24 @@ {{- if .Values.backupJunoDataJob.enabled -}} +# Service Account for the Backup Job apiVersion: v1 kind: ServiceAccount metadata: name: {{ .Values.deployment.projectName }}-backup-junodata-sa namespace: {{ .Values.deployment.namespace }} - --- + +# Role for Backup Job with necessary permissions apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ .Values.deployment.projectName }}-backup-junodata-role namespace: {{ .Values.deployment.namespace }} rules: - - apiGroups: [""] - resources: ["pods", "persistentvolumeclaims"] - verbs: ["get", "list", "create", "update", "patch", "delete"] + - apiGroups: ["", "apps","batch"] + resources: ["pods", "jobs", "persistentvolumeclaims"] + verbs: ["get", "list","create", "update", "patch", "delete"] --- +# RoleBinding to bind Role with ServiceAccount apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -30,6 +33,8 @@ roleRef: kind: Role name: {{ .Values.deployment.projectName }}-backup-junodata-role --- + +# Secret to store R2 Cloud credentials apiVersion: v1 kind: Secret metadata: @@ -44,6 +49,7 @@ stringData: secret_access_key = {{ .Values.backupJunoDataJob.secret }} endpoint = {{ .Values.backupJunoDataJob.endpoint }} --- +# PVC for storing backup data apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -57,7 +63,7 @@ spec: requests: storage: 200Gi --- - +# ConfigMap for cloning disk manifest apiVersion: v1 kind: ConfigMap metadata: @@ -81,6 +87,8 @@ data: requests: storage: {{ .Values.backupJunoDataJob.storageSize }} --- + +# ConfigMap for cloning juno manifest apiVersion: v1 kind: ConfigMap metadata: @@ -88,56 +96,60 @@ metadata: namespace: {{ .Values.deployment.namespace }} data: cloning-juno-manifest.yaml: | - apiVersion: v1 - kind: Pod + apiVersion: batch/v1 + kind: Job metadata: - name: juno-data-archival-pod-0 + name: {{ .Values.deployment.namespace }}-juno-data-archival-job namespace: {{ .Values.deployment.namespace }} spec: - serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa - volumes: - - name: juno-data-volume - persistentVolumeClaim: - claimName: {{ .Values.deployment.namespace }}-pv-ssd-snapshot - - name: {{ .Values.deployment.projectName }}-rclone-config - secret: - secretName: {{ .Values.deployment.projectName }}-rclone-config - - name: tar-backup-volume - persistentVolumeClaim: - claimName: {{ .Values.deployment.namespace }}-juno-data-backup-pvc - initContainers: - - name: juno-archival-tar - image: busybox - command: ["/bin/sh", "-c"] - args: - - | - rm -rf /mnt/juno-tar-backup/*.tar && - rm -rf /mnt/data/*.tar && - tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . 
&& sleep 10 - volumeMounts: - - name: juno-data-volume - mountPath: /mnt/data - - name: tar-backup-volume - mountPath: /mnt/juno-tar-backup - containers: - - name: rclone-upload-container - image: rclone/rclone:latest - command: ["/bin/sh"] - args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network}}"] - volumeMounts: - - name: {{ .Values.deployment.projectName }}-rclone-config - mountPath: /config/rclone - - name: tar-backup-volume - mountPath: /mnt/juno-tar-backup - restartPolicy: OnFailure + ttlSecondsAfterFinished: 60 + template: + spec: + serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + volumes: + - name: juno-data-volume + persistentVolumeClaim: + claimName: {{ .Values.deployment.namespace }}-pv-ssd-snapshot + - name: {{ .Values.deployment.projectName }}-rclone-config + secret: + secretName: {{ .Values.deployment.projectName }}-rclone-config + - name: tar-backup-volume + persistentVolumeClaim: + claimName: {{ .Values.deployment.namespace }}-juno-data-backup-pvc + initContainers: + - name: juno-archival-tar + image: busybox + command: ["/bin/sh", "-c"] + args: + - | + rm -rf /mnt/juno-tar-backup/*.tar && + rm -rf /mnt/data/*.tar && + tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . && sleep 10 + volumeMounts: + - name: juno-data-volume + mountPath: /mnt/data + - name: tar-backup-volume + mountPath: /mnt/juno-tar-backup + containers: + - name: rclone-upload-container + image: rclone/rclone:latest + command: ["/bin/sh"] + args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network}}"] + volumeMounts: + - name: {{ .Values.deployment.projectName }}-rclone-config + mountPath: /config/rclone + - name: tar-backup-volume + mountPath: /mnt/juno-tar-backup + restartPolicy: OnFailure --- +# CronJob for Backup Task apiVersion: batch/v1 kind: CronJob metadata: name: {{ .Values.deployment.projectName }}-backup-junodata-cronjob namespace: {{ .Values.deployment.namespace }} spec: - schedule: "{{ .Values.backupJunoDataJob.backupSchedule}}" + schedule: "{{ .Values.backupJunoDataJob.backupSchedule }}" concurrencyPolicy: Forbid successfulJobsHistoryLimit: 1 failedJobsHistoryLimit: 1 @@ -166,20 +178,22 @@ spec: - name: cloning-juno-manifest-volume mountPath: /cloning-juno-manifest volumes: - - name: cloning-juno-manifest-volume - configMap: - name: {{ .Values.deployment.namespace }}-cloning-juno-manifest - name: cloning-disk-manifest-volume configMap: name: {{ .Values.deployment.namespace }}-cloning-disk-manifest + - name: cloning-juno-manifest-volume + configMap: + name: {{ .Values.deployment.namespace }}-cloning-juno-manifest + --- +# CronJob for Cleaning up Completed Pods and PVCs apiVersion: batch/v1 kind: CronJob metadata: - name: delete-completed-pod-cronjob + name: {{ .Values.deployment.namespace }}-delete-used-pvc-after-backup namespace: {{ .Values.deployment.namespace }} spec: - schedule: "{{ .Values.backupJunoDataJob.cleanupSchedule}}" + schedule: "{{ .Values.backupJunoDataJob.cleanupSchedule }}" concurrencyPolicy: Forbid successfulJobsHistoryLimit: 1 failedJobsHistoryLimit: 1 @@ -198,18 +212,18 @@ spec: - "/bin/bash" - "-c" - | - kubectl delete pod --field-selector=status.phase==Succeeded - sleep 10 + # Delete PVC if not used describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-pv-ssd-snapshot) if echo "$describe_output" | grep 
-q "Used By:[[:space:]]*"; then - echo "deleting {{ .Values.deployment.namespace }}-pv-ssd-snapshot....." + echo "Deleting {{ .Values.deployment.namespace }}-pv-ssd-snapshot..." kubectl delete pvc {{ .Values.deployment.namespace }}-pv-ssd-snapshot sleep 30 fi describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc) if echo "$describe_output" | grep -q "Used By:[[:space:]]*"; then - echo "deleting {{ .Values.deployment.namespace }}-juno-data-backup-pvc....." + echo "Deleting {{ .Values.deployment.namespace }}-juno-data-backup-pvc..." kubectl delete pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc sleep 30 fi - {{- end -}} \ No newline at end of file +--- +{{- end -}} \ No newline at end of file From 4508eee27bc18a375d3c38f05b8ec8747967585b Mon Sep 17 00:00:00 2001 From: Philex Date: Sun, 26 May 2024 18:37:02 -0700 Subject: [PATCH 07/27] Fix: remove tail space --- charts/juno-node/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index 0c2aef266..939402306 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -75,7 +75,7 @@ pgo: ENV: "juno-integration" URL: "http://localhost:6062/debug/pprof/profile" - ## cache warmup side container + ## cache warmup side container cache: enabled: false image: "us-east1-docker.pkg.dev/juno-stg-nth/juno-cache/cache:2.0" @@ -85,7 +85,7 @@ cache: memory: 512Mi requests: cpu: "100m" - memory: 100Mi + memory: 100Mi ### Service account serviceAccount: From 41297ce4995afbff126fa8c3e47fa6114afd5e25 Mon Sep 17 00:00:00 2001 From: Philex Date: Sun, 26 May 2024 18:45:10 -0700 Subject: [PATCH 08/27] Fix: add space --- charts/juno-node/templates/juno-data-backup-cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 58b69b79c..b5785cb21 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -134,7 +134,7 @@ data: - name: rclone-upload-container image: rclone/rclone:latest command: ["/bin/sh"] - args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network}}"] + args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network }}"] volumeMounts: - name: {{ .Values.deployment.projectName }}-rclone-config mountPath: /config/rclone From fc6c45907168d0ed7ddf9434bcb3aed2a28deb74 Mon Sep 17 00:00:00 2001 From: Philex Date: Mon, 27 May 2024 13:28:08 -0700 Subject: [PATCH 09/27] Fix: add space to correct syntax error --- charts/juno-node/templates/juno-data-backup-cronjob.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index b5785cb21..9ef9fa512 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -182,8 +182,8 @@ spec: configMap: name: {{ .Values.deployment.namespace }}-cloning-disk-manifest - name: cloning-juno-manifest-volume - configMap: - name: {{ .Values.deployment.namespace }}-cloning-juno-manifest + configMap: + name: {{ .Values.deployment.namespace }}-cloning-juno-manifest --- # CronJob for Cleaning up Completed Pods and PVCs From e44e3775e0be205d7721ffdc9094e0942d5d068f Mon 
Sep 17 00:00:00 2001 From: Philex Date: Sun, 2 Jun 2024 19:28:54 -0700 Subject: [PATCH 10/27] Fix: change rclone's secret from local file to secret managers --- .../templates/juno-data-backup-cronjob.yaml | 60 +++++++++++-------- charts/juno-node/values.yaml | 22 ++++++- 2 files changed, 54 insertions(+), 28 deletions(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 9ef9fa512..29569aac3 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ServiceAccount metadata: - name: {{ .Values.deployment.projectName }}-backup-junodata-sa + name: {{ .Values.deployment.namespace }}-backup-junodata-sa namespace: {{ .Values.deployment.namespace }} --- @@ -11,10 +11,10 @@ metadata: apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ .Values.deployment.projectName }}-backup-junodata-role + name: {{ .Values.deployment.namespace }}-backup-junodata-role namespace: {{ .Values.deployment.namespace }} rules: - - apiGroups: ["", "apps","batch"] + - apiGroups: [ "", "apps","batch"] resources: ["pods", "jobs", "persistentvolumeclaims"] verbs: ["get", "list","create", "update", "patch", "delete"] --- @@ -22,32 +22,31 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ .Values.deployment.projectName }}-backup-junodata-rolebinding + name: {{ .Values.deployment.namespace }}-backup-junodata-rolebinding namespace: {{ .Values.deployment.namespace }} subjects: - kind: ServiceAccount - name: {{ .Values.deployment.projectName }}-backup-junodata-sa + name: {{ .Values.deployment.namespace }}-backup-junodata-sa namespace: {{ .Values.deployment.namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ .Values.deployment.projectName }}-backup-junodata-role + name: {{ .Values.deployment.namespace }}-backup-junodata-role --- # Secret to store R2 Cloud credentials apiVersion: v1 kind: Secret metadata: - name: {{ .Values.deployment.projectName }}-rclone-config + name: {{ .Values.deployment.namespace }}-rclone-config namespace: {{ .Values.deployment.namespace }} stringData: rclone.conf: | [R2] type = s3 provider = Cloudflare - access_key_id = {{ .Values.backupJunoDataJob.key }} - secret_access_key = {{ .Values.backupJunoDataJob.secret }} - endpoint = {{ .Values.backupJunoDataJob.endpoint }} + env_auth = true + endpoint = https://d1cc7d59ae8f8dc2b1aa530c41b5c6ec.r2.cloudflarestorage.com --- # PVC for storing backup data apiVersion: v1 @@ -105,14 +104,14 @@ data: ttlSecondsAfterFinished: 60 template: spec: - serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + serviceAccountName: {{ .Values.deployment.namespace }}-backup-junodata-sa volumes: - name: juno-data-volume persistentVolumeClaim: claimName: {{ .Values.deployment.namespace }}-pv-ssd-snapshot - - name: {{ .Values.deployment.projectName }}-rclone-config + - name: {{ .Values.deployment.namespace }}-rclone-config secret: - secretName: {{ .Values.deployment.projectName }}-rclone-config + secretName: {{ .Values.deployment.namespace }}-rclone-config - name: tar-backup-volume persistentVolumeClaim: claimName: {{ .Values.deployment.namespace }}-juno-data-backup-pvc @@ -133,10 +132,21 @@ data: containers: - name: rclone-upload-container image: rclone/rclone:latest + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Values.secret.data.targetName }} + key: 
r2_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.secret.data.targetName }} + key: r2_secret_access_key command: ["/bin/sh"] args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network }}"] volumeMounts: - - name: {{ .Values.deployment.projectName }}-rclone-config + - name: {{ .Values.deployment.namespace }}-rclone-config mountPath: /config/rclone - name: tar-backup-volume mountPath: /mnt/juno-tar-backup @@ -146,7 +156,7 @@ data: apiVersion: batch/v1 kind: CronJob metadata: - name: {{ .Values.deployment.projectName }}-backup-junodata-cronjob + name: {{ .Values.deployment.namespace }}-backup-junodata-cronjob namespace: {{ .Values.deployment.namespace }} spec: schedule: "{{ .Values.backupJunoDataJob.backupSchedule }}" @@ -159,7 +169,7 @@ spec: ttlSecondsAfterFinished: 30 template: spec: - serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + serviceAccountName: {{ .Values.deployment.namespace }}-backup-junodata-sa restartPolicy: Never initContainers: - name: copy-disk-kubectl-container @@ -182,8 +192,8 @@ spec: configMap: name: {{ .Values.deployment.namespace }}-cloning-disk-manifest - name: cloning-juno-manifest-volume - configMap: - name: {{ .Values.deployment.namespace }}-cloning-juno-manifest + configMap: + name: {{ .Values.deployment.namespace }}-cloning-juno-manifest --- # CronJob for Cleaning up Completed Pods and PVCs @@ -203,7 +213,7 @@ spec: ttlSecondsAfterFinished: 30 template: spec: - serviceAccountName: {{ .Values.deployment.projectName }}-backup-junodata-sa + serviceAccountName: {{ .Values.deployment.namespace }}-backup-junodata-sa restartPolicy: OnFailure containers: - name: kubectl-container @@ -219,11 +229,11 @@ spec: kubectl delete pvc {{ .Values.deployment.namespace }}-pv-ssd-snapshot sleep 30 fi - describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc) - if echo "$describe_output" | grep -q "Used By:[[:space:]]*"; then - echo "Deleting {{ .Values.deployment.namespace }}-juno-data-backup-pvc..." - kubectl delete pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc - sleep 30 - fi +# describe_output=$(kubectl describe pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc) +# if echo "$describe_output" | grep -q "Used By:[[:space:]]*"; then +# echo "Deleting {{ .Values.deployment.namespace }}-juno-data-backup-pvc..." 
+# #kubectl delete pvc {{ .Values.deployment.namespace }}-juno-data-backup-pvc-a +# sleep 30 +# fi --- {{- end -}} \ No newline at end of file diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index 939402306..2c9e3653d 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -230,6 +230,22 @@ backupJunoDataJob: cleanupSchedule: "*/40 * * * *" network: "sepolia" storageSize: 200Gi - key: key-1234 - secret: secret-12345 - endpoint: "https://12345543.r2.cloudflarestorage.com" + +secret: + feederGateway: + refreshInterval: 10m + secretStoreName: juno-store + secretStoreKind: ClusterSecretStore + targetName: juno-goerli # name of the k8s secret to be created + targetCreationPolicy: Owner + key: feeder-gateway # name of the secret to target secret manager + property: testnet # name of the property to retrieve from secret manager + version: "1" # version of secret + secretKey: testnet # name of the secret data key + data: + refreshInterval: 10m + secretStoreName: juno-store # external store name (ClusterSecretStore), it is used to connect to a secret manager. + secretStoreKind: ClusterSecretStore # external store name + targetName: juno-sepolia-common # name of the k8s secret to be created + targetCreationPolicy: Owner + dataFromKey: secret-store # name of the secret in secret manager (GCP secrent manager) \ No newline at end of file From dfa8ba5a5c47e4dc9180856622dc31a02b2e7e43 Mon Sep 17 00:00:00 2001 From: Philex Date: Sun, 2 Jun 2024 19:51:45 -0700 Subject: [PATCH 11/27] Fix: change schedule --- charts/juno-node/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index 2c9e3653d..f43cb61d6 100644 --- a/charts/juno-node/values.yaml +++ b/charts/juno-node/values.yaml @@ -226,8 +226,8 @@ env: backupJunoDataJob: enabled: true dataSource: "juno-sepolia-pv-ssd-juno-sepolia-0" - backupSchedule: "*/20 * * * *" - cleanupSchedule: "*/40 * * * *" + backupSchedule: "0 0 * * *" + cleanupSchedule: "0 12 * * *" network: "sepolia" storageSize: 200Gi From da2dae9c07b3cf6a56afefeeae085e2d0bad332b Mon Sep 17 00:00:00 2001 From: Philex Date: Sun, 2 Jun 2024 19:56:03 -0700 Subject: [PATCH 12/27] Fix: add blank line and add externalsecret-common.yaml --- .../templates/externalsecret-common.yaml | 20 +++++++++++++++++++ charts/juno-node/values.yaml | 2 +- 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 charts/juno-node/templates/externalsecret-common.yaml diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml new file mode 100644 index 000000000..91e7650cd --- /dev/null +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -0,0 +1,20 @@ +{{- if .Values.secret }} +{{- with .Values.secret.data }} +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + name: {{ $.Values.deployment.projectName }} + namespace: {{ $.Values.deployment.namespace }} +spec: + refreshInterval: {{ .refreshInterval }} + secretStoreRef: + name: {{ .secretStoreName }} + kind: {{ .secretStoreKind }} + target: + name: {{ .targetName }} + creationPolicy: {{ .targetCreationPolicy }} + dataFrom: + - extract: + key: {{ dataFromKey }} # name of the secret in secret manager (GCP secret manager) +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml index f43cb61d6..384f1fdbf 100644 --- a/charts/juno-node/values.yaml +++ 
b/charts/juno-node/values.yaml
@@ -248,4 +248,4 @@ secret:
     secretStoreKind: ClusterSecretStore # external store name
     targetName: juno-sepolia-common # name of the k8s secret to be created
     targetCreationPolicy: Owner
-    dataFromKey: secret-store # name of the secret in secret manager (GCP secrent manager)
\ No newline at end of file
+    dataFromKey: secret-store # name of the secret in secret manager (GCP secrent manager)

From 6c1a19fe26079ba908cd0516b3d2cd1c7ab4d5a6 Mon Sep 17 00:00:00 2001
From: Philex
Date: Sun, 2 Jun 2024 20:02:53 -0700
Subject: [PATCH 13/27] Fix: add relative path of dataFromKey

---
 charts/juno-node/templates/externalsecret-common.yaml | 2 +-
 charts/juno-node/values.yaml                          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml
index 91e7650cd..1ceec4d62 100644
--- a/charts/juno-node/templates/externalsecret-common.yaml
+++ b/charts/juno-node/templates/externalsecret-common.yaml
@@ -15,6 +15,6 @@ spec:
     creationPolicy: {{ .targetCreationPolicy }}
   dataFrom:
   - extract:
-      key: {{ dataFromKey }} # name of the secret in secret manager (GCP secret manager)
+      key: {{ .dataFromKey }} # name of the secret in secret manager (GCP secret manager)
 {{- end }}
 {{- end }}
\ No newline at end of file
diff --git a/charts/juno-node/values.yaml b/charts/juno-node/values.yaml
index 384f1fdbf..453905002 100644
--- a/charts/juno-node/values.yaml
+++ b/charts/juno-node/values.yaml
@@ -248,4 +248,4 @@ secret:
     secretStoreKind: ClusterSecretStore # external store name
     targetName: juno-sepolia-common # name of the k8s secret to be created
     targetCreationPolicy: Owner
-    dataFromKey: secret-store # name of the secret in secret manager (GCP secrent manager)
+    dataFromKey: secret-store # name of the secret in secret manager (GCP secret manager)

From 7e86cd4117e30328139982316840ccdfdad0d427 Mon Sep 17 00:00:00 2001
From: Philex
Date: Sun, 2 Jun 2024 20:05:24 -0700
Subject: [PATCH 14/27] Fix: add relative path of dataFromKey

---
 charts/juno-node/templates/juno-data-backup-cronjob.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
index 29569aac3..5a8e36903 100644
--- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml
+++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
@@ -192,8 +192,8 @@ spec:
        configMap:
          name: {{ .Values.deployment.namespace }}-cloning-disk-manifest
      - name: cloning-juno-manifest-volume
-          configMap:
-            name: {{ .Values.deployment.namespace }}-cloning-juno-manifest
+        configMap:
+          name: {{ .Values.deployment.namespace }}-cloning-juno-manifest
 
 ---
 # CronJob for Cleaning up Completed Pods and PVCs

From 9e5871061f1b3fbea354187858091300b98d5107 Mon Sep 17 00:00:00 2001
From: Philex
Date: Thu, 13 Jun 2024 21:16:54 -0700
Subject: [PATCH 15/27] Fix: change DB size

---
 charts/cert-manager/README.md                 |    2 +-
 charts/common/README.md                       |    2 +-
 charts/dirk/README.md                         |    2 +-
 charts/ethereum-node/README.md                |    2 +-
 charts/execution-beacon/README.md             |    2 +-
 charts/external-dns/README.md                 |    2 +-
 charts/juno-node/README.md                    |   34 +-
 .../templates/juno-data-backup-cronjob.yaml   |    2 +-
 charts/kong/README.md                         |    2 +-
 charts/kube-prometheus-stack/README.md        |    2 +-
 charts/lodestar/README.md                     |    2 +-
 charts/loki/README.md                         |    2 +-
 charts/mev-boost/README.md                    |    2 +-
 charts/mysql/README.md                        |    2 +-
 charts/nethgate/README.md                     | 1526 ++++-------
charts/posmoni/README.md | 2 +- charts/rpc-saas-secretStore/README.md | 2 +- charts/validator-ejector/README.md | 2 +- charts/validator-kapi/README.md | 2 +- charts/validators/README.md | 2 +- charts/vouch/README.md | 2 +- charts/web3signer/README.md | 2 +- 22 files changed, 334 insertions(+), 1266 deletions(-) diff --git a/charts/cert-manager/README.md b/charts/cert-manager/README.md index bff63e4e2..fb9719fa9 100644 --- a/charts/cert-manager/README.md +++ b/charts/cert-manager/README.md @@ -185,4 +185,4 @@ Kubernetes: `>= 1.22.0-0` | webhook.volumes | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/common/README.md b/charts/common/README.md index 7327fd7c9..70f1654bd 100644 --- a/charts/common/README.md +++ b/charts/common/README.md @@ -18,4 +18,4 @@ A Library Helm Chart for grouping common logic between stakewise charts. This ch | exampleValue | string | `"common-chart"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/dirk/README.md b/charts/dirk/README.md index 8f8d69e31..824415dcf 100644 --- a/charts/dirk/README.md +++ b/charts/dirk/README.md @@ -89,4 +89,4 @@ A Helm chart for installing and configuring large scale ETH staking infrastructu | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/ethereum-node/README.md b/charts/ethereum-node/README.md index 8d0051523..8bde79b9b 100644 --- a/charts/ethereum-node/README.md +++ b/charts/ethereum-node/README.md @@ -87,4 +87,4 @@ This chart acts as an umbrella chart and allows to run a ethereum execution and | prysm.resources.requests.memory | string | `"2Gi"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/execution-beacon/README.md b/charts/execution-beacon/README.md index 859233aef..07831d6e4 100644 --- a/charts/execution-beacon/README.md +++ b/charts/execution-beacon/README.md @@ -201,4 +201,4 @@ Kubernetes: `^1.23.0-0` | serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/external-dns/README.md b/charts/external-dns/README.md index a9e7248e5..9bc33c814 100644 --- a/charts/external-dns/README.md +++ b/charts/external-dns/README.md @@ -101,4 +101,4 @@ ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS prov | txtSuffix | string | `""` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/juno-node/README.md b/charts/juno-node/README.md index 41794a0a4..7c7aa8bd4 100644 --- a/charts/juno-node/README.md +++ b/charts/juno-node/README.md @@ -27,17 +27,20 @@ A Helm chart for deploying Juno service | args.--ws | string | `"true"` | | | args.--ws-host | string | `"0.0.0.0"` | | | args.--ws-port | string | `"6061"` | | -| backupJunoDataJob.backupSchedule | string | `"*/20 * * * *"` | | -| backupJunoDataJob.cleanupSchedule | string | `"*/40 * * * *"` | | +| backupJunoDataJob.backupSchedule | string | `"0 0 * * *"` | | +| backupJunoDataJob.cleanupSchedule | string | `"0 12 * * *"` | | | backupJunoDataJob.dataSource | string | `"juno-sepolia-pv-ssd-juno-sepolia-0"` | | | backupJunoDataJob.enabled | bool | `true` | | -| backupJunoDataJob.endpoint | string | `"https://12345543.r2.cloudflarestorage.com"` | | -| backupJunoDataJob.key | string | `"key-1234"` | | | backupJunoDataJob.network | string | `"sepolia"` | | -| backupJunoDataJob.secret | string | `"secret-12345"` | | | backupJunoDataJob.storageSize | string | `"200Gi"` | | | batchjob.enabled | bool | `false` | | | batchjob.schedule | string | `"* */1 * * *"` | | +| cache.enabled | bool | `false` | | +| cache.image | string | `"us-east1-docker.pkg.dev/juno-stg-nth/juno-cache/cache:2.0"` | | +| cache.resources.limits.cpu | string | `"100m"` | | +| cache.resources.limits.memory | string | `"512Mi"` | | +| cache.resources.requests.cpu | string | `"100m"` | | +| cache.resources.requests.memory | string | `"100Mi"` | | | deployment.healthCheck.enabled | bool | `false` | | | deployment.healthCheck.livenessProbe.failureThreshold | int | `6` | | | deployment.healthCheck.livenessProbe.initialDelaySeconds | int | `9600` | | @@ -78,18 +81,27 @@ A Helm chart for deploying Juno service | pgo.resources.limits.memory | string | `"4Gi"` | | | pgo.resources.requests.cpu | string | `"1"` | | | pgo.resources.requests.memory | string | `"2Gi"` | | -| cache.enabled | bool | `false` | | -| cache.image | string | `"us-east1-docker.pkg.dev/juno-stg-nth/juno-cache/cache:2.0"` | | -| cache.resources.limits.cpu | string | `"100m"` | | -| cache.resources.limits.memory | string | `"512Mi"` | | -| cache.resources.requests.cpu | string | `"100m"` | | -| cache.resources.requests.memory | string | `"100Mi"` | | | pvc.datasource | string | `""` | | | pvc.enabled | bool | `true` | | | pvc.mount[0].mountPath | string | `"/var/lib/juno"` | | | pvc.mount[0].pvName | string | `"pv"` | | | pvc.mount[0].storageSize | string | `"250Gi"` | | | pvc.storageClassName | string | `"standard"` | | +| 
secret.data.dataFromKey | string | `"secret-store"` | | +| secret.data.refreshInterval | string | `"10m"` | | +| secret.data.secretStoreKind | string | `"ClusterSecretStore"` | | +| secret.data.secretStoreName | string | `"juno-store"` | | +| secret.data.targetCreationPolicy | string | `"Owner"` | | +| secret.data.targetName | string | `"juno-sepolia-common"` | | +| secret.feederGateway.key | string | `"feeder-gateway"` | | +| secret.feederGateway.property | string | `"testnet"` | | +| secret.feederGateway.refreshInterval | string | `"10m"` | | +| secret.feederGateway.secretKey | string | `"testnet"` | | +| secret.feederGateway.secretStoreKind | string | `"ClusterSecretStore"` | | +| secret.feederGateway.secretStoreName | string | `"juno-store"` | | +| secret.feederGateway.targetCreationPolicy | string | `"Owner"` | | +| secret.feederGateway.targetName | string | `"juno-goerli"` | | +| secret.feederGateway.version | string | `"1"` | | | serviceAccount.enabled | bool | `false` | | | serviceAccount.gcpServiceAccount | string | `"monitoring-sa-euw1@juno-prod-nth.iam.gserviceaccount.com"` | | | serviceAccount.name | string | `"juno-pgo"` | | diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 5a8e36903..c2eed4b04 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -60,7 +60,7 @@ spec: storageClassName: premium-rwo resources: requests: - storage: 200Gi + storage: {{ .Values.backupJunoDataJob.storageSize }} --- # ConfigMap for cloning disk manifest apiVersion: v1 diff --git a/charts/kong/README.md b/charts/kong/README.md index 040833bee..6b9789b0a 100644 --- a/charts/kong/README.md +++ b/charts/kong/README.md @@ -267,4 +267,4 @@ The Cloud-Native Ingress and API-management | waitImage | object | `{"enabled":true,"pullPolicy":"IfNotPresent"}` | --------------------------------------------------------------------------- | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/kube-prometheus-stack/README.md b/charts/kube-prometheus-stack/README.md index 48b5e057c..9a2e4dbeb 100644 --- a/charts/kube-prometheus-stack/README.md +++ b/charts/kube-prometheus-stack/README.md @@ -1002,4 +1002,4 @@ Kubernetes: `>=1.16.0-0` | windowsMonitoring.job | string | `"prometheus-windows-exporter"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/lodestar/README.md b/charts/lodestar/README.md index b3c9060ae..cc5621563 100644 --- a/charts/lodestar/README.md +++ b/charts/lodestar/README.md @@ -59,4 +59,4 @@ A Helm chart to deploy the Lodestar Consensus Client using Kubernetes | tolerations | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/loki/README.md b/charts/loki/README.md index 
ec569a1a1..02916f27c 100644 --- a/charts/loki/README.md +++ b/charts/loki/README.md @@ -137,4 +137,4 @@ Kubernetes: `^1.10.0-0` | useExistingAlertingGroup.enabled | bool | `false` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/mev-boost/README.md b/charts/mev-boost/README.md index ec2ab8f04..413e63233 100644 --- a/charts/mev-boost/README.md +++ b/charts/mev-boost/README.md @@ -57,4 +57,4 @@ mev-boost allows proof-of-stake Ethereum consensus clients to outsource block co | tolerations | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/mysql/README.md b/charts/mysql/README.md index 714c16050..88afb56ed 100644 --- a/charts/mysql/README.md +++ b/charts/mysql/README.md @@ -22,4 +22,4 @@ A Helm chart for deploying MySQL with StatefulSet, Service, Secret, and PVC. | storageSize | string | `"100Gi"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/nethgate/README.md b/charts/nethgate/README.md index df6c9f7f1..e7cff4fb6 100644 --- a/charts/nethgate/README.md +++ b/charts/nethgate/README.md @@ -1,1235 +1,291 @@ -## Kong for Kubernetes - -[Kong for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller) -is an open-source Ingress Controller for Kubernetes that offers -API management capabilities with a plugin architecture. - -This chart bootstraps all the components needed to run Kong on a -[Kubernetes](http://kubernetes.io) cluster using the -[Helm](https://helm.sh) package manager. 
- -## TL;DR; - -```bash -helm repo add kong https://charts.konghq.com -helm repo update - -helm install kong/kong --generate-name -``` - -## Table of contents - -- [Prerequisites](#prerequisites) -- [Install](#install) -- [Uninstall](#uninstall) -- [FAQs](#faqs) -- [Kong Enterprise](#kong-enterprise) -- [Deployment Options](#deployment-options) - - [Database](#database) - - [DB-less deployment](#db-less-deployment) - - [Using the Postgres sub-chart](#using-the-postgres-sub-chart) - - [Postgres sub-chart considerations for OpenShift](#postgres-sub-chart-considerations-for-openshift) - - [Runtime package](#runtime-package) - - [Configuration method](#configuration-method) - - [Separate admin and proxy nodes](#separate-admin-and-proxy-nodes) - - [Standalone controller nodes](#standalone-controller-nodes) - - [Hybrid mode](#hybrid-mode) - - [Certificates](#certificates) - - [Control plane node configuration](#control-plane-node-configuration) - - [Data plane node configuration](#data-plane-node-configuration) - - [Cert Manager Integration](#cert-manager-integration) - - [CRD management](#crd-management) - - [InitContainers](#initcontainers) - - [HostAliases](#hostaliases) - - [Sidecar Containers](#sidecar-containers) - - [Migration Sidecar Containers](#migration-sidecar-containers) - - [User Defined Volumes](#user-defined-volumes) - - [User Defined Volume Mounts](#user-defined-volume-mounts) - - [Removing cluster-scoped permissions](#removing-cluster-scoped-permissions) - - [Using a DaemonSet](#using-a-daemonset) - - [Using dnsPolicy and dnsConfig](#using-dnspolicy-and-dnsconfig) - - [Example configurations](#example-configurations) -- [Configuration](#configuration) - - [Kong parameters](#kong-parameters) - - [Kong Service Parameters](#kong-service-parameters) - - [Admin Service mTLS](#admin-service-mtls) - - [Stream listens](#stream-listens) - - [Ingress Controller Parameters](#ingress-controller-parameters) - - [The `env` section](#the-env-section) - - [The `customEnv` section](#the-customenv-section) - - [General Parameters](#general-parameters) - - [The `env` section](#the-env-section-1) - - [The `customEnv` section](#the-customenv-section-1) - - [The `extraLabels` section](#the-extralabels-section) -- [Kong Enterprise Parameters](#kong-enterprise-parameters) - - [Overview](#overview) - - [Prerequisites](#prerequisites-1) - - [Kong Enterprise License](#kong-enterprise-license) - - [Kong Enterprise Docker registry access](#kong-enterprise-docker-registry-access) - - [Service location hints](#service-location-hints) - - [RBAC](#rbac) - - [Sessions](#sessions) - - [Email/SMTP](#emailsmtp) -- [Prometheus Operator integration](#prometheus-operator-integration) -- [Argo CD considerations](#argo-cd-considerations) -- [Changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) -- [Upgrading](https://github.com/Kong/charts/blob/main/charts/kong/UPGRADE.md) -- [Seeking help](#seeking-help) - -## Prerequisites - -- Kubernetes 1.17+. Older chart releases support older Kubernetes versions. - Refer to the [supported version matrix](https://docs.konghq.com/kubernetes-ingress-controller/latest/references/version-compatibility/#kubernetes) - and the [chart changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) - for information about the default chart controller versions and Kubernetes - versions supported by controller releases. -- PV provisioner support in the underlying infrastructure if persistence - is needed for Kong datastore. 
- -## Install - -To install Kong: - -```bash -helm repo add kong https://charts.konghq.com -helm repo update - -helm install kong/kong --generate-name -``` - -## Uninstall - -To uninstall/delete a Helm release `my-release`: - -```bash -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the -chart and deletes the release. - -> **Tip**: List all releases using `helm list` - -## FAQs - -Please read the -[FAQs](https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md) -document. - -## Kong Enterprise - -If using Kong Enterprise, several additional steps are necessary before -installing the chart: - -- Set `enterprise.enabled` to `true` in the `values.yaml` file. -- Update values.yaml to use a Kong Enterprise image. -- Satisfy the two prerequisites below for - [Enterprise License](#kong-enterprise-license) and - [Enterprise Docker Registry](#kong-enterprise-docker-registry-access). -- (Optional) [set a `password` environment variable](#rbac) to create the - initial super-admin. Though not required, this is recommended for users that - wish to use RBAC, as it cannot be done after initial setup. - -Once you have these set, it is possible to install Kong Enterprise. - -Please read through -[Kong Enterprise considerations](#kong-enterprise-parameters) -to understand all settings that are enterprise specific. - -## Deployment Options - -Kong is a highly configurable piece of software that can be deployed -in a number of different ways, depending on your use case. - -All combinations of various runtimes, databases and configuration methods are -supported by this Helm chart. -The recommended approach is to use the Ingress Controller based configuration -along with DB-less mode. - -The following sections detail the various high-level architecture options available: - -### Database - -Kong can run with or without a database (DB-less). By default, this chart -installs Kong without a database. - -You can set the database via the `env.database` parameter. For more details, please -read the [env](#the-env-section) section. - -#### DB-less deployment - -When deploying Kong in DB-less mode (`env.database: "off"`) -and without the Ingress Controller (`ingressController.enabled: false`), -you have to provide a [declarative configuration](https://docs.konghq.com/gateway-oss/latest/db-less-and-declarative-config/#the-declarative-configuration-format) for Kong to run. -You can provide an existing ConfigMap -(`dblessConfig.configMap`) or Secret (`dblessConfig.secret`), or place the whole -configuration into the `dblessConfig.config` parameter in `values.yaml`. See the -example configuration in the default values.yaml for more details. You can use -`--set-file dblessConfig.config=/path/to/declarative-config.yaml` in Helm -commands to substitute in a complete declarative config file. - -Note that externally supplied ConfigMaps are not hashed or tracked in deployment annotations. -Subsequent ConfigMap updates will require user-initiated new deployment rollouts -to apply the new configuration. You should run `kubectl rollout restart deploy` -after updating externally supplied ConfigMap content. - -#### Using the Postgres sub-chart - -The chart can optionally spawn a Postgres instance using [Bitnami's Postgres -chart](https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md) -as a sub-chart. Set `postgresql.enabled=true` to enable the sub-chart. Enabling -this will auto-populate Postgres connection settings in Kong's environment.
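-
-A minimal sketch of enabling the sub-chart through Helm values (the `auth`
-values are illustrative placeholders rather than chart defaults; see the
-Bitnami chart's documentation for the full set of options):
-
-```yaml
-postgresql:
-  enabled: true
-  auth:
-    username: kong
-    password: kong
-    database: kong
-```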
- -The Postgres sub-chart is best used to quickly provision temporary environments -without installing and configuring your database separately. For longer-lived -environments, we recommend you manage your database outside the Kong Helm -release. - -##### Postgres sub-chart considerations for OpenShift - -Due to the default `securityContexts` in the postgres sub-chart, you will need to add the following values to the `postgresql` section to get postgres running on OpenShift: - -```yaml - volumePermissions: - enabled: false - securityContext: - runAsUser: "auto" - primary: - containerSecurityContext: - enabled: false - podSecurityContext: - enabled: false -``` - -### Runtime package - -There are three different packages of Kong that are available: - -- **Kong Gateway**\ - This is the [Open-Source](https://github.com/kong/kong) offering. It is a - full-blown API Gateway and Ingress solution with a wide array of functionality. - When Kong Gateway is combined with the Ingress based configuration method, - you get Kong for Kubernetes. This is the default deployment for this Helm - Chart. -- **Kong Enterprise K8S**\ - This package builds on top of the Open-Source Gateway and bundles in all - the Enterprise-only plugins as well. - When Kong Enterprise K8S is combined with the Ingress based - configuration method, you get Kong for Kubernetes Enterprise. - This package also comes with 24x7 support from Kong Inc. -- **Kong Enterprise**\ - This is the full-blown Enterprise package, which bundles all the - Enterprise functionality like Manager, Portal, Vitals, etc. - This package can't be run in DB-less mode. - -The package to run can be changed via `image.repository` and `image.tag` -parameters. If you would like to run the Enterprise package, please read -the [Kong Enterprise Parameters](#kong-enterprise-parameters) section. - -### Configuration method - -Kong can be configured via two methods: -- **Ingress and CRDs**\ - The configuration for Kong is done via `kubectl` and Kubernetes-native APIs. - This is also known as Kong Ingress Controller or Kong for Kubernetes and is - the default deployment pattern for this Helm Chart. The configuration - for Kong is managed via Ingress and a few - [Custom Resources](https://docs.konghq.com/kubernetes-ingress-controller/latest/concepts/custom-resources). - For more details, please read the - [documentation](https://docs.konghq.com/kubernetes-ingress-controller/) - on Kong Ingress Controller. - To configure and fine-tune the controller, please read the - [Ingress Controller Parameters](#ingress-controller-parameters) section. -- **Admin API**\ - This is the traditional method of running and configuring Kong. - By default, the Admin API of Kong is not exposed as a Service. This - can be controlled via `admin.enabled` and `env.admin_listen` parameters. - -### Separate admin and proxy nodes - -*Note: although this section is titled "Separate admin and proxy nodes", this -split release technique is generally applicable to any deployment with -different types of Kong nodes. Separating Admin API and proxy nodes is one of -the more common use cases for splitting across multiple releases, but you can -also split releases for split proxy and Developer Portal nodes, multiple groups -of proxy nodes with separate listen configurations for network segmentation, etc.
-However, it does not apply to hybrid mode, as only the control plane release -interacts with the database.* - -Users may wish to split their Kong deployment into multiple instances that only -run some of Kong's services (i.e. you run `helm install` once for every -instance type you wish to create). - -To disable Kong services on an instance, you should set `SVC.enabled`, -`SVC.http.enabled`, `SVC.tls.enabled`, and `SVC.ingress.enabled` all to -`false`, where `SVC` is `proxy`, `admin`, `manager`, `portal`, or `portalapi`. - -The standard chart upgrade automation process assumes that there is only a -single Kong release in the Kong cluster, and runs both `migrations up` and -`migrations finish` jobs. To handle clusters split across multiple releases, -you should: -1. Upgrade one of the releases with `helm upgrade RELEASENAME -f values.yaml - --set migrations.preUpgrade=true --set migrations.postUpgrade=false`. -2. Upgrade all but one of the remaining releases with `helm upgrade RELEASENAME - -f values.yaml --set migrations.preUpgrade=false --set - migrations.postUpgrade=false`. -3. Upgrade the final release with `helm upgrade RELEASENAME -f values.yaml - --set migrations.preUpgrade=false --set migrations.postUpgrade=true`. - -This ensures that all instances are using the new Kong package before running -`kong migrations finish`. - -Users should note that Helm supports supplying multiple values.yaml files, -allowing you to separate shared configuration from instance-specific -configuration. For example, you may have a shared values.yaml that contains -environment variables and other common settings, and then several -instance-specific values.yamls that contain service configuration only. You can -then create releases with: - -```bash -helm install proxy-only -f shared-values.yaml -f only-proxy.yaml kong/kong -helm install admin-only -f shared-values.yaml -f only-admin.yaml kong/kong -``` - -### Standalone controller nodes - -The chart can deploy releases that contain the controller only, with no Kong -container, by setting `deployment.kong.enabled: false` in values.yaml. There -are several controller settings that must be populated manually in this -scenario and several settings that are useful when using multiple controllers: - -* `ingressController.env.kong_admin_url` must be set to the Kong Admin API URL. - If the Admin API is exposed by a service in the cluster, this should look - something like `https://my-release-kong-admin.kong-namespace.svc:8444` -* `ingressController.env.publish_service` must be set to the Kong proxy - service, e.g. `namespace/my-release-kong-proxy`. -* `ingressController.ingressClass` should be set to a different value for each - instance of the controller. -* `ingressController.env.kong_admin_filter_tag` should be set to a different value - for each instance of the controller. -* If using Kong Enterprise, `ingressController.env.kong_workspace` can - optionally create configuration in a workspace other than `default`. - -Standalone controllers require a database-backed Kong instance, as DB-less mode -requires that a single controller generate a complete Kong configuration. - -### Hybrid mode - -Kong supports [hybrid mode -deployments](https://docs.konghq.com/2.0.x/hybrid-mode/) as of Kong 2.0.0 and -[Kong Enterprise 2.1.0](https://docs.konghq.com/enterprise/2.1.x/deployment/hybrid-mode/). 
-These deployments split Kong nodes into control plane (CP) nodes, which provide -the admin API and interact with the database, and data plane (DP) nodes, which -provide the proxy and receive configuration from control plane nodes. - -You can deploy hybrid mode Kong clusters by [creating separate releases for each node -type](#separate-admin-and-proxy-nodes), i.e. use separate control and data -plane values.yamls that are then installed separately. The [control -plane](#control-plane-node-configuration) and [data -plane](#data-plane-node-configuration) configuration sections below cover the -values.yaml specifics for each. - -Cluster certificates are not generated automatically. You must [create a -certificate and key pair](#certificates) for intra-cluster communication. - -When upgrading the Kong version, you must [upgrade the control plane release -first and then upgrade the data plane release](https://docs.konghq.com/gateway/latest/plan-and-deploy/hybrid-mode/#version-compatibility). - -#### Certificates - -> This example shows how to use Kong Hybrid mode with `cluster_mtls: shared`. -> For an example of `cluster_mtls: pki` see the [hybrid-cert-manager example](https://github.com/Kong/charts/blob/main/charts/kong/example-values/hybrid-cert-manager/) - -Hybrid mode uses TLS to secure the CP/DP node communication channel, and -requires certificates for it. You can generate these either using `kong hybrid -gen_cert` on a local Kong installation or using OpenSSL: - -```bash -openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) \ - -keyout /tmp/cluster.key -out /tmp/cluster.crt \ - -days 1095 -subj "/CN=kong_clustering" -``` - -You must then place these certificates in a Secret: - -```bash -kubectl create secret tls kong-cluster-cert --cert=/tmp/cluster.crt --key=/tmp/cluster.key -``` - -#### Control plane node configuration - -You must configure the control plane nodes to mount the certificate secret on -the container filesystem and serve it from the cluster listen. In values.yaml: - -```yaml -secretVolumes: -- kong-cluster-cert -``` - -```yaml -env: - role: control_plane - cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt - cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key -``` - -Furthermore, you must enable the cluster listen and Kubernetes Service, and -should typically disable the proxy: - -```yaml -cluster: - enabled: true - tls: - enabled: true - servicePort: 8005 - containerPort: 8005 - -proxy: - enabled: false -``` - -Enterprise users with Vitals enabled must also enable the cluster telemetry -service: - -```yaml -clustertelemetry: - enabled: true - tls: - enabled: true - servicePort: 8006 - containerPort: 8006 -``` - -If using the ingress controller, you must also specify the DP proxy service as -its publish target to keep Ingress status information up to date: - -```yaml -ingressController: - env: - publish_service: hybrid/example-release-data-kong-proxy -``` - -Replace `hybrid` with your DP nodes' namespace and `example-release-data` with -the name of the DP release. - -#### Data plane node configuration - -Data plane configuration also requires the certificate and `role` -configuration, and the database should always be set to `off`. You must also -trust the cluster certificate and indicate what hostname/port Kong should use -to find control plane nodes. - -Though not strictly required, you should disable the admin service (it will not -work on DP nodes anyway, but should be disabled to avoid creating an invalid -Service resource).
- -```yaml -secretVolumes: -- kong-cluster-cert -``` - -```yaml -admin: - enabled: false -``` - -```yaml -env: - role: data_plane - database: "off" - cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt - cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key - lua_ssl_trusted_certificate: /etc/secrets/kong-cluster-cert/tls.crt - cluster_control_plane: control-plane-release-name-kong-cluster.hybrid.svc.cluster.local:8005 - cluster_telemetry_endpoint: control-plane-release-name-kong-clustertelemetry.hybrid.svc.cluster.local:8006 # Enterprise-only -``` - -Note that the `cluster_control_plane` value will differ depending on your -environment. `control-plane-release-name` will change to your CP release name, -`hybrid` will change to whatever namespace it resides in. See [Kubernetes' -documentation on Service -DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) -for more detail. - -If you use multiple Helm releases to manage different data plane configurations -attached to the same control plane, setting the `deployment.hostname` field -will help you keep track of which is which in the `/clustering/data-plane` -endpoint. - -### Cert Manager Integration - -By default, Kong will create self-signed certificates on start for its TLS -listens if you do not provide your own. The chart can create -[cert-manager](https://cert-manager.io/docs/) Certificates for its Services and -configure them for you. To use this integration, install cert-manager, create -an issuer, set `certificates.enabled: true` in values.yaml, and set your issuer -name in `certificates.issuer` or `certificates.clusterIssuer` depending on the -issuer type. - -If you do not have an issuer available, you can install the example [self-signed ClusterIssuer](https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers) -and set `certificates.clusterIssuer: selfsigned-issuer` for testing. You -should, however, migrate to an issuer using a CA your clients trust for actual -usage. - -The `proxy`, `admin`, `portal`, and `cluster` subsections under `certificates` -let you choose hostnames, override issuers, set `subject` or set `privateKey` on a per-certificate basis for the -proxy, admin API and Manager, Portal and Portal API, and hybrid mode mTLS -services, respectively. - -To use hybrid mode, the control and data plane releases must use the same -issuer for their cluster certificates. - -### CRD management - -Earlier versions of this chart (<2.0) created CRDs associated with the ingress -controller as part of the release. This raised two challenges: - -- Multiple release of the chart would conflict with one another, as each would - attempt to create its own set of CRDs. -- Because deleting a CRD also deletes any custom resources associated with it, - deleting a release of the chart could destroy user configuration without - providing any means to restore it. - -Helm 3 introduced a simplified CRD management method that was safer, but -requires some manual work when a chart added or modified CRDs: CRDs are created -on install if they are not already present, but are not modified during -release upgrades or deletes. Our chart release upgrade instructions call out -when manual action is necessary to update CRDs. This CRD handling strategy is -recommended for most users. - -Some users may wish to manage their CRDs automatically. If you manage your CRDs -this way, we _strongly_ recommend that you back up all associated custom -resources in the event you need to recover from unintended CRD deletion. 
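-
-As a rough sketch of such a backup (the resource list here is illustrative;
-adjust it to match the Kong CRDs actually installed in your cluster):
-
-```bash
-kubectl get kongplugins,kongclusterplugins,kongconsumers,kongingresses \
-  --all-namespaces -o yaml > kong-custom-resources-backup.yaml
-```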
- -While Helm 3's CRD management system is recommended, there is no simple means -of migrating away from release-managed CRDs if you previously installed your -release with the old system (you would need to back up your existing custom -resources, delete your release, reinstall, and restore your custom resources -after). As such, the chart detects if you currently use release-managed CRDs -and continues to use the old CRD templates when using chart version 2.0+. If -you do (your resources will have a `meta.helm.sh/release-name` annotation), we -_strongly_ recommend that you back up all associated custom resources in the -event you need to recover from unintended CRD deletion. - -### InitContainers - -The chart is able to deploy initContainers along with Kong. This can be very -useful when there's a requirement for custom initialization. The -`deployment.initContainers` field in values.yaml takes an array of objects that -get appended as-is to the existing `spec.template.initContainers` array in the -kong deployment resource. - -### HostAliases - -The chart is able to inject host aliases into containers. This can be very useful -when you need to resolve additional domain names which can't be looked up -directly from the DNS server. The `deployment.hostAliases` field in values.yaml -takes an array of objects that is set to the `spec.template.hostAliases` field in the -kong deployment resource. - -### Sidecar Containers - -The chart can deploy additional containers along with the Kong and Ingress -Controller containers, sometimes referred to as "sidecar containers". This can -be useful to include network proxies or logging services along with Kong. The -`deployment.sidecarContainers` field in values.yaml takes an array of objects -that get appended as-is to the existing `spec.template.spec.containers` array -in the Kong deployment resource. - -### Migration Sidecar Containers - -In the same way sidecar containers are attached to the Kong and Ingress -Controller containers, the chart can add sidecars to the containers that run -the migrations. The -`migrations.sidecarContainers` field in values.yaml takes an array of objects -that get appended as-is to the existing `spec.template.spec.containers` array -in the pre-upgrade-migrations, post-upgrade-migrations and migration resources. -Keep in mind that these containers should be finite and should terminate -together with the migration containers; otherwise the migration could be marked -as finished while the deployment of the chart reaches its timeout. - -### User Defined Volumes - -The chart can deploy additional volumes along with Kong. This can be useful to -include additional volumes which are required during the initialization phase -(initContainers). The `deployment.userDefinedVolumes` field in values.yaml -takes an array of objects that get appended as-is to the existing -`spec.template.spec.volumes` array in the kong deployment resource. - -### User Defined Volume Mounts - -The chart can mount user-defined volumes. The -`deployment.userDefinedVolumeMounts` and -`ingressController.userDefinedVolumeMounts` fields in values.yaml take an array -of objects that get appended as-is to the existing -`spec.template.spec.containers[].volumeMounts` and -`spec.template.spec.initContainers[].volumeMounts` arrays in the kong deployment -resource. - -### Removing cluster-scoped permissions - -You can limit the controller's access to allow it to only watch specific -namespaces for namespaced resources. By default, the controller watches all -namespaces.
Limiting access requires several changes to configuration: - -- Set `ingressController.watchNamespaces` to a list of namespaces you want to - watch. The chart will automatically generate roles for each namespace and - assign them to the controller's service account. -- Optionally set `ingressController.installCRDs=false` if your user role (the - role you use when running `helm install`, not the controller service - account's role) does not have access to get CRDs. By default, the chart - attempts to look up the controller CRDs for [a legacy behavior - check](#crd-management). - -### Using a DaemonSet - -Setting `deployment.daemonset: true` deploys Kong using a [DaemonSet -controller](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) -instead of a Deployment controller. This runs a Kong Pod on every kubelet in -the Kubernetes cluster. For such a configuration it may be desirable to configure -Pods to use the network of the host they run on instead of a dedicated network -namespace. The benefit of this approach is that Kong can bind ports directly -to Kubernetes nodes' network interfaces, without the extra network translation -imposed by NodePort Services. This can be achieved by setting `deployment.hostNetwork: true`. - -### Using dnsPolicy and dnsConfig - -The chart is able to inject custom DNS configuration into containers. This can be useful when you have an EKS cluster with [NodeLocal DNSCache](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) configured and attach AWS security groups directly to pods using the [security groups for pods feature](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html). - -### Example configurations - -Several example values.yaml are available in the -[example-values](https://github.com/Kong/charts/blob/main/charts/kong/example-values/) -directory. - -## Configuration - -### Kong parameters - -| Parameter | Description | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- | -| image.repository | Kong image | `kong` | -| image.tag | Kong image version | `3.5` | -| image.effectiveSemver | Semantic version to use for version-dependent features (if `tag` is not a semver) | | -| image.pullPolicy | Image pull policy | `IfNotPresent` | -| image.pullSecrets | Image pull secrets | `null` | -| replicaCount | Kong instance count. It has no effect when `autoscaling.enabled` is set to true | `1` | -| plugins | Install custom plugins into Kong via ConfigMaps or Secrets | `{}` | -| env | Additional [Kong configurations](https://getkong.org/docs/latest/configuration/) | | -| customEnv | Custom Environment variables without `KONG_` prefix | | -| migrations.preUpgrade | Run "kong migrations up" jobs | `true` | -| migrations.postUpgrade | Run "kong migrations finish" jobs | `true` | -| migrations.annotations | Annotations for migration job pods | `{"sidecar.istio.io/inject": "false"}` | -| migrations.jobAnnotations | Additional annotations for migration jobs | `{}` | -| migrations.backoffLimit | Override the system backoffLimit | `{}` | -| waitImage.enabled | Spawn init containers that wait for the database before starting Kong | `true` | -| waitImage.repository | Image used to wait for database to become ready.
Uses the Kong image if none set | | -| waitImage.tag | Tag for image used to wait for database to become ready | | -| waitImage.pullPolicy | Wait image pull policy | `IfNotPresent` | -| postgresql.enabled | Spin up a new postgres instance for Kong | `false` | -| dblessConfig.configMap | Name of an existing ConfigMap containing the `kong.yml` file. This must have the key `kong.yml`.| `` | -| dblessConfig.config | Yaml configuration file for the dbless (declarative) configuration of Kong | see in `values.yaml` | - -#### Kong Service Parameters - -The various `SVC.*` parameters below are common to the various Kong services -(the admin API, proxy, Kong Manager, the Developer Portal, and the Developer -Portal API) and define their listener configuration, K8S Service properties, -and K8S Ingress properties. Defaults are listed only if consistent across the -individual services: see values.yaml for their individual default values. - -`SVC` below can be substituted with each of: -* `proxy` -* `udpProxy` -* `admin` -* `manager` -* `portal` -* `portalapi` -* `cluster` -* `clustertelemetry` -* `status` - -`status` is intended for internal use within the cluster. Unlike other -services it cannot be exposed externally, and cannot create a Kubernetes -service or ingress. It supports the settings under `SVC.http` and `SVC.tls` -only. - -`cluster` is used on hybrid mode control plane nodes. It does not support the -`SVC.http.*` settings (cluster communications must be TLS-only) or the -`SVC.ingress.*` settings (cluster communication requires TLS client -authentication, which cannot pass through an ingress proxy). `clustertelemetry` -is similar, and used when Vitals is enabled on Kong Enterprise control plane -nodes. - -`udpProxy` is used for UDP stream listens (Kubernetes does not yet support -mixed TCP/UDP LoadBalancer Services). It _does not_ support the `http`, `tls`, -or `ingress` sections, as it is used only for stream listens. - -| Parameter | Description | Default | -|------------------------------------|---------------------------------------------------------------------------------------|--------------------------| -| SVC.enabled | Create Service resource for SVC (admin, proxy, manager, etc.) | | -| SVC.http.enabled | Enables http on the service | | -| SVC.http.servicePort | Service port to use for http | | -| SVC.http.containerPort | Container port to use for http | | -| SVC.http.nodePort | Node port to use for http | | -| SVC.http.hostPort | Host port to use for http | | -| SVC.http.parameters | Array of additional listen parameters | `[]` | -| SVC.tls.enabled | Enables TLS on the service | | -| SVC.tls.containerPort | Container port to use for TLS | | -| SVC.tls.servicePort | Service port to use for TLS | | -| SVC.tls.nodePort | Node port to use for TLS | | -| SVC.tls.hostPort | Host port to use for TLS | | -| SVC.tls.overrideServiceTargetPort | Override service port to use for TLS without touching Kong containerPort | | -| SVC.tls.parameters | Array of additional listen parameters | `["http2"]` | -| SVC.type | k8s service type. 
Options: NodePort, ClusterIP, LoadBalancer | | -| SVC.clusterIP | k8s service clusterIP | | -| SVC.loadBalancerClass | loadBalancerClass to use for LoadBalancer provisioning | | -| SVC.loadBalancerSourceRanges | Limit service access to CIDRs if set and service type is `LoadBalancer` | `[]` | -| SVC.loadBalancerIP | Reuse an existing ingress static IP for the service | | -| SVC.externalIPs | IPs for which nodes in the cluster will also accept traffic for the service | `[]` | -| SVC.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | | -| SVC.ingress.enabled | Enable ingress resource creation (works with SVC.type=ClusterIP) | `false` | -| SVC.ingress.ingressClassName | Set the ingressClassName to associate this Ingress with an IngressClass | | -| SVC.ingress.hostname | Ingress hostname | `""` | -| SVC.ingress.path | Ingress path. | `/` | -| SVC.ingress.pathType | Ingress pathType. One of `ImplementationSpecific`, `Exact` or `Prefix` | `ImplementationSpecific` | -| SVC.ingress.hosts | Slice of hosts configurations, including `hostname`, `path` and `pathType` keys | `[]` | -| SVC.ingress.tls | Name of secret resource or slice of `secretName` and `hosts` keys | | -| SVC.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` | -| SVC.ingress.labels | Ingress labels. Additional custom labels to add to the ingress. | `{}` | -| SVC.annotations | Service annotations | `{}` | -| SVC.labels | Service labels | `{}` | - -#### Admin Service mTLS - -On top of the common parameters listed above, the `admin` service supports parameters for mTLS client verification. -If either `admin.tls.client.caBundle` or `admin.tls.client.secretName` is set, the admin service will be configured to -require mTLS client verification. If both are set, `admin.tls.client.caBundle` will take precedence. - -| Parameter | Description | Default | -|-----------------------------|---------------------------------------------------------------------------------------------|---------| -| admin.tls.client.caBundle | CA certificate to use for TLS verification of the Admin API client (PEM-encoded). | `""` | -| admin.tls.client.secretName | CA certificate secret name - must contain a `tls.crt` key with the PEM-encoded certificate. | `""` | - -#### Stream listens - -The proxy configuration additionally supports creating stream listens.
These -are configured using an array of objects under `proxy.stream` and `udpProxy.stream`: - -| Parameter | Description | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- | -| protocol | The listen protocol, either "TCP" or "UDP" | | -| containerPort | Container port to use for a stream listen | | -| servicePort | Service port to use for a stream listen | | -| nodePort | Node port to use for a stream listen | | -| hostPort | Host port to use for a stream listen | | -| parameters | Array of additional listen parameters | `[]` | - -### Ingress Controller Parameters - -All of the following properties are nested under the `ingressController` -section of `values.yaml` file: - -| Parameter | Description | Default | -|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------| -| enabled | Deploy the ingress controller, rbac and crd | true | -| image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller | -| image.tag | Version of the ingress controller | `3.0` | -| image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | | -| readinessProbe | Kong ingress controllers readiness probe | | -| livenessProbe | Kong ingress controllers liveness probe | | -| installCRDs | Legacy toggle for Helm 2-style CRD management. Should not be set [unless necessary due to cluster permissions](#removing-cluster-scoped-permissions). | false | -| env | Specify Kong Ingress Controller configuration via environment variables | | -| customEnv | Specify custom environment variables (without the CONTROLLER_ prefix) | | -| ingressClass | The name of this controller's ingressClass | kong | -| ingressClassAnnotations | The ingress-class value for controller | kong | -| args | List of ingress-controller cli arguments | [] | -| watchNamespaces | List of namespaces to watch. Watches all namespaces if empty | [] | -| admissionWebhook.enabled | Whether to enable the validating admission webhook | true | -| admissionWebhook.failurePolicy | How unrecognized errors from the admission endpoint are handled (Ignore or Fail) | Ignore | -| admissionWebhook.port | The port the ingress controller will listen on for admission webhooks | 8080 | -| admissionWebhook.address | The address the ingress controller will listen on for admission webhooks, if not 0.0.0.0 | | -| admissionWebhook.annotations | Annotations for the Validation Webhook Configuration | | -| admissionWebhook.certificate.provided | Use a provided certificate. When set to false, the chart will automatically generate a certificate. | false | -| admissionWebhook.certificate.secretName | Name of the TLS secret for the provided webhook certificate | | -| admissionWebhook.certificate.caBundle | PEM encoded CA bundle which will be used to validate the provided webhook certificate | | -| admissionWebhook.namespaceSelector | Add namespaceSelector to the webhook. Please go to [Kubernetes doc for the specs](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) | | -| admissionWebhook.timeoutSeconds | Kubernetes `apiserver`'s timeout when running this webhook. Default: 10 seconds. 
| | -| userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | | -| userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | | -| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pod | 30 | -| gatewayDiscovery.enabled | Enables Kong instance service discovery (for more details see [gatewayDiscovery section][gd_section]) | false | -| gatewayDiscovery.generateAdminApiService | Generate the admin API service name based on the release name (for more details see [gatewayDiscovery section][gd_section]) | false | -| gatewayDiscovery.adminApiService.namespace | The namespace of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | `.Release.Namespace` | -| gatewayDiscovery.adminApiService.name | The name of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | "" | -| konnect.enabled | Enable synchronisation of data plane configuration with Konnect Runtime Group | false | -| konnect.runtimeGroupID | Konnect Runtime Group's unique identifier. | | -| konnect.apiHostname | Konnect API hostname. Defaults to a production US-region. | us.kic.api.konghq.com | -| konnect.tlsClientCertSecretName | Name of the secret that contains Konnect Runtime Group's client TLS certificate. | konnect-client-tls | -| konnect.license.enabled | Enable automatic license provisioning for Gateways managed by Ingress Controller in Konnect mode. | false | -| adminApi.tls.client.enabled | Enable TLS client verification for the Admin API. By default, Helm will generate certificates automatically. | false | -| adminApi.tls.client.certProvided | Use user-provided certificates. If set to false, Helm will generate certificates. | false | -| adminApi.tls.client.secretName | Client TLS certificate/key pair secret name. Can be also set when `certProvided` is false to enforce a generated secret's name. | "" | -| adminApi.tls.client.caSecretName | CA TLS certificate/key pair secret name. Can be also set when `certProvided` is false to enforce a generated secret's name. | "" | - -[gd_section]: #the-gatewayDiscovery-section - -#### The `env` section -For a complete list of all configuration values you can set in the -`env` section, please read the Kong Ingress Controller's -[configuration document](https://docs.konghq.com/kubernetes-ingress-controller/latest/reference/cli-arguments/). - -#### The `customEnv` section - -The `customEnv` section can be used to configure all environment variables other than Ingress Controller configuration. -Any key value put under this section translates to environment variables. -Every key is upper-cased before setting the environment variable. - -An example: - -```yaml -kong: - ingressController: - customEnv: - TZ: "Europe/Berlin" -``` - -#### The `gatewayDiscovery` section - -Kong Ingress Controller v2.9 has introduced gateway discovery which allows -the controller to discover Gateway instances that it should configure using -an Admin API Kubernetes service. - -Using this feature requires a split release installation of Gateways and Ingress Controller. -For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md). -or use the [`ingress` chart](../ingress/README.md) which can handle this for you. 
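-
-As a rough sketch ahead of the details below, a controller-only release that
-discovers Gateways through an admin API Service might set (the service name
-and namespace here are illustrative):
-
-```yaml
-ingressController:
-  gatewayDiscovery:
-    enabled: true
-    adminApiService:
-      namespace: kong
-      name: my-release-kong-admin
-```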
- -##### Configuration - -You'll be able to configure this feature through the configuration section under -`ingressController.gatewayDiscovery`: - -- If `ingressController.gatewayDiscovery.enabled` is set to `false`: the ingress controller - will control a pre-determined set of Gateway instances based on Admin API URLs - (provided under the hood via the `CONTROLLER_KONG_ADMIN_URL` environment variable). - -- If `ingressController.gatewayDiscovery.enabled` is set to `true`: the ingress controller - will dynamically locate Gateway instances by watching the specified Kubernetes - service (provided under the hood via the `CONTROLLER_KONG_ADMIN_SVC` environment variable). - - The following admin API Service flags have to be present in order for gateway - discovery to work: - - - `ingressController.gatewayDiscovery.adminApiService.name` - - `ingressController.gatewayDiscovery.adminApiService.namespace` - - If you set `ingressController.gatewayDiscovery.generateAdminApiService` to `true`, - the chart will generate values for `name` and `namespace` based on the current release name and - namespace. This is useful when consuming the `kong` chart as a subchart. - -Additionally, you can control the addresses that are generated for your Gateways -via the `--gateway-discovery-dns-strategy` CLI flag that can be set on the Ingress Controller -(or an equivalent environment variable: `CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY`). -It accepts 3 values which change the way that Gateway addresses are generated: -- `service` - for service-scoped pod DNS names: `pod-ip-address.service-name.my-namespace.svc.cluster-domain.example` -- `pod` - for namespace-scoped pod DNS names: `pod-ip-address.my-namespace.pod.cluster-domain.example` -- `ip` (default, retains behavior introduced in v2.9) - for regular IP addresses - -When using `gatewayDiscovery`, you should consider configuring the Admin service to use mTLS client verification to make -this interface secure. -Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway instances. - -On the controller release side, that can be achieved by setting `ingressController.adminApi.tls.client.enabled` to `true`. -By default, Helm will generate a certificate Secret named `<release name>-admin-api-keypair` and -a CA Secret named `<release name>-admin-api-ca-keypair` for you. - -To provide your own cert, set `ingressController.adminApi.tls.client.certProvided` to -`true`, `ingressController.adminApi.tls.client.secretName` to the name of the Secret containing your client cert, and `ingressController.adminApi.tls.client.caSecretName` to the name of the Secret containing your CA cert. - -On the Gateway release side, set either `admin.tls.client.secretName` to the name of your CA Secret or set `admin.tls.client.caBundle` to the CA certificate string. - -### General Parameters - -| Parameter | Description | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- | -| namespace | Namespace to deploy chart resources | | -| deployment.kong.enabled | Enable or disable deploying Kong | `true` | -| deployment.minReadySeconds | Minimum number of seconds for which newly created pods should be ready without any of their containers crashing, for it to be considered available. | | -| deployment.initContainers | Create initContainers.
Please go to Kubernetes doc for the spec of the initContainers | | -| deployment.daemonset | Use a DaemonSet instead of a Deployment | `false` | -| deployment.hostname | Set the Deployment's `.spec.template.hostname`. Kong reports this as its hostname. | | -| deployment.hostNetwork | Enable hostNetwork, which binds the ports to the host | `false` | -| deployment.userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | | -| deployment.userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | | -| deployment.serviceAccount.create | Create Service Account for the Deployment / Daemonset and the migrations | `true` | -| deployment.serviceAccount.automountServiceAccountToken | Enable ServiceAccount token automount in Kong deployment | `false` | -| deployment.serviceAccount.name | Name of the Service Account, a default one will be generated if left blank. | "" | -| deployment.serviceAccount.annotations | Annotations for the Service Account | {} | -| deployment.test.enabled | Enable creation of test resources for use with "helm test" | `false` | -| autoscaling.enabled | Set this to `true` to enable autoscaling | `false` | -| autoscaling.minReplicas | Set minimum number of replicas | `2` | -| autoscaling.maxReplicas | Set maximum number of replicas | `5` | -| autoscaling.behavior | Sets the [behavior for scaling up and down](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior) | `{}` | -| autoscaling.targetCPUUtilizationPercentage | Target percentage for when autoscaling takes effect. Only used if cluster does not support `autoscaling/v2` or `autoscaling/v2beta2` | `80` | -| autoscaling.metrics | Metrics used for autoscaling for clusters that support `autoscaling/v2` or `autoscaling/v2beta2` | See [values.yaml](values.yaml) | -| updateStrategy | Update strategy for deployment | `{}` | -| readinessProbe | Kong readiness probe | | -| livenessProbe | Kong liveness probe | | -| startupProbe | Kong startup probe | | -| lifecycle | Proxy container lifecycle hooks | see `values.yaml` | -| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pods | 30 | -| affinity | Node/pod affinities | | -| topologySpreadConstraints | Control how Pods are spread across cluster among failure-domains | | -| nodeSelector | Node labels for pod assignment | `{}` | -| deploymentAnnotations | Annotations to add to deployment | see `values.yaml` | -| podAnnotations | Annotations to add to each pod | see `values.yaml` | -| podLabels | Labels to add to each pod | `{}` | -| resources | Pod resource requests & limits | `{}` | -| tolerations | List of node taints to tolerate | `[]` | -| dnsPolicy | Pod dnsPolicy | | -| dnsConfig | Pod dnsConfig | | -| podDisruptionBudget.enabled | Enable PodDisruptionBudget for Kong | `false` | -| podDisruptionBudget.maxUnavailable | Represents the maximum number of Pods that can be unavailable (integer or percentage) | `50%` | -| podDisruptionBudget.minAvailable | Represents the number of Pods that must be available (integer or percentage) | | -| podSecurityPolicy.enabled | Enable podSecurityPolicy for Kong | `false` | -| podSecurityPolicy.labels | Labels to add to podSecurityPolicy for Kong | `{}` | -| podSecurityPolicy.annotations | Annotations to add to podSecurityPolicy for Kong | `{}` | -| podSecurityPolicy.spec |
Collection of [PodSecurityPolicy settings](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#what-is-a-pod-security-policy) | | -| priorityClassName | Set pod scheduling priority class for Kong pods | `""` | -| secretVolumes | Mount given secrets as a volume in the Kong container to override default certs and keys. | `[]` | -| securityContext | Set the securityContext for Kong Pods | `{}` | -| containerSecurityContext | Set the securityContext for Containers | See values.yaml | -| serviceMonitor.enabled | Create ServiceMonitor for Prometheus Operator | `false` | -| serviceMonitor.interval | Scraping interval | `30s` | -| serviceMonitor.namespace | Where to create ServiceMonitor | | -| serviceMonitor.labels | ServiceMonitor labels | `{}` | -| serviceMonitor.targetLabels | ServiceMonitor targetLabels | `{}` | -| serviceMonitor.honorLabels | ServiceMonitor honorLabels | `{}` | -| serviceMonitor.metricRelabelings | ServiceMonitor metricRelabelings | `{}` | -| extraConfigMaps | ConfigMaps to add to mounted volumes | `[]` | -| extraSecrets | Secrets to add to mounted volumes | `[]` | -| nameOverride | Replaces "kong" in resource names, like "RELEASENAME-nameOverride" instead of "RELEASENAME-kong" | `""` | -| fullnameOverride | Overrides the entire resource name string | `""` | -| extraObjects | Create additional k8s resources | `[]` | - -**Note:** If you are using `deployment.hostNetwork` to bind to lower ports (< 1024), which may be the desired option (ports 80 and 443), you also -need to tweak the `containerSecurityContext` configuration as in the example: - -```yaml -containerSecurityContext: # run as root to bind to lower ports - capabilities: - add: [NET_BIND_SERVICE] - runAsGroup: 0 - runAsNonRoot: false - runAsUser: 0 -``` - -**Note:** The default `podAnnotations` values disable inbound proxying for Kuma -and Istio. This is appropriate when using Kong as a gateway for external -traffic inbound into the cluster. - -If you want to use Kong as an internal proxy within the cluster network, you -should enable the inbound mesh proxies: - -```yaml -# Enable inbound mesh proxying for Kuma and Istio -podAnnotations: - kuma.io/gateway: disabled - traffic.sidecar.istio.io/includeInboundPorts: "*" -``` - -#### The `env` section - -The `env` section can be used to configure all properties of Kong. -Any key value put under this section translates to environment variables -used to control Kong's configuration. Every key is prefixed with `KONG_` -and upper-cased before setting the environment variable. - -Furthermore, all `kong.env` parameters can also accept a mapping instead of a -value to ensure the parameters can be set through configmaps and secrets. - -An example: - -```yaml -kong: - env: # load PG password from a secret dynamically - pg_user: kong - pg_password: - valueFrom: - secretKeyRef: - key: kong - name: postgres - nginx_worker_processes: "2" -``` - -For a complete list of Kong configurations, please check the -[Kong configuration docs](https://docs.konghq.com/latest/configuration). - -> **Tip**: You can use the default [values.yaml](values.yaml) - -#### The `customEnv` section - -The `customEnv` section can be used to configure all environment variables other than Kong configuration. -Any key value put under this section translates to environment variables -that can be used in Kong's plugin configurations. Every key is upper-cased before setting the environment variable.
- -An example: - -```yaml -kong: - customEnv: - api_token: - valueFrom: - secretKeyRef: - key: token - name: api_key - client_name: testClient -``` - -#### The `extraLabels` section - -The `extraLabels` section can be used to configure some extra labels that will be added to each Kubernetes object generated. - -For example, you can add the `acme.com/some-key: some-value` label to each Kubernetes object by putting the following in your Helm values: - -```yaml -extraLabels: - acme.com/some-key: some-value -``` - -## Kong Enterprise Parameters - -### Overview - -Kong Enterprise requires some additional configuration not needed when using -Kong Open-Source. To use Kong Enterprise, at the minimum, -you need to do the following: - -- Set `enterprise.enabled` to `true` in the `values.yaml` file. -- Update values.yaml to use a Kong Enterprise image. -- Satisfy the two prerequisites below for Enterprise License and - Enterprise Docker Registry. -- (Optional) [set a `password` environment variable](#rbac) to create the - initial super-admin. Though not required, this is recommended for users that - wish to use RBAC, as it cannot be done after initial setup. - -Once you have these set, it is possible to install Kong Enterprise, -but please make sure to review the below sections for other settings that -you should consider configuring before installing Kong. - -Some of the more important configuration is grouped in sections -under the `.enterprise` key in values.yaml, though most enterprise-specific -configuration can be placed under the `.env` key. - -### Prerequisites - -#### Kong Enterprise License - -Kong Enterprise 2.3+ can run with or without a license. If you wish to run 2.3+ -without a license, you can skip this step and leave `enterprise.license_secret` -unset. In this case only a limited subset of features will be available. -Earlier versions require a license. - -If you have paid for a license, but you do not have a copy of yours, please -contact Kong Support. Once you have it, you will need to store it in a Secret: - -```bash -kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -``` - -Set the secret name in `values.yaml`, in the `.enterprise.license_secret` key. -Please ensure the above secret is created in the same namespace in which -Kong is going to be deployed. - -#### Kong Enterprise Docker registry access - -Kong Enterprise versions 2.2 and earlier use a private Docker registry and -require a pull secret. **If you use 2.3 or newer, you can skip this step.** - -You should have received credentials to log into docker hub after -purchasing Kong Enterprise. After logging in, you can retrieve your API key -from \<your username\> \> Edit Profile \> API Key. Use this to create registry -secrets: - -```bash -kubectl create secret docker-registry kong-enterprise-edition-docker \ - --docker-server=hub.docker.io \ - --docker-username=<username> \ - --docker-password=<password> -secret/kong-enterprise-edition-docker created -``` - -Set the secret names in `values.yaml` in the `image.pullSecrets` section. -Again, please ensure the above secret is created in the same namespace in which -Kong is going to be deployed. - -### Service location hints - -Kong Enterprise adds two GUIs, Kong Manager and the Kong Developer Portal, that -must know where other Kong services (namely the admin and files APIs) can be -accessed in order to function properly. Kong's default behavior for attempting -to locate these without explicit configuration is unlikely to work in common Kubernetes -environments.
Because of this, you should set each of `admin_gui_url`, -`admin_gui_api_url`, `proxy_url`, `portal_api_url`, `portal_gui_host`, and -`portal_gui_protocol` under the `.env` key in values.yaml to locations where -each of their respective services can be accessed to ensure that Kong services -can locate one another and properly set CORS headers. See the -[Property Reference documentation](https://docs.konghq.com/enterprise/latest/property-reference/) -for more details on these settings. - -### RBAC - -You can create a default RBAC superuser when initially running `helm install` -by setting a `password` environment variable under `env` in values.yaml. It -should be a reference to a secret key containing your desired password. This -will create a `kong_admin` admin whose token and basic-auth password match the -value in the secret. For example: - -```yaml -env: - password: - valueFrom: - secretKeyRef: - name: kong-enterprise-superuser-password - key: password -``` - -If using the ingress controller, it needs access to the token as well, by -specifying `kong_admin_token` in its environment variables: - -```yaml -ingressController: - env: - kong_admin_token: - valueFrom: - secretKeyRef: - name: kong-enterprise-superuser-password - key: password -``` - -Although the above examples both use the initial super-admin, we recommend -[creating a less-privileged RBAC user](https://docs.konghq.com/enterprise/latest/kong-manager/administration/rbac/add-user/) -for the controller after installing. It needs at least workspace admin -privileges in its workspace (`default` by default, settable by adding a -`workspace` variable under `ingressController.env`). Once you create the -controller user, add its token to a secret and update your `kong_admin_token` -variable to use it. Remove the `password` variable from Kong's environment -variables and the secret containing the super-admin token after. - -### Sessions - -Login sessions for Kong Manager and the Developer Portal make use of -[the Kong Sessions plugin](https://docs.konghq.com/enterprise/latest/kong-manager/authentication/sessions). -When configured via values.yaml, their configuration must be stored in Secrets, -as it contains an HMAC key. - -Kong Manager's session configuration must be configured via values.yaml, -whereas this is optional for the Developer Portal on versions 0.36+. Providing -Portal session configuration in values.yaml provides the default session -configuration, which can be overridden on a per-workspace basis. - -```bash -cat admin_gui_session_conf -``` - -```json -{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"} -``` - -```bash -cat portal_session_conf -``` - -```json -{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"} -``` - -```bash -kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf -``` - -```bash -secret/kong-session-config created -``` - -The exact plugin settings may vary in your environment. The `secret` should -always be changed for both configurations. - -After creating your secret, set its name in values.yaml in -`.enterprise.rbac.session_conf_secret`. If you create a Portal configuration, -add it at `env.portal_session_conf` using a secretKeyRef. 
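-
-A minimal sketch of wiring both of these up in values.yaml, assuming the
-`kong-session-config` Secret created above:
-
-```yaml
-enterprise:
-  rbac:
-    session_conf_secret: kong-session-config
-env:
-  portal_session_conf:
-    valueFrom:
-      secretKeyRef:
-        name: kong-session-config
-        key: portal_session_conf
-```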
- -### Email/SMTP - -Email is used to send invitations for -[Kong Admins](https://docs.konghq.com/enterprise/latest/kong-manager/networking/email) -and [Developers](https://docs.konghq.com/enterprise/latest/developer-portal/configuration/smtp). - -Email invitations rely on setting a number of SMTP settings at once. For -convenience, these are grouped under the `.enterprise.smtp` key in values.yaml. -Setting `.enterprise.smtp.disabled: true` will set `KONG_SMTP_MOCK=on` and -allow Admin/Developer invites to proceed without sending email. Note, however, -that these have limited functionality without sending email. - -If your SMTP server requires authentication, you must provide the `username` -and `smtp_password_secret` keys under `.enterprise.smtp.auth`. -`smtp_password_secret` must be a Secret containing an `smtp_password` key whose -value is your SMTP password. - -By default, SMTP uses `AUTH` `PLAIN` when you provide credentials. If your provider requires `AUTH LOGIN`, set `smtp_auth_type: login`. - -## Prometheus Operator integration - -The chart can configure a ServiceMonitor resource to instruct the [Prometheus -Operator](https://github.com/prometheus-operator/prometheus-operator) to -collect metrics from Kong Pods. To enable this, set -`serviceMonitor.enabled=true` in `values.yaml`. - -Kong exposes memory usage and connection counts by default. You can enable -traffic metrics for routes and services by configuring the [Prometheus -plugin](https://docs.konghq.com/hub/kong-inc/prometheus/). - -The ServiceMonitor requires an `enable-metrics: "true"` label on one of the -chart's Services to collect data. By default, this label is set on the proxy -Service. It should only be set on a single chart Service to avoid duplicate -data. If you disable the proxy Service (e.g. on a hybrid control plane instance -or Portal-only instance) and still wish to collect memory usage metrics, add -this label to another Service, e.g. on the admin API Service: - -``` -admin: - labels: - enable-metrics: "true" -``` - -## Argo CD Considerations - -The built-in database subchart (`postgresql.enabled` in values) is not -supported when installing the chart via Argo CD. - -Argo CD does not support the full Helm lifecycle. There is no distinction -between the initial install and upgrades. Both operations are a "sync" in Argo -terms. This affects when migration Jobs execute in database-backed Kong -installs. - -The chart sets the `Sync` and `BeforeHookCreation` deletion -[hook policies](https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/) -on the `init-migrations` and `pre-upgrade-migrations` Jobs. - -The `pre-upgrade-migrations` Job normally uses Helm's `pre-upgrade` policy. Argo -translates this to its `PreSync` policy, which would create the Job before all -sync phase resources. Doing this before various sync phase resources (such as -the ServiceAccount) are in place would prevent the Job from running -successfully. Overriding this with Argo's `Sync` policy starts the Job at the -same time as the upgraded Deployment Pods. The new Pods may fail to start -temporarily, but will eventually start normally once migrations complete. - -## Seeking help - -If you run into an issue, bug or have a question, please reach out to the Kong -community via [Kong Nation](https://discuss.konghq.com). -Please do not open issues in [this](https://github.com/helm/charts) repository -as the maintainers will not be notified and won't respond. 
- -## Nethgate DOC -if you run nethgate enable `deployment.nethgate.enabled=true` \ No newline at end of file +# kong + +![Version: 2.33.4](https://img.shields.io/badge/Version-2.33.4-informational?style=flat-square) ![AppVersion: 3.5](https://img.shields.io/badge/AppVersion-3.5-informational?style=flat-square) + +The Cloud-Native Ingress and API-management + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| anishgehlot | | | + +## Source Code + +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | postgresql | 11.9.13 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| admin.annotations | object | `{}` | | +| admin.enabled | bool | `false` | | +| admin.http.containerPort | int | `8001` | | +| admin.http.enabled | bool | `false` | | +| admin.http.parameters | list | `[]` | | +| admin.http.servicePort | int | `8001` | | +| admin.ingress.annotations | object | `{}` | | +| admin.ingress.enabled | bool | `false` | | +| admin.ingress.hostname | string | `nil` | | +| admin.ingress.ingressClassName | string | `nil` | | +| admin.ingress.path | string | `"/"` | | +| admin.ingress.pathType | string | `"ImplementationSpecific"` | | +| admin.labels | object | `{}` | | +| admin.loadBalancerClass | string | `nil` | | +| admin.tls.client.caBundle | string | `""` | | +| admin.tls.client.secretName | string | `""` | | +| admin.tls.containerPort | int | `8444` | | +| admin.tls.enabled | bool | `true` | | +| admin.tls.parameters[0] | string | `"http2"` | | +| admin.tls.servicePort | int | `8444` | | +| admin.type | string | `"NodePort"` | | +| autoscaling.behavior | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `5` | | +| autoscaling.metrics[0].resource.name | string | `"cpu"` | | +| autoscaling.metrics[0].resource.target.averageUtilization | int | `80` | | +| autoscaling.metrics[0].resource.target.type | string | `"Utilization"` | | +| autoscaling.metrics[0].type | string | `"Resource"` | | +| autoscaling.minReplicas | int | `2` | | +| autoscaling.targetCPUUtilizationPercentage | string | `nil` | | +| certificates | object | `{"admin":{"clusterIssuer":"","commonName":"kong.example","dnsNames":[],"enabled":true,"issuer":""},"cluster":{"clusterIssuer":"","commonName":"kong_clustering","dnsNames":[],"enabled":true,"issuer":""},"clusterIssuer":"","enabled":false,"issuer":"","portal":{"clusterIssuer":"","commonName":"developer.example","dnsNames":[],"enabled":true,"issuer":""},"proxy":{"clusterIssuer":"","commonName":"app.example","dnsNames":[],"enabled":true,"issuer":""}}` | --------------------------------------------------------------------------- | +| cluster.annotations | object | `{}` | | +| cluster.enabled | bool | `false` | | +| cluster.ingress.annotations | object | `{}` | | +| cluster.ingress.enabled | bool | `false` | | +| cluster.ingress.hostname | string | `nil` | | +| cluster.ingress.ingressClassName | string | `nil` | | +| cluster.ingress.path | string | `"/"` | | +| cluster.ingress.pathType | string | `"ImplementationSpecific"` | | +| cluster.labels | object | `{}` | | +| cluster.loadBalancerClass | string | `nil` | | +| cluster.tls.containerPort | int | `8005` | | +| cluster.tls.enabled | bool | `false` | | +| cluster.tls.parameters | list | `[]` | | +| cluster.tls.servicePort | int | `8005` | | +| cluster.type | string | `"ClusterIP"` | | +| clusterCaSecretName | string | `""` | | +| 
clustertelemetry.annotations | object | `{}` | | +| clustertelemetry.enabled | bool | `false` | | +| clustertelemetry.ingress.annotations | object | `{}` | | +| clustertelemetry.ingress.enabled | bool | `false` | | +| clustertelemetry.ingress.hostname | string | `nil` | | +| clustertelemetry.ingress.ingressClassName | string | `nil` | | +| clustertelemetry.ingress.path | string | `"/"` | | +| clustertelemetry.ingress.pathType | string | `"ImplementationSpecific"` | | +| clustertelemetry.labels | object | `{}` | | +| clustertelemetry.loadBalancerClass | string | `nil` | | +| clustertelemetry.tls.containerPort | int | `8006` | | +| clustertelemetry.tls.enabled | bool | `false` | | +| clustertelemetry.tls.parameters | list | `[]` | | +| clustertelemetry.tls.servicePort | int | `8006` | | +| clustertelemetry.type | string | `"ClusterIP"` | | +| containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| containerSecurityContext.readOnlyRootFilesystem | bool | `false` | | +| containerSecurityContext.runAsNonRoot | bool | `true` | | +| containerSecurityContext.runAsUser | int | `1000` | | +| containerSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| dblessConfig.config | string | `""` | | +| dblessConfig.configMap | string | `""` | | +| dblessConfig.secret | string | `""` | | +| deployment.customNethgateEnv.NETHGATE_SECRET.valueFrom.secretKeyRef.key | string | `"secret"` | | +| deployment.customNethgateEnv.NETHGATE_SECRET.valueFrom.secretKeyRef.name | string | `"nethgate-dev-secret"` | | +| deployment.daemonset | bool | `false` | | +| deployment.hostNetwork | bool | `false` | | +| deployment.hostname | string | `""` | | +| deployment.kong.enabled | bool | `true` | | +| deployment.nethgate.enabled | bool | `false` | | +| deployment.nethgate.image.nethgate_tag | string | `"0.0.1-dev102"` | | +| deployment.nethgate.image.pullPolicy | string | `"IfNotPresent"` | | +| deployment.nethgate.image.pullSecrets[0] | string | `"regcred"` | | +| deployment.nethgate.image.repository | string | `"nethermindeth/nethgate"` | | +| deployment.nethgate.port | string | `"8080"` | | +| deployment.nethgate.svc.type | string | `"ClusterIP"` | | +| deployment.prefixDir.sizeLimit | string | `"256Mi"` | | +| deployment.serviceAccount.automountServiceAccountToken | bool | `false` | | +| deployment.serviceAccount.create | bool | `true` | | +| deployment.test.enabled | bool | `false` | | +| deployment.tmpDir.sizeLimit | string | `"1Gi"` | | +| deployment.voyager_migrator.enabled | bool | `false` | | +| deployment.voyager_migrator.image.migrator_tag | string | `"0.0.1-dev102"` | | +| deployment.voyager_migrator.image.pullPolicy | string | `"IfNotPresent"` | | +| deployment.voyager_migrator.image.pullSecrets[0] | string | `"regcred"` | | +| deployment.voyager_migrator.image.repository | string | `"nethermindeth/voyager_migrator"` | | +| deploymentAnnotations | object | `{}` | | +| enterprise | object | 
`{"enabled":false,"portal":{"enabled":false},"rbac":{"admin_gui_auth":"basic-auth","admin_gui_auth_conf_secret":"CHANGEME-admin-gui-auth-conf-secret","enabled":false,"session_conf_secret":"kong-session-config"},"smtp":{"admin_emails_from":"none@example.com","admin_emails_reply_to":"none@example.com","auth":{"smtp_password_secret":"CHANGEME-smtp-password","smtp_username":""},"enabled":false,"portal_emails_from":"none@example.com","portal_emails_reply_to":"none@example.com","smtp_admin_emails":"none@example.com","smtp_auth_type":"","smtp_host":"smtp.example.com","smtp_port":587,"smtp_ssl":"nil","smtp_starttls":true},"vitals":{"enabled":true}}` | --------------------------------------------------------------------------- Toggle Kong Enterprise features on or off RBAC and SMTP configuration have additional options that must all be set together Other settings should be added to the "env" settings below | +| env | object | `{"admin_access_log":"/dev/stdout","admin_error_log":"/dev/stderr","admin_gui_access_log":"/dev/stdout","admin_gui_error_log":"/dev/stderr","database":"off","nginx_worker_processes":"2","portal_api_access_log":"/dev/stdout","portal_api_error_log":"/dev/stderr","prefix":"/kong_prefix/","proxy_access_log":"/dev/stdout","proxy_error_log":"/dev/stderr","router_flavor":"traditional"}` | --------------------------------------------------------------------------- Specify Kong configuration This chart takes all entries defined under `.env` and transforms them into into `KONG_*` environment variables for Kong containers. Their names here should match the names used in https://github.com/Kong/kong/blob/master/kong.conf.default See https://docs.konghq.com/latest/configuration also for additional details Values here take precedence over values from other sections of values.yaml, e.g. setting pg_user here will override the value normally set when postgresql.enabled is set below. In general, you should not set values here if they are set elsewhere. 
| +| extraConfigMaps | list | `[]` | | +| extraLabels | object | `{}` | | +| extraObjects | list | `[]` | | +| extraSecrets | list | `[]` | | +| image.effectiveSemver | string | `nil` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"kong"` | | +| image.tag | string | `"3.5"` | | +| ingressController | object | `{"adminApi":{"tls":{"client":{"caSecretName":"","certProvided":false,"enabled":false,"secretName":""}}},"admissionWebhook":{"certificate":{"provided":false},"enabled":true,"failurePolicy":"Ignore","namespaceSelector":{},"port":8080,"service":{"labels":{}}},"args":[],"enabled":true,"env":{"kong_admin_tls_skip_verify":true},"gatewayDiscovery":{"adminApiService":{"name":"","namespace":""},"enabled":false,"generateAdminApiService":false},"image":{"effectiveSemver":null,"repository":"kong/kubernetes-ingress-controller","tag":"3.0"},"ingressClass":"kong","ingressClassAnnotations":{},"konnect":{"apiHostname":"us.kic.api.konghq.com","enabled":false,"license":{"enabled":false},"runtimeGroupID":"","tlsClientCertSecretName":"konnect-client-tls"},"livenessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"rbac":{"create":true},"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/readyz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{},"watchNamespaces":[]}` | --------------------------------------------------------------------------- Kong Ingress Controller's primary purpose is to satisfy Ingress resources created in k8s. It uses CRDs for more fine grained control over routing and for Kong specific configuration. 
| +| lifecycle.preStop.exec.command[0] | string | `"kong"` | | +| lifecycle.preStop.exec.command[1] | string | `"quit"` | | +| lifecycle.preStop.exec.command[2] | string | `"--wait=15"` | | +| livenessProbe.failureThreshold | int | `3` | | +| livenessProbe.httpGet.path | string | `"/status"` | | +| livenessProbe.httpGet.port | string | `"status"` | | +| livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.initialDelaySeconds | int | `5` | | +| livenessProbe.periodSeconds | int | `10` | | +| livenessProbe.successThreshold | int | `1` | | +| livenessProbe.timeoutSeconds | int | `5` | | +| manager.annotations | object | `{}` | | +| manager.enabled | bool | `true` | | +| manager.http.containerPort | int | `8002` | | +| manager.http.enabled | bool | `true` | | +| manager.http.parameters | list | `[]` | | +| manager.http.servicePort | int | `8002` | | +| manager.ingress.annotations | object | `{}` | | +| manager.ingress.enabled | bool | `false` | | +| manager.ingress.hostname | string | `nil` | | +| manager.ingress.ingressClassName | string | `nil` | | +| manager.ingress.path | string | `"/"` | | +| manager.ingress.pathType | string | `"ImplementationSpecific"` | | +| manager.labels | object | `{}` | | +| manager.loadBalancerClass | string | `nil` | | +| manager.tls.containerPort | int | `8445` | | +| manager.tls.enabled | bool | `true` | | +| manager.tls.parameters[0] | string | `"http2"` | | +| manager.tls.servicePort | int | `8445` | | +| manager.type | string | `"ClusterIP"` | | +| migrations.annotations."sidecar.istio.io/inject" | bool | `false` | | +| migrations.backoffLimit | string | `nil` | | +| migrations.jobAnnotations | object | `{}` | | +| migrations.postUpgrade | bool | `true` | | +| migrations.preUpgrade | bool | `true` | | +| migrations.resources | object | `{}` | | +| nodeSelector | object | `{}` | | +| plugins | object | `{}` | | +| podAnnotations."kuma.io/gateway" | string | `"enabled"` | | +| podAnnotations."traffic.sidecar.istio.io/includeInboundPorts" | string | `""` | | +| podDisruptionBudget.enabled | bool | `false` | | +| podLabels | object | `{}` | | +| podSecurityPolicy.annotations | object | `{}` | | +| podSecurityPolicy.enabled | bool | `false` | | +| podSecurityPolicy.labels | object | `{}` | | +| podSecurityPolicy.spec.allowPrivilegeEscalation | bool | `false` | | +| podSecurityPolicy.spec.fsGroup.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.hostIPC | bool | `false` | | +| podSecurityPolicy.spec.hostNetwork | bool | `false` | | +| podSecurityPolicy.spec.hostPID | bool | `false` | | +| podSecurityPolicy.spec.privileged | bool | `false` | | +| podSecurityPolicy.spec.readOnlyRootFilesystem | bool | `true` | | +| podSecurityPolicy.spec.runAsGroup.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.runAsUser.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.seLinux.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.supplementalGroups.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.volumes[0] | string | `"configMap"` | | +| podSecurityPolicy.spec.volumes[1] | string | `"secret"` | | +| podSecurityPolicy.spec.volumes[2] | string | `"emptyDir"` | | +| podSecurityPolicy.spec.volumes[3] | string | `"projected"` | | +| portal.annotations | object | `{}` | | +| portal.enabled | bool | `true` | | +| portal.http.containerPort | int | `8003` | | +| portal.http.enabled | bool | `true` | | +| portal.http.parameters | list | `[]` | | +| portal.http.servicePort | int | `8003` | | +| portal.ingress.annotations | object 
| `{}` | |
+| portal.ingress.enabled | bool | `false` | |
+| portal.ingress.hostname | string | `nil` | |
+| portal.ingress.ingressClassName | string | `nil` | |
+| portal.ingress.path | string | `"/"` | |
+| portal.ingress.pathType | string | `"ImplementationSpecific"` | |
+| portal.labels | object | `{}` | |
+| portal.loadBalancerClass | string | `nil` | |
+| portal.tls.containerPort | int | `8446` | |
+| portal.tls.enabled | bool | `true` | |
+| portal.tls.parameters[0] | string | `"http2"` | |
+| portal.tls.servicePort | int | `8446` | |
+| portal.type | string | `"NodePort"` | |
+| portalapi.annotations | object | `{}` | |
+| portalapi.enabled | bool | `true` | |
+| portalapi.http.containerPort | int | `8004` | |
+| portalapi.http.enabled | bool | `true` | |
+| portalapi.http.parameters | list | `[]` | |
+| portalapi.http.servicePort | int | `8004` | |
+| portalapi.ingress.annotations | object | `{}` | |
+| portalapi.ingress.enabled | bool | `false` | |
+| portalapi.ingress.hostname | string | `nil` | |
+| portalapi.ingress.ingressClassName | string | `nil` | |
+| portalapi.ingress.path | string | `"/"` | |
+| portalapi.ingress.pathType | string | `"ImplementationSpecific"` | |
+| portalapi.labels | object | `{}` | |
+| portalapi.loadBalancerClass | string | `nil` | |
+| portalapi.tls.containerPort | int | `8447` | |
+| portalapi.tls.enabled | bool | `true` | |
+| portalapi.tls.parameters[0] | string | `"http2"` | |
+| portalapi.tls.servicePort | int | `8447` | |
+| portalapi.type | string | `"NodePort"` | |
+| postgresql | object | `{"auth":{"database":"kong","username":"kong"},"enabled":false,"image":{"tag":"13.11.0-debian-11-r20"},"service":{"ports":{"postgresql":"5432"}}}` | --------------------------------------------------------------------------- Kong can run without a database or use either Postgres or Cassandra as a backend datastore for its configuration. By default, this chart installs Kong without a database. If you would like to use a database, there are two options: - (recommended) Deploy and maintain a database and pass the connection details to Kong via the `env` section. - You can use the below `postgresql` sub-chart to deploy a database along with Kong as part of a single Helm release. Running a database independently is recommended for production, but the built-in Postgres is useful for quickly creating test instances. PostgreSQL chart documentation: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md WARNING: by default, the Postgres chart generates a random password each time it upgrades, which breaks access to existing volumes. 
You should set a password explicitly: https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md#kong-fails-to-start-after-helm-upgrade-when-postgres-is-used-what-do-i-do | +| priorityClassName | string | `""` | | +| proxy.annotations | object | `{}` | | +| proxy.enabled | bool | `true` | | +| proxy.http.containerPort | int | `8000` | | +| proxy.http.enabled | bool | `true` | | +| proxy.http.parameters | list | `[]` | | +| proxy.http.servicePort | int | `80` | | +| proxy.ingress.annotations | object | `{}` | | +| proxy.ingress.enabled | bool | `false` | | +| proxy.ingress.hostname | string | `nil` | | +| proxy.ingress.hosts | list | `[]` | | +| proxy.ingress.ingressClassName | string | `nil` | | +| proxy.ingress.labels | object | `{}` | | +| proxy.ingress.path | string | `"/"` | | +| proxy.ingress.pathType | string | `"ImplementationSpecific"` | | +| proxy.labels.enable-metrics | string | `"true"` | | +| proxy.loadBalancerClass | string | `nil` | | +| proxy.monitoring.containerPort | int | `8100` | | +| proxy.monitoring.enabled | bool | `true` | | +| proxy.monitoring.servicePort | int | `8100` | | +| proxy.nameOverride | string | `""` | | +| proxy.stream | list | `[]` | | +| proxy.tls.containerPort | int | `8443` | | +| proxy.tls.enabled | bool | `false` | | +| proxy.tls.parameters[0] | string | `"http2"` | | +| proxy.tls.servicePort | int | `443` | | +| proxy.type | string | `"NodePort"` | | +| readinessProbe.failureThreshold | int | `3` | | +| readinessProbe.httpGet.path | string | `"/status/ready"` | | +| readinessProbe.httpGet.port | string | `"status"` | | +| readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| readinessProbe.initialDelaySeconds | int | `5` | | +| readinessProbe.periodSeconds | int | `10` | | +| readinessProbe.successThreshold | int | `1` | | +| readinessProbe.timeoutSeconds | int | `5` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| secretVolumes | list | `[]` | | +| securityContext | object | `{}` | | +| serviceMonitor.enabled | bool | `false` | | +| status.enabled | bool | `true` | | +| status.http.containerPort | int | `8100` | | +| status.http.enabled | bool | `true` | | +| status.http.parameters | list | `[]` | | +| status.tls.containerPort | int | `8543` | | +| status.tls.enabled | bool | `false` | | +| status.tls.parameters | list | `[]` | | +| terminationGracePeriodSeconds | int | `30` | | +| tolerations | list | `[]` | | +| udpProxy.annotations | object | `{}` | | +| udpProxy.enabled | bool | `false` | | +| udpProxy.labels | object | `{}` | | +| udpProxy.loadBalancerClass | string | `nil` | | +| udpProxy.stream | list | `[]` | | +| udpProxy.type | string | `"LoadBalancer"` | | +| updateStrategy | object | `{}` | | +| waitImage | object | `{"enabled":true,"pullPolicy":"IfNotPresent"}` | --------------------------------------------------------------------------- | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/posmoni/README.md b/charts/posmoni/README.md index 0f992f8e6..1ab95c5d6 100644 --- a/charts/posmoni/README.md +++ b/charts/posmoni/README.md @@ -57,4 +57,4 @@ A Helm chart for installing and configuring Posmoni | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs 
v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/rpc-saas-secretStore/README.md b/charts/rpc-saas-secretStore/README.md index fcf21160c..d5422f54c 100644 --- a/charts/rpc-saas-secretStore/README.md +++ b/charts/rpc-saas-secretStore/README.md @@ -23,4 +23,4 @@ A Helm chart for deploying ClusterSecretStore for RPC Saas Service | rpcSaas.clustersecretstore.serviceAccountnamespace | string | `"dummy"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/validator-ejector/README.md b/charts/validator-ejector/README.md index 206d3e48b..bbe684a1c 100644 --- a/charts/validator-ejector/README.md +++ b/charts/validator-ejector/README.md @@ -98,4 +98,4 @@ A Helm chart for installing and configuring Lido's validator-ejector | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/validator-kapi/README.md b/charts/validator-kapi/README.md index a05588cb1..cb8df0dfe 100644 --- a/charts/validator-kapi/README.md +++ b/charts/validator-kapi/README.md @@ -86,4 +86,4 @@ A Helm chart for installing and configuring Lido's validator-kapi | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/validators/README.md b/charts/validators/README.md index 5bbee12ce..df1da99d2 100644 --- a/charts/validators/README.md +++ b/charts/validators/README.md @@ -122,4 +122,4 @@ A Helm chart for installing validators with the web3signer. 
| web3signerEndpoint | string | `""` | Web3Signer Endpoint | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/vouch/README.md b/charts/vouch/README.md index 246990eee..15fca5ceb 100644 --- a/charts/vouch/README.md +++ b/charts/vouch/README.md @@ -105,4 +105,4 @@ A Helm chart for installing and configuring large scale ETH staking infrastructu | vouchFullConfig | string | `nil` | use vouchFullConfig: to provide all vouch.yaml values use vouch: to populate good defaults and to do minimal changes | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) diff --git a/charts/web3signer/README.md b/charts/web3signer/README.md index 40259664b..bf01ee9c1 100644 --- a/charts/web3signer/README.md +++ b/charts/web3signer/README.md @@ -74,4 +74,4 @@ A Helm chart for installing and configuring Web3signer | web3signerJavaOpts | string | `"-Xmx1g -Xms1g"` | Java Opts | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) +Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) From 785620c93df78045b643a28681928341dde1f2c1 Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 13 Jun 2024 21:23:51 -0700 Subject: [PATCH 16/27] Revert "Fix: change DB size" This reverts commit 9e5871061f1b3fbea354187858091300b98d5107. --- charts/cert-manager/README.md | 2 +- charts/common/README.md | 2 +- charts/dirk/README.md | 2 +- charts/ethereum-node/README.md | 2 +- charts/execution-beacon/README.md | 2 +- charts/external-dns/README.md | 2 +- charts/juno-node/README.md | 34 +- .../templates/juno-data-backup-cronjob.yaml | 2 +- charts/kong/README.md | 2 +- charts/kube-prometheus-stack/README.md | 2 +- charts/lodestar/README.md | 2 +- charts/loki/README.md | 2 +- charts/mev-boost/README.md | 2 +- charts/mysql/README.md | 2 +- charts/nethgate/README.md | 1526 +++++++++++++---- charts/posmoni/README.md | 2 +- charts/rpc-saas-secretStore/README.md | 2 +- charts/validator-ejector/README.md | 2 +- charts/validator-kapi/README.md | 2 +- charts/validators/README.md | 2 +- charts/vouch/README.md | 2 +- charts/web3signer/README.md | 2 +- 22 files changed, 1266 insertions(+), 334 deletions(-) diff --git a/charts/cert-manager/README.md b/charts/cert-manager/README.md index fb9719fa9..bff63e4e2 100644 --- a/charts/cert-manager/README.md +++ b/charts/cert-manager/README.md @@ -185,4 +185,4 @@ Kubernetes: `>= 1.22.0-0` | webhook.volumes | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/common/README.md b/charts/common/README.md index 70f1654bd..7327fd7c9 100644 --- a/charts/common/README.md +++ b/charts/common/README.md @@ -18,4 +18,4 @@ A Library Helm Chart for grouping common logic between stakewise charts. 
This ch | exampleValue | string | `"common-chart"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/dirk/README.md b/charts/dirk/README.md index 824415dcf..8f8d69e31 100644 --- a/charts/dirk/README.md +++ b/charts/dirk/README.md @@ -89,4 +89,4 @@ A Helm chart for installing and configuring large scale ETH staking infrastructu | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/ethereum-node/README.md b/charts/ethereum-node/README.md index 8bde79b9b..8d0051523 100644 --- a/charts/ethereum-node/README.md +++ b/charts/ethereum-node/README.md @@ -87,4 +87,4 @@ This chart acts as an umbrella chart and allows to run a ethereum execution and | prysm.resources.requests.memory | string | `"2Gi"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/execution-beacon/README.md b/charts/execution-beacon/README.md index 07831d6e4..859233aef 100644 --- a/charts/execution-beacon/README.md +++ b/charts/execution-beacon/README.md @@ -201,4 +201,4 @@ Kubernetes: `^1.23.0-0` | serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/external-dns/README.md b/charts/external-dns/README.md index 9bc33c814..a9e7248e5 100644 --- a/charts/external-dns/README.md +++ b/charts/external-dns/README.md @@ -101,4 +101,4 @@ ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS prov | txtSuffix | string | `""` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/juno-node/README.md b/charts/juno-node/README.md index 7c7aa8bd4..41794a0a4 100644 --- a/charts/juno-node/README.md +++ b/charts/juno-node/README.md @@ -27,20 +27,17 @@ A Helm chart for deploying Juno service | args.--ws | string | `"true"` | | | args.--ws-host | string | `"0.0.0.0"` | | | args.--ws-port | string | `"6061"` | | -| backupJunoDataJob.backupSchedule | string | `"0 0 * * *"` | | -| backupJunoDataJob.cleanupSchedule | string | `"0 12 * * *"` | | +| backupJunoDataJob.backupSchedule | string | `"*/20 * * * *"` | | +| backupJunoDataJob.cleanupSchedule | string | `"*/40 * * * *"` | | | backupJunoDataJob.dataSource | string | `"juno-sepolia-pv-ssd-juno-sepolia-0"` | | | backupJunoDataJob.enabled | bool | `true` | | +| backupJunoDataJob.endpoint | string | `"https://12345543.r2.cloudflarestorage.com"` | | +| backupJunoDataJob.key | string | `"key-1234"` | | | backupJunoDataJob.network | string | `"sepolia"` | | +| backupJunoDataJob.secret | string | `"secret-12345"` | | | backupJunoDataJob.storageSize | string | `"200Gi"` | | | batchjob.enabled | bool | `false` | | | batchjob.schedule | string | `"* */1 * * *"` | | -| cache.enabled | bool | `false` | | -| cache.image | string | `"us-east1-docker.pkg.dev/juno-stg-nth/juno-cache/cache:2.0"` | | -| cache.resources.limits.cpu | string | `"100m"` | | -| cache.resources.limits.memory | string | `"512Mi"` | | -| cache.resources.requests.cpu | string | `"100m"` | | -| cache.resources.requests.memory | string | `"100Mi"` | | | deployment.healthCheck.enabled | bool | `false` | | | deployment.healthCheck.livenessProbe.failureThreshold | int | `6` | | | deployment.healthCheck.livenessProbe.initialDelaySeconds | int | `9600` | | @@ -81,27 +78,18 @@ A Helm chart for deploying Juno service | pgo.resources.limits.memory | string | `"4Gi"` | | | pgo.resources.requests.cpu | string | `"1"` | | | pgo.resources.requests.memory | string | `"2Gi"` | | +| cache.enabled | bool | `false` | | +| cache.image | string | `"us-east1-docker.pkg.dev/juno-stg-nth/juno-cache/cache:2.0"` | | +| cache.resources.limits.cpu | string | `"100m"` | | +| cache.resources.limits.memory | string | `"512Mi"` | | +| cache.resources.requests.cpu | string | `"100m"` | | +| cache.resources.requests.memory | string | `"100Mi"` | | | pvc.datasource | string | `""` | | | pvc.enabled | bool | `true` | | | pvc.mount[0].mountPath | string | `"/var/lib/juno"` | | | pvc.mount[0].pvName | string | `"pv"` | | | pvc.mount[0].storageSize | string | `"250Gi"` | | | pvc.storageClassName | string | `"standard"` | | -| 
secret.data.dataFromKey | string | `"secret-store"` | | -| secret.data.refreshInterval | string | `"10m"` | | -| secret.data.secretStoreKind | string | `"ClusterSecretStore"` | | -| secret.data.secretStoreName | string | `"juno-store"` | | -| secret.data.targetCreationPolicy | string | `"Owner"` | | -| secret.data.targetName | string | `"juno-sepolia-common"` | | -| secret.feederGateway.key | string | `"feeder-gateway"` | | -| secret.feederGateway.property | string | `"testnet"` | | -| secret.feederGateway.refreshInterval | string | `"10m"` | | -| secret.feederGateway.secretKey | string | `"testnet"` | | -| secret.feederGateway.secretStoreKind | string | `"ClusterSecretStore"` | | -| secret.feederGateway.secretStoreName | string | `"juno-store"` | | -| secret.feederGateway.targetCreationPolicy | string | `"Owner"` | | -| secret.feederGateway.targetName | string | `"juno-goerli"` | | -| secret.feederGateway.version | string | `"1"` | | | serviceAccount.enabled | bool | `false` | | | serviceAccount.gcpServiceAccount | string | `"monitoring-sa-euw1@juno-prod-nth.iam.gserviceaccount.com"` | | | serviceAccount.name | string | `"juno-pgo"` | | diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index c2eed4b04..5a8e36903 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -60,7 +60,7 @@ spec: storageClassName: premium-rwo resources: requests: - storage: {{ .Values.backupJunoDataJob.storageSize }} + storage: 200Gi --- # ConfigMap for cloning disk manifest apiVersion: v1 diff --git a/charts/kong/README.md b/charts/kong/README.md index 6b9789b0a..040833bee 100644 --- a/charts/kong/README.md +++ b/charts/kong/README.md @@ -267,4 +267,4 @@ The Cloud-Native Ingress and API-management | waitImage | object | `{"enabled":true,"pullPolicy":"IfNotPresent"}` | --------------------------------------------------------------------------- | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/kube-prometheus-stack/README.md b/charts/kube-prometheus-stack/README.md index 9a2e4dbeb..48b5e057c 100644 --- a/charts/kube-prometheus-stack/README.md +++ b/charts/kube-prometheus-stack/README.md @@ -1002,4 +1002,4 @@ Kubernetes: `>=1.16.0-0` | windowsMonitoring.job | string | `"prometheus-windows-exporter"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/lodestar/README.md b/charts/lodestar/README.md index cc5621563..b3c9060ae 100644 --- a/charts/lodestar/README.md +++ b/charts/lodestar/README.md @@ -59,4 +59,4 @@ A Helm chart to deploy the Lodestar Consensus Client using Kubernetes | tolerations | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/loki/README.md b/charts/loki/README.md index 
02916f27c..ec569a1a1 100644 --- a/charts/loki/README.md +++ b/charts/loki/README.md @@ -137,4 +137,4 @@ Kubernetes: `^1.10.0-0` | useExistingAlertingGroup.enabled | bool | `false` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/mev-boost/README.md b/charts/mev-boost/README.md index 413e63233..ec2ab8f04 100644 --- a/charts/mev-boost/README.md +++ b/charts/mev-boost/README.md @@ -57,4 +57,4 @@ mev-boost allows proof-of-stake Ethereum consensus clients to outsource block co | tolerations | list | `[]` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/mysql/README.md b/charts/mysql/README.md index 88afb56ed..714c16050 100644 --- a/charts/mysql/README.md +++ b/charts/mysql/README.md @@ -22,4 +22,4 @@ A Helm chart for deploying MySQL with StatefulSet, Service, Secret, and PVC. | storageSize | string | `"100Gi"` | | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/nethgate/README.md b/charts/nethgate/README.md index e7cff4fb6..df6c9f7f1 100644 --- a/charts/nethgate/README.md +++ b/charts/nethgate/README.md @@ -1,291 +1,1235 @@ -# kong - -![Version: 2.33.4](https://img.shields.io/badge/Version-2.33.4-informational?style=flat-square) ![AppVersion: 3.5](https://img.shields.io/badge/AppVersion-3.5-informational?style=flat-square) - -The Cloud-Native Ingress and API-management - -## Maintainers - -| Name | Email | Url | -| ---- | ------ | --- | -| anishgehlot | | | - -## Source Code - -* - -## Requirements - -| Repository | Name | Version | -|------------|------|---------| -| https://charts.bitnami.com/bitnami | postgresql | 11.9.13 | - -## Values - -| Key | Type | Default | Description | -|-----|------|---------|-------------| -| admin.annotations | object | `{}` | | -| admin.enabled | bool | `false` | | -| admin.http.containerPort | int | `8001` | | -| admin.http.enabled | bool | `false` | | -| admin.http.parameters | list | `[]` | | -| admin.http.servicePort | int | `8001` | | -| admin.ingress.annotations | object | `{}` | | -| admin.ingress.enabled | bool | `false` | | -| admin.ingress.hostname | string | `nil` | | -| admin.ingress.ingressClassName | string | `nil` | | -| admin.ingress.path | string | `"/"` | | -| admin.ingress.pathType | string | `"ImplementationSpecific"` | | -| admin.labels | object | `{}` | | -| admin.loadBalancerClass | string | `nil` | | -| admin.tls.client.caBundle | string | `""` | | -| admin.tls.client.secretName | string | `""` | | -| admin.tls.containerPort | int | `8444` | | -| admin.tls.enabled | bool | `true` | | -| admin.tls.parameters[0] | string | `"http2"` | | -| admin.tls.servicePort | int | `8444` | | -| admin.type | string | `"NodePort"` | | -| autoscaling.behavior | object | `{}` | | -| autoscaling.enabled | bool | `false` | | -| autoscaling.maxReplicas | int | `5` | | -| autoscaling.metrics[0].resource.name | 
string | `"cpu"` | | -| autoscaling.metrics[0].resource.target.averageUtilization | int | `80` | | -| autoscaling.metrics[0].resource.target.type | string | `"Utilization"` | | -| autoscaling.metrics[0].type | string | `"Resource"` | | -| autoscaling.minReplicas | int | `2` | | -| autoscaling.targetCPUUtilizationPercentage | string | `nil` | | -| certificates | object | `{"admin":{"clusterIssuer":"","commonName":"kong.example","dnsNames":[],"enabled":true,"issuer":""},"cluster":{"clusterIssuer":"","commonName":"kong_clustering","dnsNames":[],"enabled":true,"issuer":""},"clusterIssuer":"","enabled":false,"issuer":"","portal":{"clusterIssuer":"","commonName":"developer.example","dnsNames":[],"enabled":true,"issuer":""},"proxy":{"clusterIssuer":"","commonName":"app.example","dnsNames":[],"enabled":true,"issuer":""}}` | --------------------------------------------------------------------------- | -| cluster.annotations | object | `{}` | | -| cluster.enabled | bool | `false` | | -| cluster.ingress.annotations | object | `{}` | | -| cluster.ingress.enabled | bool | `false` | | -| cluster.ingress.hostname | string | `nil` | | -| cluster.ingress.ingressClassName | string | `nil` | | -| cluster.ingress.path | string | `"/"` | | -| cluster.ingress.pathType | string | `"ImplementationSpecific"` | | -| cluster.labels | object | `{}` | | -| cluster.loadBalancerClass | string | `nil` | | -| cluster.tls.containerPort | int | `8005` | | -| cluster.tls.enabled | bool | `false` | | -| cluster.tls.parameters | list | `[]` | | -| cluster.tls.servicePort | int | `8005` | | -| cluster.type | string | `"ClusterIP"` | | -| clusterCaSecretName | string | `""` | | -| clustertelemetry.annotations | object | `{}` | | -| clustertelemetry.enabled | bool | `false` | | -| clustertelemetry.ingress.annotations | object | `{}` | | -| clustertelemetry.ingress.enabled | bool | `false` | | -| clustertelemetry.ingress.hostname | string | `nil` | | -| clustertelemetry.ingress.ingressClassName | string | `nil` | | -| clustertelemetry.ingress.path | string | `"/"` | | -| clustertelemetry.ingress.pathType | string | `"ImplementationSpecific"` | | -| clustertelemetry.labels | object | `{}` | | -| clustertelemetry.loadBalancerClass | string | `nil` | | -| clustertelemetry.tls.containerPort | int | `8006` | | -| clustertelemetry.tls.enabled | bool | `false` | | -| clustertelemetry.tls.parameters | list | `[]` | | -| clustertelemetry.tls.servicePort | int | `8006` | | -| clustertelemetry.type | string | `"ClusterIP"` | | -| containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | -| containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | -| containerSecurityContext.readOnlyRootFilesystem | bool | `false` | | -| containerSecurityContext.runAsNonRoot | bool | `true` | | -| containerSecurityContext.runAsUser | int | `1000` | | -| containerSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | -| dblessConfig.config | string | `""` | | -| dblessConfig.configMap | string | `""` | | -| dblessConfig.secret | string | `""` | | -| deployment.customNethgateEnv.NETHGATE_SECRET.valueFrom.secretKeyRef.key | string | `"secret"` | | -| deployment.customNethgateEnv.NETHGATE_SECRET.valueFrom.secretKeyRef.name | string | `"nethgate-dev-secret"` | | -| deployment.daemonset | bool | `false` | | -| deployment.hostNetwork | bool | `false` | | -| deployment.hostname | string | `""` | | -| deployment.kong.enabled | bool | `true` | | -| deployment.nethgate.enabled | bool | `false` | | -| 
deployment.nethgate.image.nethgate_tag | string | `"0.0.1-dev102"` | | -| deployment.nethgate.image.pullPolicy | string | `"IfNotPresent"` | | -| deployment.nethgate.image.pullSecrets[0] | string | `"regcred"` | | -| deployment.nethgate.image.repository | string | `"nethermindeth/nethgate"` | | -| deployment.nethgate.port | string | `"8080"` | | -| deployment.nethgate.svc.type | string | `"ClusterIP"` | | -| deployment.prefixDir.sizeLimit | string | `"256Mi"` | | -| deployment.serviceAccount.automountServiceAccountToken | bool | `false` | | -| deployment.serviceAccount.create | bool | `true` | | -| deployment.test.enabled | bool | `false` | | -| deployment.tmpDir.sizeLimit | string | `"1Gi"` | | -| deployment.voyager_migrator.enabled | bool | `false` | | -| deployment.voyager_migrator.image.migrator_tag | string | `"0.0.1-dev102"` | | -| deployment.voyager_migrator.image.pullPolicy | string | `"IfNotPresent"` | | -| deployment.voyager_migrator.image.pullSecrets[0] | string | `"regcred"` | | -| deployment.voyager_migrator.image.repository | string | `"nethermindeth/voyager_migrator"` | | -| deploymentAnnotations | object | `{}` | | -| enterprise | object | `{"enabled":false,"portal":{"enabled":false},"rbac":{"admin_gui_auth":"basic-auth","admin_gui_auth_conf_secret":"CHANGEME-admin-gui-auth-conf-secret","enabled":false,"session_conf_secret":"kong-session-config"},"smtp":{"admin_emails_from":"none@example.com","admin_emails_reply_to":"none@example.com","auth":{"smtp_password_secret":"CHANGEME-smtp-password","smtp_username":""},"enabled":false,"portal_emails_from":"none@example.com","portal_emails_reply_to":"none@example.com","smtp_admin_emails":"none@example.com","smtp_auth_type":"","smtp_host":"smtp.example.com","smtp_port":587,"smtp_ssl":"nil","smtp_starttls":true},"vitals":{"enabled":true}}` | --------------------------------------------------------------------------- Toggle Kong Enterprise features on or off RBAC and SMTP configuration have additional options that must all be set together Other settings should be added to the "env" settings below | -| env | object | `{"admin_access_log":"/dev/stdout","admin_error_log":"/dev/stderr","admin_gui_access_log":"/dev/stdout","admin_gui_error_log":"/dev/stderr","database":"off","nginx_worker_processes":"2","portal_api_access_log":"/dev/stdout","portal_api_error_log":"/dev/stderr","prefix":"/kong_prefix/","proxy_access_log":"/dev/stdout","proxy_error_log":"/dev/stderr","router_flavor":"traditional"}` | --------------------------------------------------------------------------- Specify Kong configuration This chart takes all entries defined under `.env` and transforms them into into `KONG_*` environment variables for Kong containers. Their names here should match the names used in https://github.com/Kong/kong/blob/master/kong.conf.default See https://docs.konghq.com/latest/configuration also for additional details Values here take precedence over values from other sections of values.yaml, e.g. setting pg_user here will override the value normally set when postgresql.enabled is set below. In general, you should not set values here if they are set elsewhere. 
| -| extraConfigMaps | list | `[]` | | -| extraLabels | object | `{}` | | -| extraObjects | list | `[]` | | -| extraSecrets | list | `[]` | | -| image.effectiveSemver | string | `nil` | | -| image.pullPolicy | string | `"IfNotPresent"` | | -| image.repository | string | `"kong"` | | -| image.tag | string | `"3.5"` | | -| ingressController | object | `{"adminApi":{"tls":{"client":{"caSecretName":"","certProvided":false,"enabled":false,"secretName":""}}},"admissionWebhook":{"certificate":{"provided":false},"enabled":true,"failurePolicy":"Ignore","namespaceSelector":{},"port":8080,"service":{"labels":{}}},"args":[],"enabled":true,"env":{"kong_admin_tls_skip_verify":true},"gatewayDiscovery":{"adminApiService":{"name":"","namespace":""},"enabled":false,"generateAdminApiService":false},"image":{"effectiveSemver":null,"repository":"kong/kubernetes-ingress-controller","tag":"3.0"},"ingressClass":"kong","ingressClassAnnotations":{},"konnect":{"apiHostname":"us.kic.api.konghq.com","enabled":false,"license":{"enabled":false},"runtimeGroupID":"","tlsClientCertSecretName":"konnect-client-tls"},"livenessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"rbac":{"create":true},"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/readyz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{},"watchNamespaces":[]}` | --------------------------------------------------------------------------- Kong Ingress Controller's primary purpose is to satisfy Ingress resources created in k8s. It uses CRDs for more fine grained control over routing and for Kong specific configuration. 
| -| lifecycle.preStop.exec.command[0] | string | `"kong"` | | -| lifecycle.preStop.exec.command[1] | string | `"quit"` | | -| lifecycle.preStop.exec.command[2] | string | `"--wait=15"` | | -| livenessProbe.failureThreshold | int | `3` | | -| livenessProbe.httpGet.path | string | `"/status"` | | -| livenessProbe.httpGet.port | string | `"status"` | | -| livenessProbe.httpGet.scheme | string | `"HTTP"` | | -| livenessProbe.initialDelaySeconds | int | `5` | | -| livenessProbe.periodSeconds | int | `10` | | -| livenessProbe.successThreshold | int | `1` | | -| livenessProbe.timeoutSeconds | int | `5` | | -| manager.annotations | object | `{}` | | -| manager.enabled | bool | `true` | | -| manager.http.containerPort | int | `8002` | | -| manager.http.enabled | bool | `true` | | -| manager.http.parameters | list | `[]` | | -| manager.http.servicePort | int | `8002` | | -| manager.ingress.annotations | object | `{}` | | -| manager.ingress.enabled | bool | `false` | | -| manager.ingress.hostname | string | `nil` | | -| manager.ingress.ingressClassName | string | `nil` | | -| manager.ingress.path | string | `"/"` | | -| manager.ingress.pathType | string | `"ImplementationSpecific"` | | -| manager.labels | object | `{}` | | -| manager.loadBalancerClass | string | `nil` | | -| manager.tls.containerPort | int | `8445` | | -| manager.tls.enabled | bool | `true` | | -| manager.tls.parameters[0] | string | `"http2"` | | -| manager.tls.servicePort | int | `8445` | | -| manager.type | string | `"ClusterIP"` | | -| migrations.annotations."sidecar.istio.io/inject" | bool | `false` | | -| migrations.backoffLimit | string | `nil` | | -| migrations.jobAnnotations | object | `{}` | | -| migrations.postUpgrade | bool | `true` | | -| migrations.preUpgrade | bool | `true` | | -| migrations.resources | object | `{}` | | -| nodeSelector | object | `{}` | | -| plugins | object | `{}` | | -| podAnnotations."kuma.io/gateway" | string | `"enabled"` | | -| podAnnotations."traffic.sidecar.istio.io/includeInboundPorts" | string | `""` | | -| podDisruptionBudget.enabled | bool | `false` | | -| podLabels | object | `{}` | | -| podSecurityPolicy.annotations | object | `{}` | | -| podSecurityPolicy.enabled | bool | `false` | | -| podSecurityPolicy.labels | object | `{}` | | -| podSecurityPolicy.spec.allowPrivilegeEscalation | bool | `false` | | -| podSecurityPolicy.spec.fsGroup.rule | string | `"RunAsAny"` | | -| podSecurityPolicy.spec.hostIPC | bool | `false` | | -| podSecurityPolicy.spec.hostNetwork | bool | `false` | | -| podSecurityPolicy.spec.hostPID | bool | `false` | | -| podSecurityPolicy.spec.privileged | bool | `false` | | -| podSecurityPolicy.spec.readOnlyRootFilesystem | bool | `true` | | -| podSecurityPolicy.spec.runAsGroup.rule | string | `"RunAsAny"` | | -| podSecurityPolicy.spec.runAsUser.rule | string | `"RunAsAny"` | | -| podSecurityPolicy.spec.seLinux.rule | string | `"RunAsAny"` | | -| podSecurityPolicy.spec.supplementalGroups.rule | string | `"RunAsAny"` | | -| podSecurityPolicy.spec.volumes[0] | string | `"configMap"` | | -| podSecurityPolicy.spec.volumes[1] | string | `"secret"` | | -| podSecurityPolicy.spec.volumes[2] | string | `"emptyDir"` | | -| podSecurityPolicy.spec.volumes[3] | string | `"projected"` | | -| portal.annotations | object | `{}` | | -| portal.enabled | bool | `true` | | -| portal.http.containerPort | int | `8003` | | -| portal.http.enabled | bool | `true` | | -| portal.http.parameters | list | `[]` | | -| portal.http.servicePort | int | `8003` | | -| portal.ingress.annotations | object 
| `{}` | | -| portal.ingress.enabled | bool | `false` | | -| portal.ingress.hostname | string | `nil` | | -| portal.ingress.ingressClassName | string | `nil` | | -| portal.ingress.path | string | `"/"` | | -| portal.ingress.pathType | string | `"ImplementationSpecific"` | | -| portal.labels | object | `{}` | | -| portal.loadBalancerClass | string | `nil` | | -| portal.tls.containerPort | int | `8446` | | -| portal.tls.enabled | bool | `true` | | -| portal.tls.parameters[0] | string | `"http2"` | | -| portal.tls.servicePort | int | `8446` | | -| portal.type | string | `"NodePort"` | | -| portalapi.annotations | object | `{}` | | -| portalapi.enabled | bool | `true` | | -| portalapi.http.containerPort | int | `8004` | | -| portalapi.http.enabled | bool | `true` | | -| portalapi.http.parameters | list | `[]` | | -| portalapi.http.servicePort | int | `8004` | | -| portalapi.ingress.annotations | object | `{}` | | -| portalapi.ingress.enabled | bool | `false` | | -| portalapi.ingress.hostname | string | `nil` | | -| portalapi.ingress.ingressClassName | string | `nil` | | -| portalapi.ingress.path | string | `"/"` | | -| portalapi.ingress.pathType | string | `"ImplementationSpecific"` | | -| portalapi.labels | object | `{}` | | -| portalapi.loadBalancerClass | string | `nil` | | -| portalapi.tls.containerPort | int | `8447` | | -| portalapi.tls.enabled | bool | `true` | | -| portalapi.tls.parameters[0] | string | `"http2"` | | -| portalapi.tls.servicePort | int | `8447` | | -| portalapi.type | string | `"NodePort"` | | -| postgresql | object | `{"auth":{"database":"kong","username":"kong"},"enabled":false,"image":{"tag":"13.11.0-debian-11-r20"},"service":{"ports":{"postgresql":"5432"}}}` | --------------------------------------------------------------------------- Kong can run without a database or use either Postgres or Cassandra as a backend datatstore for it's configuration. By default, this chart installs Kong without a database. If you would like to use a database, there are two options: - (recommended) Deploy and maintain a database and pass the connection details to Kong via the `env` section. - You can use the below `postgresql` sub-chart to deploy a database along-with Kong as part of a single Helm release. Running a database independently is recommended for production, but the built-in Postgres is useful for quickly creating test instances. PostgreSQL chart documentation: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md WARNING: by default, the Postgres chart generates a random password each time it upgrades, which breaks access to existing volumes. 
You should set a password explicitly: https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md#kong-fails-to-start-after-helm-upgrade-when-postgres-is-used-what-do-i-do | -| priorityClassName | string | `""` | | -| proxy.annotations | object | `{}` | | -| proxy.enabled | bool | `true` | | -| proxy.http.containerPort | int | `8000` | | -| proxy.http.enabled | bool | `true` | | -| proxy.http.parameters | list | `[]` | | -| proxy.http.servicePort | int | `80` | | -| proxy.ingress.annotations | object | `{}` | | -| proxy.ingress.enabled | bool | `false` | | -| proxy.ingress.hostname | string | `nil` | | -| proxy.ingress.hosts | list | `[]` | | -| proxy.ingress.ingressClassName | string | `nil` | | -| proxy.ingress.labels | object | `{}` | | -| proxy.ingress.path | string | `"/"` | | -| proxy.ingress.pathType | string | `"ImplementationSpecific"` | | -| proxy.labels.enable-metrics | string | `"true"` | | -| proxy.loadBalancerClass | string | `nil` | | -| proxy.monitoring.containerPort | int | `8100` | | -| proxy.monitoring.enabled | bool | `true` | | -| proxy.monitoring.servicePort | int | `8100` | | -| proxy.nameOverride | string | `""` | | -| proxy.stream | list | `[]` | | -| proxy.tls.containerPort | int | `8443` | | -| proxy.tls.enabled | bool | `false` | | -| proxy.tls.parameters[0] | string | `"http2"` | | -| proxy.tls.servicePort | int | `443` | | -| proxy.type | string | `"NodePort"` | | -| readinessProbe.failureThreshold | int | `3` | | -| readinessProbe.httpGet.path | string | `"/status/ready"` | | -| readinessProbe.httpGet.port | string | `"status"` | | -| readinessProbe.httpGet.scheme | string | `"HTTP"` | | -| readinessProbe.initialDelaySeconds | int | `5` | | -| readinessProbe.periodSeconds | int | `10` | | -| readinessProbe.successThreshold | int | `1` | | -| readinessProbe.timeoutSeconds | int | `5` | | -| replicaCount | int | `1` | | -| resources | object | `{}` | | -| secretVolumes | list | `[]` | | -| securityContext | object | `{}` | | -| serviceMonitor.enabled | bool | `false` | | -| status.enabled | bool | `true` | | -| status.http.containerPort | int | `8100` | | -| status.http.enabled | bool | `true` | | -| status.http.parameters | list | `[]` | | -| status.tls.containerPort | int | `8543` | | -| status.tls.enabled | bool | `false` | | -| status.tls.parameters | list | `[]` | | -| terminationGracePeriodSeconds | int | `30` | | -| tolerations | list | `[]` | | -| udpProxy.annotations | object | `{}` | | -| udpProxy.enabled | bool | `false` | | -| udpProxy.labels | object | `{}` | | -| udpProxy.loadBalancerClass | string | `nil` | | -| udpProxy.stream | list | `[]` | | -| udpProxy.type | string | `"LoadBalancer"` | | -| updateStrategy | object | `{}` | | -| waitImage | object | `{"enabled":true,"pullPolicy":"IfNotPresent"}` | --------------------------------------------------------------------------- | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +## Kong for Kubernetes + +[Kong for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller) +is an open-source Ingress Controller for Kubernetes that offers +API management capabilities with a plugin architecture. + +This chart bootstraps all the components needed to run Kong on a +[Kubernetes](http://kubernetes.io) cluster using the +[Helm](https://helm.sh) package manager. 
+ +## TL;DR; + +```bash +helm repo add kong https://charts.konghq.com +helm repo update + +helm install kong/kong --generate-name +``` + +## Table of contents + +- [Prerequisites](#prerequisites) +- [Install](#install) +- [Uninstall](#uninstall) +- [FAQs](#faqs) +- [Kong Enterprise](#kong-enterprise) +- [Deployment Options](#deployment-options) + - [Database](#database) + - [DB-less deployment](#db-less-deployment) + - [Using the Postgres sub-chart](#using-the-postgres-sub-chart) + - [Postgres sub-chart considerations for OpenShift](#postgres-sub-chart-considerations-for-openshift) + - [Runtime package](#runtime-package) + - [Configuration method](#configuration-method) + - [Separate admin and proxy nodes](#separate-admin-and-proxy-nodes) + - [Standalone controller nodes](#standalone-controller-nodes) + - [Hybrid mode](#hybrid-mode) + - [Certificates](#certificates) + - [Control plane node configuration](#control-plane-node-configuration) + - [Data plane node configuration](#data-plane-node-configuration) + - [Cert Manager Integration](#cert-manager-integration) + - [CRD management](#crd-management) + - [InitContainers](#initcontainers) + - [HostAliases](#hostaliases) + - [Sidecar Containers](#sidecar-containers) + - [Migration Sidecar Containers](#migration-sidecar-containers) + - [User Defined Volumes](#user-defined-volumes) + - [User Defined Volume Mounts](#user-defined-volume-mounts) + - [Removing cluster-scoped permissions](#removing-cluster-scoped-permissions) + - [Using a DaemonSet](#using-a-daemonset) + - [Using dnsPolicy and dnsConfig](#using-dnspolicy-and-dnsconfig) + - [Example configurations](#example-configurations) +- [Configuration](#configuration) + - [Kong parameters](#kong-parameters) + - [Kong Service Parameters](#kong-service-parameters) + - [Admin Service mTLS](#admin-service-mtls) + - [Stream listens](#stream-listens) + - [Ingress Controller Parameters](#ingress-controller-parameters) + - [The `env` section](#the-env-section) + - [The `customEnv` section](#the-customenv-section) + - [General Parameters](#general-parameters) + - [The `env` section](#the-env-section-1) + - [The `customEnv` section](#the-customenv-section-1) + - [The `extraLabels` section](#the-extralabels-section) +- [Kong Enterprise Parameters](#kong-enterprise-parameters) + - [Overview](#overview) + - [Prerequisites](#prerequisites-1) + - [Kong Enterprise License](#kong-enterprise-license) + - [Kong Enterprise Docker registry access](#kong-enterprise-docker-registry-access) + - [Service location hints](#service-location-hints) + - [RBAC](#rbac) + - [Sessions](#sessions) + - [Email/SMTP](#emailsmtp) +- [Prometheus Operator integration](#prometheus-operator-integration) +- [Argo CD considerations](#argo-cd-considerations) +- [Changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) +- [Upgrading](https://github.com/Kong/charts/blob/main/charts/kong/UPGRADE.md) +- [Seeking help](#seeking-help) + +## Prerequisites + +- Kubernetes 1.17+. Older chart releases support older Kubernetes versions. + Refer to the [supported version matrix](https://docs.konghq.com/kubernetes-ingress-controller/latest/references/version-compatibility/#kubernetes) + and the [chart changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) + for information about the default chart controller versions and Kubernetes + versions supported by controller releases. +- PV provisioner support in the underlying infrastructure if persistence + is needed for Kong datastore. 
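+
+Before installing, it may be worth verifying that your cluster and client
+tooling satisfy these prerequisites. A quick sanity check (assuming `kubectl`
+and Helm 3 are already installed and pointed at the intended cluster):
+
+```bash
+# Compare the reported server version against the supported version matrix
+kubectl version
+
+# This README assumes Helm 3
+helm version
+
+# If you need persistence for a Kong datastore, confirm a StorageClass exists
+kubectl get storageclass
+```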
+
+## Install
+
+To install Kong:
+
+```bash
+helm repo add kong https://charts.konghq.com
+helm repo update
+
+helm install kong/kong --generate-name
+```
+
+## Uninstall
+
+To uninstall/delete a Helm release `my-release`:
+
+```bash
+helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the
+chart and deletes the release.
+
+> **Tip**: List all releases using `helm list`
+
+## FAQs
+
+Please read the
+[FAQs](https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md)
+document.
+
+## Kong Enterprise
+
+If using Kong Enterprise, several additional steps are necessary before
+installing the chart:
+
+- Set `enterprise.enabled` to `true` in `values.yaml` file.
+- Update values.yaml to use a Kong Enterprise image.
+- Satisfy the two prerequisites below for
+  [Enterprise License](#kong-enterprise-license) and
+  [Enterprise Docker Registry](#kong-enterprise-docker-registry-access).
+- (Optional) [set a `password` environment variable](#rbac) to create the
+  initial super-admin. Though not required, this is recommended for users that
+  wish to use RBAC, as it cannot be done after initial setup.
+
+Once you have these set, it is possible to install Kong Enterprise.
+
+Please read through
+[Kong Enterprise considerations](#kong-enterprise-parameters)
+to understand all settings that are enterprise specific.
+
+## Deployment Options
+
+Kong is a highly configurable piece of software that can be deployed
+in a number of different ways, depending on your use-case.
+
+All combinations of various runtimes, databases and configuration methods are
+supported by this Helm chart.
+The recommended approach is to use the Ingress Controller based configuration
+along with DB-less mode.
+
+The following sections detail the various high-level architecture options available:
+
+### Database
+
+Kong can run with or without a database (DB-less). By default, this chart
+installs Kong without a database.
+
+You can set the database via the `env.database` parameter. For more details, please
+read the [env](#the-env-section) section.
+
+#### DB-less deployment
+
+When deploying Kong in DB-less mode (`env.database: "off"`)
+and without the Ingress Controller (`ingressController.enabled: false`),
+you have to provide a [declarative configuration](https://docs.konghq.com/gateway-oss/latest/db-less-and-declarative-config/#the-declarative-configuration-format) for Kong to run.
+You can provide an existing ConfigMap
+(`dblessConfig.configMap`) or Secret (`dblessConfig.secret`) or place the whole
+configuration into the `values.yaml` (`dblessConfig.config`) parameter. See the
+example configuration in the default values.yaml for more details. You can use
+`--set-file dblessConfig.config=/path/to/declarative-config.yaml` in Helm
+commands to substitute in a complete declarative config file.
+
+Note that externally supplied ConfigMaps are not hashed or tracked in deployment annotations.
+Subsequent ConfigMap updates will require user-initiated new deployment rollouts
+to apply the new configuration. You should run `kubectl rollout restart deploy`
+after updating externally supplied ConfigMap content.
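+
+As a minimal sketch, an inline declarative configuration passed via
+`dblessConfig.config` could look like the following; the service, route, and
+`_format_version` shown are illustrative placeholders, not chart defaults:
+
+```yaml
+dblessConfig:
+  config: |
+    _format_version: "3.0"
+    services:
+      - name: example-service        # illustrative service
+        url: http://example.default.svc.cluster.local:80
+        routes:
+          - name: example-route      # illustrative route
+            paths:
+              - /example
+```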
+
+#### Using the Postgres sub-chart
+
+The chart can optionally spawn a Postgres instance using [Bitnami's Postgres
+chart](https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md)
+as a sub-chart. Set `postgresql.enabled=true` to enable the sub-chart. Enabling
+this will auto-populate Postgres connection settings in Kong's environment.
+
+The Postgres sub-chart is best used to quickly provision temporary environments
+without installing and configuring your database separately. For longer-lived
+environments, we recommend you manage your database outside the Kong Helm
+release.
+
+##### Postgres sub-chart considerations for OpenShift
+
+Due to the default `securityContexts` in the postgres sub-chart, you will need to add the following values to the `postgresql` section to get postgres running on OpenShift:
+
+```yaml
+  volumePermissions:
+    enabled: false
+    securityContext:
+      runAsUser: "auto"
+  primary:
+    containerSecurityContext:
+      enabled: false
+    podSecurityContext:
+      enabled: false
+```
+
+### Runtime package
+
+There are three different packages of Kong that are available:
+
+- **Kong Gateway**\
+  This is the [Open-Source](https://github.com/kong/kong) offering. It is a
+  full-blown API Gateway and Ingress solution with a wide array of functionality.
+  When Kong Gateway is combined with the Ingress based configuration method,
+  you get Kong for Kubernetes. This is the default deployment for this Helm
+  Chart.
+- **Kong Enterprise K8S**\
+  This package builds on top of the Open-Source Gateway and bundles in all
+  the Enterprise-only plugins as well.
+  When Kong Enterprise K8S is combined with the Ingress based
+  configuration method, you get Kong for Kubernetes Enterprise.
+  This package also comes with 24x7 support from Kong Inc.
+- **Kong Enterprise**\
+  This is the full-blown Enterprise package, which bundles all the
+  Enterprise functionality like Manager, Portal, Vitals, etc.
+  This package can't be run in DB-less mode.
+
+The package to run can be changed via `image.repository` and `image.tag`
+parameters. If you would like to run the Enterprise package, please read
+the [Kong Enterprise Parameters](#kong-enterprise-parameters) section.
+
+### Configuration method
+
+Kong can be configured via two methods:
+- **Ingress and CRDs**\
+  The configuration for Kong is done via `kubectl` and Kubernetes-native APIs.
+  This is also known as Kong Ingress Controller or Kong for Kubernetes and is
+  the default deployment pattern for this Helm Chart. The configuration
+  for Kong is managed via Ingress and a few
+  [Custom Resources](https://docs.konghq.com/kubernetes-ingress-controller/latest/concepts/custom-resources).
+  For more details, please read the
+  [documentation](https://docs.konghq.com/kubernetes-ingress-controller/)
+  on Kong Ingress Controller.
+  To configure and fine-tune the controller, please read the
+  [Ingress Controller Parameters](#ingress-controller-parameters) section.
+- **Admin API**\
+  This is the traditional method of running and configuring Kong.
+  By default, the Admin API of Kong is not exposed as a Service. This
+  can be controlled via `admin.enabled` and `env.admin_listen` parameters.
+
+### Separate admin and proxy nodes
+
+*Note: although this section is titled "Separate admin and proxy nodes", this
+split release technique is generally applicable to any deployment with
+different types of Kong nodes. Separating Admin API and proxy nodes is one of
+the more common use cases for splitting across multiple releases, but you can
+also split releases for split proxy and Developer Portal nodes, multiple groups
+of proxy nodes with separate listen configurations for network segmentation, etc.
+However, it does not apply to hybrid mode, as only the control plane release +interacts with the database.* + +Users may wish to split their Kong deployment into multiple instances that only +run some of Kong's services (i.e. you run `helm install` once for every +instance type you wish to create). + +To disable Kong services on an instance, you should set `SVC.enabled`, +`SVC.http.enabled`, `SVC.tls.enabled`, and `SVC.ingress.enabled` all to +`false`, where `SVC` is `proxy`, `admin`, `manager`, `portal`, or `portalapi`. + +The standard chart upgrade automation process assumes that there is only a +single Kong release in the Kong cluster, and runs both `migrations up` and +`migrations finish` jobs. To handle clusters split across multiple releases, +you should: +1. Upgrade one of the releases with `helm upgrade RELEASENAME -f values.yaml + --set migrations.preUpgrade=true --set migrations.postUpgrade=false`. +2. Upgrade all but one of the remaining releases with `helm upgrade RELEASENAME + -f values.yaml --set migrations.preUpgrade=false --set + migrations.postUpgrade=false`. +3. Upgrade the final release with `helm upgrade RELEASENAME -f values.yaml + --set migrations.preUpgrade=false --set migrations.postUpgrade=true`. + +This ensures that all instances are using the new Kong package before running +`kong migrations finish`. + +Users should note that Helm supports supplying multiple values.yaml files, +allowing you to separate shared configuration from instance-specific +configuration. For example, you may have a shared values.yaml that contains +environment variables and other common settings, and then several +instance-specific values.yamls that contain service configuration only. You can +then create releases with: + +```bash +helm install proxy-only -f shared-values.yaml -f only-proxy.yaml kong/kong +helm install admin-only -f shared-values.yaml -f only-admin.yaml kong/kong +``` + +### Standalone controller nodes + +The chart can deploy releases that contain the controller only, with no Kong +container, by setting `deployment.kong.enabled: false` in values.yaml. There +are several controller settings that must be populated manually in this +scenario and several settings that are useful when using multiple controllers: + +* `ingressController.env.kong_admin_url` must be set to the Kong Admin API URL. + If the Admin API is exposed by a service in the cluster, this should look + something like `https://my-release-kong-admin.kong-namespace.svc:8444` +* `ingressController.env.publish_service` must be set to the Kong proxy + service, e.g. `namespace/my-release-kong-proxy`. +* `ingressController.ingressClass` should be set to a different value for each + instance of the controller. +* `ingressController.env.kong_admin_filter_tag` should be set to a different value + for each instance of the controller. +* If using Kong Enterprise, `ingressController.env.kong_workspace` can + optionally create configuration in a workspace other than `default`. + +Standalone controllers require a database-backed Kong instance, as DB-less mode +requires that a single controller generate a complete Kong configuration. + +### Hybrid mode + +Kong supports [hybrid mode +deployments](https://docs.konghq.com/2.0.x/hybrid-mode/) as of Kong 2.0.0 and +[Kong Enterprise 2.1.0](https://docs.konghq.com/enterprise/2.1.x/deployment/hybrid-mode/). 
+These deployments split Kong nodes into control plane (CP) nodes, which provide
+the admin API and interact with the database, and data plane (DP) nodes, which
+provide the proxy and receive configuration from control plane nodes.
+
+You can deploy hybrid mode Kong clusters by [creating separate releases for each node
+type](#separate-admin-and-proxy-nodes), i.e. use separate control and data
+plane values.yamls that are then installed separately. The [control
+plane](#control-plane-node-configuration) and [data
+plane](#data-plane-node-configuration) configuration sections below cover the
+values.yaml specifics for each.
+
+Cluster certificates are not generated automatically. You must [create a
+certificate and key pair](#certificates) for intra-cluster communication.
+
+When upgrading the Kong version, you must [upgrade the control plane release
+first and then upgrade the data plane release](https://docs.konghq.com/gateway/latest/plan-and-deploy/hybrid-mode/#version-compatibility).
+
+#### Certificates
+
+> This example shows how to use Kong Hybrid mode with `cluster_mtls: shared`.
+> For an example of `cluster_mtls: pki` see the [hybrid-cert-manager example](https://github.com/Kong/charts/blob/main/charts/kong/example-values/hybrid-cert-manager/)
+
+Hybrid mode uses TLS to secure the CP/DP node communication channel, and
+requires certificates for it. You can generate these either using `kong hybrid
+gen_cert` on a local Kong installation or using OpenSSL:
+
+```bash
+openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) \
+  -keyout /tmp/cluster.key -out /tmp/cluster.crt \
+  -days 1095 -subj "/CN=kong_clustering"
+```
+
+You must then place these certificates in a Secret:
+
+```bash
+kubectl create secret tls kong-cluster-cert --cert=/tmp/cluster.crt --key=/tmp/cluster.key
+```
+
+#### Control plane node configuration
+
+You must configure the control plane nodes to mount the certificate secret on
+the container filesystem and serve it from the cluster listen. In values.yaml:
+
+```yaml
+secretVolumes:
+- kong-cluster-cert
+```
+
+```yaml
+env:
+  role: control_plane
+  cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt
+  cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key
+```
+
+Furthermore, you must enable the cluster listen and Kubernetes Service, and
+should typically disable the proxy:
+
+```yaml
+cluster:
+  enabled: true
+  tls:
+    enabled: true
+    servicePort: 8005
+    containerPort: 8005
+
+proxy:
+  enabled: false
+```
+
+Enterprise users with Vitals enabled must also enable the cluster telemetry
+service:
+
+```yaml
+clustertelemetry:
+  enabled: true
+  tls:
+    enabled: true
+    servicePort: 8006
+    containerPort: 8006
+```
+
+If using the ingress controller, you must also specify the DP proxy service as
+its publish target to keep Ingress status information up to date:
+
+```yaml
+ingressController:
+  env:
+    publish_service: hybrid/example-release-data-kong-proxy
+```
+
+Replace `hybrid` with your DP nodes' namespace and `example-release-data` with
+the name of the DP release.
+
+#### Data plane node configuration
+
+Data plane configuration also requires the certificate and `role`
+configuration, and the database should always be set to `off`. You must also
+trust the cluster certificate and indicate what hostname/port Kong should use
+to find control plane nodes.
+
+Though not strictly required, you should disable the admin service (it will not
+work on DP nodes anyway, but should be disabled to avoid creating an invalid
+Service resource).
+
+```yaml
+secretVolumes:
+- kong-cluster-cert
+```
+
+```yaml
+admin:
+  enabled: false
+```
+
+```yaml
+env:
+  role: data_plane
+  database: "off"
+  cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt
+  cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key
+  lua_ssl_trusted_certificate: /etc/secrets/kong-cluster-cert/tls.crt
+  cluster_control_plane: control-plane-release-name-kong-cluster.hybrid.svc.cluster.local:8005
+  cluster_telemetry_endpoint: control-plane-release-name-kong-clustertelemetry.hybrid.svc.cluster.local:8006 # Enterprise-only
+```
+
+Note that the `cluster_control_plane` value will differ depending on your
+environment. `control-plane-release-name` will change to your CP release name,
+and `hybrid` will change to whatever namespace the CP release resides in. See
+[Kubernetes' documentation on Service
+DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/)
+for more detail.
+
+If you use multiple Helm releases to manage different data plane configurations
+attached to the same control plane, setting the `deployment.hostname` field
+will help you keep track of which is which in the `/clustering/data-plane`
+endpoint.
+
+### Cert Manager Integration
+
+By default, Kong will create self-signed certificates on start for its TLS
+listens if you do not provide your own. The chart can create
+[cert-manager](https://cert-manager.io/docs/) Certificates for its Services and
+configure them for you. To use this integration, install cert-manager, create
+an issuer, set `certificates.enabled: true` in values.yaml, and set your issuer
+name in `certificates.issuer` or `certificates.clusterIssuer` depending on the
+issuer type.
+
+If you do not have an issuer available, you can install the example [self-signed ClusterIssuer](https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers)
+and set `certificates.clusterIssuer: selfsigned-issuer` for testing. You
+should, however, migrate to an issuer using a CA your clients trust for actual
+usage.
+
+The `proxy`, `admin`, `portal`, and `cluster` subsections under `certificates`
+let you choose hostnames, override issuers, set `subject` or set `privateKey` on a per-certificate basis for the
+proxy, admin API and Manager, Portal and Portal API, and hybrid mode mTLS
+services, respectively.
+
+To use hybrid mode, the control and data plane releases must use the same
+issuer for their cluster certificates.
+
+### CRD management
+
+Earlier versions of this chart (<2.0) created CRDs associated with the ingress
+controller as part of the release. This raised two challenges:
+
+- Multiple releases of the chart would conflict with one another, as each would
+  attempt to create its own set of CRDs.
+- Because deleting a CRD also deletes any custom resources associated with it,
+  deleting a release of the chart could destroy user configuration without
+  providing any means to restore it.
+
+Helm 3 introduced a simplified CRD management method that is safer, but
+requires some manual work when a chart adds or modifies CRDs: CRDs are created
+on install if they are not already present, but are not modified during
+release upgrades or deletes. Our chart release upgrade instructions call out
+when manual action is necessary to update CRDs. This CRD handling strategy is
+recommended for most users.
+
+Some users may wish to manage their CRDs automatically. If you manage your CRDs
+this way, we _strongly_ recommend that you back up all associated custom
+resources in the event you need to recover from unintended CRD deletion.
+
+While Helm 3's CRD management system is recommended, there is no simple means
+of migrating away from release-managed CRDs if you previously installed your
+release with the old system (you would need to back up your existing custom
+resources, delete your release, reinstall, and restore your custom resources
+after). As such, the chart detects if you currently use release-managed CRDs
+and continues to use the old CRD templates when using chart version 2.0+. If
+you do (your resources will have a `meta.helm.sh/release-name` annotation), we
+_strongly_ recommend that you back up all associated custom resources in the
+event you need to recover from unintended CRD deletion.
+
+### InitContainers
+
+The chart is able to deploy initContainers along with Kong. This can be very
+useful when there's a requirement for custom initialization. The
+`deployment.initContainers` field in values.yaml takes an array of objects that
+get appended as-is to the existing `spec.template.spec.initContainers` array in the
+kong deployment resource.
+
+### HostAliases
+
+The chart is able to inject host aliases into containers. This can be very useful
+when it's required to resolve additional domain names which can't be looked up
+directly from the DNS server. The `deployment.hostAliases` field in values.yaml
+takes an array of objects that is set as the `spec.template.spec.hostAliases` field in the
+kong deployment resource.
+
+### Sidecar Containers
+
+The chart can deploy additional containers along with the Kong and Ingress
+Controller containers, sometimes referred to as "sidecar containers". This can
+be useful to include network proxies or logging services along with Kong. The
+`deployment.sidecarContainers` field in values.yaml takes an array of objects
+that get appended as-is to the existing `spec.template.spec.containers` array
+in the Kong deployment resource.
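+
+For instance, a hypothetical logging sidecar could be added like this; the
+container name and image are placeholders, not chart defaults:
+
+```yaml
+deployment:
+  sidecarContainers:
+    - name: log-forwarder                # placeholder container name
+      image: example/log-forwarder:1.0   # placeholder image
+```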
+
+### Migration Sidecar Containers
+
+In the same way sidecar containers are attached to the Kong and Ingress
+Controller containers, the chart can add sidecars to the containers that run
+the migrations. The
+`migrations.sidecarContainers` field in values.yaml takes an array of objects
+that get appended as-is to the existing `spec.template.spec.containers` array
+in the pre-upgrade-migrations, post-upgrade-migrations and migration resources.
+Keep in mind that these containers should be finite and should terminate
+together with the migration containers; otherwise the migration may never be
+marked as finished and the chart deployment will reach its timeout.
+
+### User Defined Volumes
+
+The chart can deploy additional volumes along with Kong. This can be useful to
+include additional volumes which are required during the initialization phase
+(InitContainer). The `deployment.userDefinedVolumes` field in values.yaml
+takes an array of objects that get appended as-is to the existing
+`spec.template.spec.volumes` array in the kong deployment resource.
+
+### User Defined Volume Mounts
+
+The chart can mount user-defined volumes. The
+`deployment.userDefinedVolumeMounts` and
+`ingressController.userDefinedVolumeMounts` fields in values.yaml take an array
+of objects that get appended as-is to the existing
+`spec.template.spec.containers[].volumeMounts` and
+`spec.template.spec.initContainers[].volumeMounts` arrays in the kong deployment
+resource.
+
+### Removing cluster-scoped permissions
+
+You can limit the controller's access to allow it to only watch specific
+namespaces for namespaced resources. By default, the controller watches all
+namespaces. Limiting access requires several changes to configuration:
+
+- Set `ingressController.watchNamespaces` to a list of namespaces you want to
+  watch, as in the sketch below. The chart will automatically generate roles
+  for each namespace and assign them to the controller's service account.
+- Optionally set `ingressController.installCRDs=false` if your user role (the
+  role you use when running `helm install`, not the controller service
+  account's role) does not have access to get CRDs. By default, the chart
+  attempts to look up the controller CRDs for [a legacy behavior
+  check](#crd-management).
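+
+For example, a minimal values.yaml sketch restricting the controller to two
+namespaces (the namespace names are illustrative):
+
+```yaml
+ingressController:
+  watchNamespaces:
+    - default
+    - my-app   # illustrative namespace
+```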
+
+### Using a DaemonSet
+
+Setting `deployment.daemonset: true` deploys Kong using a [DaemonSet
+controller](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/)
+instead of a Deployment controller. This runs a Kong Pod on every kubelet in
+the Kubernetes cluster. For such a configuration it may be desirable to
+configure Pods to use the network of the host they run on instead of a
+dedicated network namespace. The benefit of this approach is that Kong can
+bind ports directly to the Kubernetes nodes' network interfaces, without the
+extra network translation imposed by NodePort Services. This can be achieved
+by setting `deployment.hostNetwork: true`.
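+
+A minimal sketch combining both settings:
+
+```yaml
+deployment:
+  daemonset: true
+  hostNetwork: true   # bind listens directly on each node's network interfaces
+```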
+
+### Using dnsPolicy and dnsConfig
+
+The chart is able to inject custom DNS configuration into containers. This can
+be useful when you have an EKS cluster with [NodeLocal DNSCache](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/)
+configured and attach AWS security groups directly to pods using the
+[security groups for pods feature](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html).
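+
+As a sketch, assuming a NodeLocal DNSCache listening on the conventional
+link-local address, the chart-level `dnsPolicy` and `dnsConfig` parameters
+(see [General Parameters](#general-parameters)) could be set like this:
+
+```yaml
+dnsPolicy: "None"
+dnsConfig:
+  nameservers:
+    - 169.254.20.10   # assumed NodeLocal DNSCache address
+  searches:
+    - svc.cluster.local
+    - cluster.local
+  options:
+    - name: ndots
+      value: "5"
+```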
+
+### Example configurations
+
+Several example values.yaml files are available in the
+[example-values](https://github.com/Kong/charts/blob/main/charts/kong/example-values/)
+directory.
+
+## Configuration
+
+### Kong parameters
+
+| Parameter | Description | Default |
+| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- |
+| image.repository | Kong image | `kong` |
+| image.tag | Kong image version | `3.5` |
+| image.effectiveSemver | Semantic version to use for version-dependent features (if `tag` is not a semver) | |
+| image.pullPolicy | Image pull policy | `IfNotPresent` |
+| image.pullSecrets | Image pull secrets | `null` |
+| replicaCount | Kong instance count. It has no effect when `autoscaling.enabled` is set to true | `1` |
+| plugins | Install custom plugins into Kong via ConfigMaps or Secrets | `{}` |
+| env | Additional [Kong configurations](https://getkong.org/docs/latest/configuration/) | |
+| customEnv | Custom Environment variables without `KONG_` prefix | |
+| migrations.preUpgrade | Run "kong migrations up" jobs | `true` |
+| migrations.postUpgrade | Run "kong migrations finish" jobs | `true` |
+| migrations.annotations | Annotations for migration job pods | `{"sidecar.istio.io/inject": "false"}` |
+| migrations.jobAnnotations | Additional annotations for migration jobs | `{}` |
+| migrations.backoffLimit | Override the system backoffLimit | `{}` |
+| waitImage.enabled | Spawn init containers that wait for the database before starting Kong | `true` |
+| waitImage.repository | Image used to wait for database to become ready. Uses the Kong image if none set | |
+| waitImage.tag | Tag for image used to wait for database to become ready | |
+| waitImage.pullPolicy | Wait image pull policy | `IfNotPresent` |
+| postgresql.enabled | Spin up a new postgres instance for Kong | `false` |
+| dblessConfig.configMap | Name of an existing ConfigMap containing the `kong.yml` file. This must have the key `kong.yml`.| `` |
+| dblessConfig.config | Yaml configuration file for the dbless (declarative) configuration of Kong | see `values.yaml` |
+
+#### Kong Service Parameters
+
+The various `SVC.*` parameters below are common to the various Kong services
+(the admin API, proxy, Kong Manager, the Developer Portal, and the Developer
+Portal API) and define their listener configuration, K8S Service properties,
+and K8S Ingress properties. Defaults are listed only if consistent across the
+individual services: see values.yaml for their individual default values.
+
+`SVC` below can be substituted with each of:
+* `proxy`
+* `udpProxy`
+* `admin`
+* `manager`
+* `portal`
+* `portalapi`
+* `cluster`
+* `clustertelemetry`
+* `status`
+
+`status` is intended for internal use within the cluster. Unlike other
+services it cannot be exposed externally, and cannot create a Kubernetes
+service or ingress. It supports the settings under `SVC.http` and `SVC.tls`
+only.
+
+`cluster` is used on hybrid mode control plane nodes. It does not support the
+`SVC.http.*` settings (cluster communications must be TLS-only) or the
+`SVC.ingress.*` settings (cluster communication requires TLS client
+authentication, which cannot pass through an ingress proxy). `clustertelemetry`
+is similar, and used when Vitals is enabled on Kong Enterprise control plane
+nodes.
+
+`udpProxy` is used for UDP stream listens (Kubernetes does not yet support
+mixed TCP/UDP LoadBalancer Services). It _does not_ support the `http`, `tls`,
+or `ingress` sections, as it is used only for stream listens.
+
+| Parameter | Description | Default |
+|------------------------------------|---------------------------------------------------------------------------------------|--------------------------|
+| SVC.enabled | Create Service resource for SVC (admin, proxy, manager, etc.) | |
+| SVC.http.enabled | Enables http on the service | |
+| SVC.http.servicePort | Service port to use for http | |
+| SVC.http.containerPort | Container port to use for http | |
+| SVC.http.nodePort | Node port to use for http | |
+| SVC.http.hostPort | Host port to use for http | |
+| SVC.http.parameters | Array of additional listen parameters | `[]` |
+| SVC.tls.enabled | Enables TLS on the service | |
+| SVC.tls.containerPort | Container port to use for TLS | |
+| SVC.tls.servicePort | Service port to use for TLS | |
+| SVC.tls.nodePort | Node port to use for TLS | |
+| SVC.tls.hostPort | Host port to use for TLS | |
+| SVC.tls.overrideServiceTargetPort | Override service port to use for TLS without touching Kong containerPort | |
+| SVC.tls.parameters | Array of additional listen parameters | `["http2"]` |
+| SVC.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | |
+| SVC.clusterIP | k8s service clusterIP | |
+| SVC.loadBalancerClass | loadBalancerClass to use for LoadBalancer provisioning | |
+| SVC.loadBalancerSourceRanges | Limit service access to CIDRs if set and service type is `LoadBalancer` | `[]` |
+| SVC.loadBalancerIP | Reuse an existing ingress static IP for the service | |
+| SVC.externalIPs | IPs for which nodes in the cluster will also accept traffic for the service | `[]` |
+| SVC.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
+| SVC.ingress.enabled | Enable ingress resource creation (works with SVC.type=ClusterIP) | `false` |
+| SVC.ingress.ingressClassName | Set the ingressClassName to associate this Ingress with an IngressClass | |
+| SVC.ingress.hostname | Ingress hostname | `""` |
+| SVC.ingress.path | Ingress path. | `/` |
+| SVC.ingress.pathType | Ingress pathType. One of `ImplementationSpecific`, `Exact` or `Prefix` | `ImplementationSpecific` |
+| SVC.ingress.hosts | Slice of hosts configurations, including `hostname`, `path` and `pathType` keys | `[]` |
+| SVC.ingress.tls | Name of secret resource or slice of `secretName` and `hosts` keys | |
+| SVC.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
+| SVC.ingress.labels | Ingress labels. Additional custom labels to add to the ingress. | `{}` |
+| SVC.annotations | Service annotations | `{}` |
+| SVC.labels | Service labels | `{}` |
+
+#### Admin Service mTLS
+
+On top of the common parameters listed above, the `admin` service supports parameters for mTLS client verification.
+If any of `admin.tls.client.caBundle` or `admin.tls.client.secretName` are set, the admin service will be configured to
+require mTLS client verification. If both are set, `admin.tls.client.caBundle` will take precedence.
+
+| Parameter | Description | Default |
+|-----------------------------|---------------------------------------------------------------------------------------------|---------|
+| admin.tls.client.caBundle | CA certificate to use for TLS verification of the Admin API client (PEM-encoded). | `""` |
+| admin.tls.client.secretName | CA certificate secret name - must contain a `tls.crt` key with the PEM-encoded certificate. | `""` |
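+
+A minimal sketch using the Secret-based option; the Secret name is
+illustrative, and the Secret must contain a `tls.crt` key holding the
+PEM-encoded CA certificate:
+
+```yaml
+admin:
+  tls:
+    client:
+      secretName: admin-client-ca   # illustrative Secret name
+```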
+
+#### Stream listens
+
+The proxy configuration additionally supports creating stream listens. These
+are configured using an array of objects under `proxy.stream` and
+`udpProxy.stream`:
+
+| Parameter | Description | Default |
+| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- |
+| protocol | The listen protocol, either "TCP" or "UDP" | |
+| containerPort | Container port to use for a stream listen | |
+| servicePort | Service port to use for a stream listen | |
+| nodePort | Node port to use for a stream listen | |
+| hostPort | Host port to use for a stream listen | |
+| parameters | Array of additional listen parameters | `[]` |
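+
+For example, a sketch declaring one TCP and one UDP stream listen (the port
+numbers are illustrative):
+
+```yaml
+proxy:
+  stream:
+    - protocol: TCP
+      containerPort: 9000
+      servicePort: 9000
+
+udpProxy:
+  enabled: true
+  stream:
+    - protocol: UDP
+      containerPort: 9999
+      servicePort: 9999
+```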
+
+### Ingress Controller Parameters
+
+All of the following properties are nested under the `ingressController`
+section of the `values.yaml` file:
+
+| Parameter | Description | Default |
+|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
+| enabled | Deploy the ingress controller, rbac and crd | true |
+| image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller |
+| image.tag | Version of the ingress controller | `3.0` |
+| image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | |
+| readinessProbe | Kong ingress controller's readiness probe | |
+| livenessProbe | Kong ingress controller's liveness probe | |
+| installCRDs | Legacy toggle for Helm 2-style CRD management. Should not be set [unless necessary due to cluster permissions](#removing-cluster-scoped-permissions). | false |
+| env | Specify Kong Ingress Controller configuration via environment variables | |
+| customEnv | Specify custom environment variables (without the CONTROLLER_ prefix) | |
+| ingressClass | The name of this controller's ingressClass | kong |
+| ingressClassAnnotations | Annotations for this controller's IngressClass resource | `{}` |
+| args | List of ingress-controller cli arguments | [] |
+| watchNamespaces | List of namespaces to watch. Watches all namespaces if empty | [] |
+| admissionWebhook.enabled | Whether to enable the validating admission webhook | true |
+| admissionWebhook.failurePolicy | How unrecognized errors from the admission endpoint are handled (Ignore or Fail) | Ignore |
+| admissionWebhook.port | The port the ingress controller will listen on for admission webhooks | 8080 |
+| admissionWebhook.address | The address the ingress controller will listen on for admission webhooks, if not 0.0.0.0 | |
+| admissionWebhook.annotations | Annotations for the Validation Webhook Configuration | |
+| admissionWebhook.certificate.provided | Use a provided certificate. When set to false, the chart will automatically generate a certificate. | false |
+| admissionWebhook.certificate.secretName | Name of the TLS secret for the provided webhook certificate | |
+| admissionWebhook.certificate.caBundle | PEM encoded CA bundle which will be used to validate the provided webhook certificate | |
+| admissionWebhook.namespaceSelector | Add namespaceSelector to the webhook. Please go to [Kubernetes doc for the specs](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) | |
+| admissionWebhook.timeoutSeconds | Kubernetes `apiserver`'s timeout when running this webhook. Default: 10 seconds. | |
+| userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | |
+| userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | |
+| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pod | 30 |
+| gatewayDiscovery.enabled | Enables Kong instance service discovery (for more details see [gatewayDiscovery section][gd_section]) | false |
+| gatewayDiscovery.generateAdminApiService | Generate the admin API service name based on the release name (for more details see [gatewayDiscovery section][gd_section]) | false |
+| gatewayDiscovery.adminApiService.namespace | The namespace of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | `.Release.Namespace` |
+| gatewayDiscovery.adminApiService.name | The name of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | "" |
+| konnect.enabled | Enable synchronisation of data plane configuration with Konnect Runtime Group | false |
+| konnect.runtimeGroupID | Konnect Runtime Group's unique identifier. | |
+| konnect.apiHostname | Konnect API hostname. Defaults to a production US-region. | us.kic.api.konghq.com |
+| konnect.tlsClientCertSecretName | Name of the secret that contains Konnect Runtime Group's client TLS certificate. | konnect-client-tls |
+| konnect.license.enabled | Enable automatic license provisioning for Gateways managed by Ingress Controller in Konnect mode. | false |
+| adminApi.tls.client.enabled | Enable TLS client verification for the Admin API. By default, Helm will generate certificates automatically. | false |
+| adminApi.tls.client.certProvided | Use user-provided certificates. If set to false, Helm will generate certificates. | false |
+| adminApi.tls.client.secretName | Client TLS certificate/key pair secret name. Can also be set when `certProvided` is false to enforce a generated secret's name. | "" |
+| adminApi.tls.client.caSecretName | CA TLS certificate/key pair secret name. Can also be set when `certProvided` is false to enforce a generated secret's name. | "" |
+
+[gd_section]: #the-gatewaydiscovery-section
+
+#### The `env` section
+
+For a complete list of all configuration values you can set in the
+`env` section, please read the Kong Ingress Controller's
+[configuration document](https://docs.konghq.com/kubernetes-ingress-controller/latest/reference/cli-arguments/).
+
+#### The `customEnv` section
+
+The `customEnv` section can be used to configure all environment variables other than Ingress Controller configuration.
+Any key value put under this section translates to environment variables.
+Every key is upper-cased before setting the environment variable.
+
+An example:
+
+```yaml
+kong:
+  ingressController:
+    customEnv:
+      TZ: "Europe/Berlin"
+```
+
+#### The `gatewayDiscovery` section
+
+Kong Ingress Controller v2.9 introduced gateway discovery, which allows
+the controller to discover Gateway instances that it should configure using
+an Admin API Kubernetes service.
+
+Using this feature requires a split release installation of Gateways and Ingress Controller.
+For exemplar `values.yaml` files which use this feature, please see the
+[examples README.md](./example-values/README.md), or use the
+[`ingress` chart](../ingress/README.md), which can handle this for you.
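+
+As a minimal sketch (the namespace and Service name are illustrative and must
+match the admin API Service exposed by your Gateway release):
+
+```yaml
+ingressController:
+  gatewayDiscovery:
+    enabled: true
+    adminApiService:
+      namespace: kong                     # assumed Gateway release namespace
+      name: gateway-release-kong-admin    # assumed admin API Service name
+```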
+
+##### Configuration
+
+You'll be able to configure this feature through the configuration section under
+`ingressController.gatewayDiscovery`:
+
+- If `ingressController.gatewayDiscovery.enabled` is set to `false`: the ingress controller
+  will control a pre-determined set of Gateway instances based on Admin API URLs
+  (provided under the hood via the `CONTROLLER_KONG_ADMIN_URL` environment variable).
+
+- If `ingressController.gatewayDiscovery.enabled` is set to `true`: the ingress controller
+  will dynamically locate Gateway instances by watching the specified Kubernetes
+  service (provided under the hood via the `CONTROLLER_KONG_ADMIN_SVC` environment variable).
+
+  The following admin API Service flags have to be present in order for gateway
+  discovery to work:
+
+  - `ingressController.gatewayDiscovery.adminApiService.name`
+  - `ingressController.gatewayDiscovery.adminApiService.namespace`
+
+  If you set `ingressController.gatewayDiscovery.generateAdminApiService` to `true`,
+  the chart will generate values for `name` and `namespace` based on the current release name and
+  namespace. This is useful when consuming the `kong` chart as a subchart.
+
+Additionally, you can control the addresses that are generated for your Gateways
+via the `--gateway-discovery-dns-strategy` CLI flag that can be set on the Ingress Controller
+(or an equivalent environment variable: `CONTROLLER_GATEWAY_DISCOVERY_DNS_STRATEGY`).
+It accepts 3 values which change the way that Gateway addresses are generated:
+- `service` - for service-scoped pod DNS names: `pod-ip-address.service-name.my-namespace.svc.cluster-domain.example`
+- `pod` - for namespace-scoped pod DNS names: `pod-ip-address.my-namespace.pod.cluster-domain.example`
+- `ip` (default, retains behavior introduced in v2.9) - for regular IP addresses
+
+When using `gatewayDiscovery`, you should consider configuring the Admin service to use mTLS client verification to make
+this interface secure.
+Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway instances.
+
+On the controller release side, that can be achieved by setting `ingressController.adminApi.tls.client.enabled` to `true`.
+By default, Helm will generate a certificate Secret named `<release name>-admin-api-keypair` and
+a CA Secret named `<release name>-admin-api-ca-keypair` for you.
+
+To provide your own cert, set `ingressController.adminApi.tls.client.certProvided` to
+`true`, `ingressController.adminApi.tls.client.secretName` to the name of the Secret containing your client cert, and `ingressController.adminApi.tls.client.caSecretName` to the name of the Secret containing your CA cert.
+
+On the Gateway release side, set either `admin.tls.client.secretName` to the name of your CA Secret or set `admin.tls.client.caBundle` to the CA certificate string.
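+
+A sketch of how the two releases pair up when relying on the chart-generated
+certificates; the release name `controller-release` is illustrative:
+
+```yaml
+# Controller release values: enable client verification with generated certs
+ingressController:
+  adminApi:
+    tls:
+      client:
+        enabled: true
+---
+# Gateway release values: trust the CA Secret generated by the controller release
+admin:
+  tls:
+    client:
+      secretName: controller-release-admin-api-ca-keypair   # assumed generated name
+```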
+
+### General Parameters
+
+| Parameter | Description | Default |
+| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- |
+| namespace | Namespace to deploy chart resources | |
+| deployment.kong.enabled | Enable or disable deploying Kong | `true` |
+| deployment.minReadySeconds | Minimum number of seconds for which newly created pods should be ready without any of its container crashing, for it to be considered available. | |
+| deployment.initContainers | Create initContainers. Please go to Kubernetes doc for the spec of the initContainers | |
+| deployment.daemonset | Use a DaemonSet instead of a Deployment | `false` |
+| deployment.hostname | Set the Deployment's `.spec.template.hostname`. Kong reports this as its hostname. | |
+| deployment.hostNetwork | Enable hostNetwork, which binds the ports to the host | `false` |
+| deployment.userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | |
+| deployment.userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | |
+| deployment.serviceAccount.create | Create Service Account for the Deployment / Daemonset and the migrations | `true` |
+| deployment.serviceAccount.automountServiceAccountToken | Enable ServiceAccount token automount in Kong deployment | `false` |
+| deployment.serviceAccount.name | Name of the Service Account, a default one will be generated if left blank. | "" |
+| deployment.serviceAccount.annotations | Annotations for the Service Account | {} |
+| deployment.test.enabled | Enable creation of test resources for use with "helm test" | `false` |
+| autoscaling.enabled | Set this to `true` to enable autoscaling | `false` |
+| autoscaling.minReplicas | Set minimum number of replicas | `2` |
+| autoscaling.maxReplicas | Set maximum number of replicas | `5` |
+| autoscaling.behavior | Sets the [behavior for scaling up and down](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior) | `{}` |
+| autoscaling.targetCPUUtilizationPercentage | Target percentage for when autoscaling takes effect. Only used if cluster does not support `autoscaling/v2` or `autoscaling/v2beta2` | `80` |
+| autoscaling.metrics | metrics used for autoscaling for clusters that support `autoscaling/v2` or `autoscaling/v2beta2` | See [values.yaml](values.yaml) |
+| updateStrategy | update strategy for deployment | `{}` |
+| readinessProbe | Kong readiness probe | |
+| livenessProbe | Kong liveness probe | |
+| startupProbe | Kong startup probe | |
+| lifecycle | Proxy container lifecycle hooks | see `values.yaml` |
+| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pods | 30 |
+| affinity | Node/pod affinities | |
+| topologySpreadConstraints | Control how Pods are spread across cluster among failure-domains | |
+| nodeSelector | Node labels for pod assignment | `{}` |
+| deploymentAnnotations | Annotations to add to deployment | see `values.yaml` |
+| podAnnotations | Annotations to add to each pod | see `values.yaml` |
+| podLabels | Labels to add to each pod | `{}` |
+| resources | Pod resource requests & limits | `{}` |
+| tolerations | List of node taints to tolerate | `[]` |
+| dnsPolicy | Pod dnsPolicy | |
+| dnsConfig | Pod dnsConfig | |
+| podDisruptionBudget.enabled | Enable PodDisruptionBudget for Kong | `false` |
+| podDisruptionBudget.maxUnavailable | Represents the minimum number of Pods that can be unavailable (integer or percentage) | `50%` |
+| podDisruptionBudget.minAvailable | Represents the number of Pods that must be available (integer or percentage) | |
+| podSecurityPolicy.enabled | Enable podSecurityPolicy for Kong | `false` |
+| podSecurityPolicy.labels | Labels to add to podSecurityPolicy for Kong | `{}` |
+| podSecurityPolicy.annotations | Annotations to add to podSecurityPolicy for Kong | `{}` |
+| podSecurityPolicy.spec | Collection of [PodSecurityPolicy settings](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#what-is-a-pod-security-policy) | |
+| priorityClassName | Set pod scheduling priority class for Kong pods | `""` |
+| secretVolumes | Mount given secrets as a volume in Kong container to override default certs and keys. | `[]` |
+| securityContext | Set the securityContext for Kong Pods | `{}` |
+| containerSecurityContext | Set the securityContext for Containers | See values.yaml |
+| serviceMonitor.enabled | Create ServiceMonitor for Prometheus Operator | `false` |
+| serviceMonitor.interval | Scraping interval | `30s` |
+| serviceMonitor.namespace | Where to create ServiceMonitor | |
+| serviceMonitor.labels | ServiceMonitor labels | `{}` |
+| serviceMonitor.targetLabels | ServiceMonitor targetLabels | `{}` |
+| serviceMonitor.honorLabels | ServiceMonitor honorLabels | `{}` |
+| serviceMonitor.metricRelabelings | ServiceMonitor metricRelabelings | `{}` |
+| extraConfigMaps | ConfigMaps to add to mounted volumes | `[]` |
+| extraSecrets | Secrets to add to mounted volumes | `[]` |
+| nameOverride | Replaces "kong" in resource names, like "RELEASENAME-nameOverride" instead of "RELEASENAME-kong" | `""` |
+| fullnameOverride | Overrides the entire resource name string | `""` |
+| extraObjects | Create additional k8s resources | `[]` |
+
+**Note:** If you are using `deployment.hostNetwork` to bind to lower ports ( < 1024), which may be the desired option (ports 80 and 443), you also
+need to tweak the `containerSecurityContext` configuration as in the example:
+
+```yaml
+containerSecurityContext: # run as root to bind to lower ports
+  capabilities:
+    add: [NET_BIND_SERVICE]
+  runAsGroup: 0
+  runAsNonRoot: false
+  runAsUser: 0
+```
+
+**Note:** The default `podAnnotations` values disable inbound proxying for Kuma
+and Istio. This is appropriate when using Kong as a gateway for external
+traffic inbound into the cluster.
+
+If you want to use Kong as an internal proxy within the cluster network, you
+should enable the inbound mesh proxies:
+
+```yaml
+# Enable inbound mesh proxying for Kuma and Istio
+podAnnotations:
+  kuma.io/gateway: disabled
+  traffic.sidecar.istio.io/includeInboundPorts: "*"
+```
+
+#### The `env` section
+
+The `env` section can be used to configure all properties of Kong.
+Any key value put under this section translates to environment variables
+used to control Kong's configuration. Every key is prefixed with `KONG_`
+and upper-cased before setting the environment variable.
+
+Furthermore, all `kong.env` parameters can also accept a mapping instead of a
+value to ensure the parameters can be set through configmaps and secrets.
+
+An example:
+
+```yaml
+kong:
+  env: # load PG password from a secret dynamically
+    pg_user: kong
+    pg_password:
+      valueFrom:
+        secretKeyRef:
+          key: kong
+          name: postgres
+    nginx_worker_processes: "2"
+```
+
+For a complete list of Kong configurations please check the
+[Kong configuration docs](https://docs.konghq.com/latest/configuration).
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+#### The `customEnv` section
+
+The `customEnv` section can be used to configure custom properties other than
+Kong's own configuration. Any key value put under this section translates to
+environment variables that can be used in Kong's plugin configurations. Every
+key is upper-cased before setting the environment variable.
+
+An example:
+
+```yaml
+kong:
+  customEnv:
+    api_token:
+      valueFrom:
+        secretKeyRef:
+          key: token
+          name: api_key
+    client_name: testClient
+```
+
+#### The `extraLabels` section
+
+The `extraLabels` section can be used to configure some extra labels that will be added to each Kubernetes object generated.
+
+For example, you can add the `acme.com/some-key: some-value` label to each Kubernetes object by putting the following in your Helm values:
+
+```yaml
+extraLabels:
+  acme.com/some-key: some-value
+```
+
+## Kong Enterprise Parameters
+
+### Overview
+
+Kong Enterprise requires some additional configuration not needed when using
+Kong Open-Source. To use Kong Enterprise, at the minimum,
+you need to do the following:
+
+- Set `enterprise.enabled` to `true` in `values.yaml` file.
+- Update values.yaml to use a Kong Enterprise image.
+- Satisfy the two prerequisites below for Enterprise License and
+  Enterprise Docker Registry.
+- (Optional) [set a `password` environment variable](#rbac) to create the
+  initial super-admin. Though not required, this is recommended for users that
+  wish to use RBAC, as it cannot be done after initial setup.
+
+Once you have these set, it is possible to install Kong Enterprise,
+but please make sure to review the below sections for other settings that
+you should consider configuring before installing Kong.
+
+Some of the more important configuration is grouped in sections
+under the `.enterprise` key in values.yaml, though most enterprise-specific
+configuration can be placed under the `.env` key.
+
+### Prerequisites
+
+#### Kong Enterprise License
+
+Kong Enterprise 2.3+ can run with or without a license. If you wish to run 2.3+
+without a license, you can skip this step and leave `enterprise.license_secret`
+unset. In this case only a limited subset of features will be available.
+Earlier versions require a license.
+
+If you have paid for a license, but you do not have a copy of yours, please
+contact Kong Support. Once you have it, you will need to store it in a Secret:
+
+```bash
+kubectl create secret generic kong-enterprise-license --from-file=license=./license.json
+```
+
+Set the secret name in `values.yaml`, in the `.enterprise.license_secret` key.
+Please ensure the above secret is created in the same namespace in which
+Kong is going to be deployed.
+
+#### Kong Enterprise Docker registry access
+
+Kong Enterprise versions 2.2 and earlier use a private Docker registry and
+require a pull secret. **If you use 2.3 or newer, you can skip this step.**
+
+You should have received credentials to log into Docker Hub after
+purchasing Kong Enterprise. After logging in, you can retrieve your API key
+from \<your username\> \> Edit Profile \> API Key. Use this to create registry
+secrets:
+
+```bash
+kubectl create secret docker-registry kong-enterprise-edition-docker \
+    --docker-server=hub.docker.io \
+    --docker-username=<username> \
+    --docker-password=<password>
+secret/kong-enterprise-edition-docker created
+```
+
+Set the secret names in `values.yaml` in the `image.pullSecrets` section.
+Again, please ensure the above secret is created in the same namespace in which
+Kong is going to be deployed.
+
+### Service location hints
+
+Kong Enterprise adds two GUIs, Kong Manager and the Kong Developer Portal, that
+must know where other Kong services (namely the admin and files APIs) can be
+accessed in order to function properly. Absent explicit configuration, Kong's
+default behavior for locating these services is unlikely to work in common
+Kubernetes environments.
Because of this, you should set each of `admin_gui_url`, +`admin_gui_api_url`, `proxy_url`, `portal_api_url`, `portal_gui_host`, and +`portal_gui_protocol` under the `.env` key in values.yaml to locations where +each of their respective services can be accessed to ensure that Kong services +can locate one another and properly set CORS headers. See the +[Property Reference documentation](https://docs.konghq.com/enterprise/latest/property-reference/) +for more details on these settings. + +### RBAC + +You can create a default RBAC superuser when initially running `helm install` +by setting a `password` environment variable under `env` in values.yaml. It +should be a reference to a secret key containing your desired password. This +will create a `kong_admin` admin whose token and basic-auth password match the +value in the secret. For example: + +```yaml +env: + password: + valueFrom: + secretKeyRef: + name: kong-enterprise-superuser-password + key: password +``` + +If using the ingress controller, it needs access to the token as well, by +specifying `kong_admin_token` in its environment variables: + +```yaml +ingressController: + env: + kong_admin_token: + valueFrom: + secretKeyRef: + name: kong-enterprise-superuser-password + key: password +``` + +Although the above examples both use the initial super-admin, we recommend +[creating a less-privileged RBAC user](https://docs.konghq.com/enterprise/latest/kong-manager/administration/rbac/add-user/) +for the controller after installing. It needs at least workspace admin +privileges in its workspace (`default` by default, settable by adding a +`workspace` variable under `ingressController.env`). Once you create the +controller user, add its token to a secret and update your `kong_admin_token` +variable to use it. Remove the `password` variable from Kong's environment +variables and the secret containing the super-admin token after. + +### Sessions + +Login sessions for Kong Manager and the Developer Portal make use of +[the Kong Sessions plugin](https://docs.konghq.com/enterprise/latest/kong-manager/authentication/sessions). +When configured via values.yaml, their configuration must be stored in Secrets, +as it contains an HMAC key. + +Kong Manager's session configuration must be configured via values.yaml, +whereas this is optional for the Developer Portal on versions 0.36+. Providing +Portal session configuration in values.yaml provides the default session +configuration, which can be overridden on a per-workspace basis. + +```bash +cat admin_gui_session_conf +``` + +```json +{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"} +``` + +```bash +cat portal_session_conf +``` + +```json +{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"} +``` + +```bash +kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf +``` + +```bash +secret/kong-session-config created +``` + +The exact plugin settings may vary in your environment. The `secret` should +always be changed for both configurations. + +After creating your secret, set its name in values.yaml in +`.enterprise.rbac.session_conf_secret`. If you create a Portal configuration, +add it at `env.portal_session_conf` using a secretKeyRef. 
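+
+As a sketch, assuming the `kong-session-config` Secret created above, the
+values.yaml wiring could look like this:
+
+```yaml
+enterprise:
+  rbac:
+    session_conf_secret: kong-session-config   # Secret created above
+env:
+  portal_session_conf:
+    valueFrom:
+      secretKeyRef:
+        name: kong-session-config
+        key: portal_session_conf
+```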
+
+### Email/SMTP
+
+Email is used to send invitations for
+[Kong Admins](https://docs.konghq.com/enterprise/latest/kong-manager/networking/email)
+and [Developers](https://docs.konghq.com/enterprise/latest/developer-portal/configuration/smtp).
+
+Email invitations rely on setting a number of SMTP settings at once. For
+convenience, these are grouped under the `.enterprise.smtp` key in values.yaml.
+Setting `.enterprise.smtp.disabled: true` will set `KONG_SMTP_MOCK=on` and
+allow Admin/Developer invites to proceed without sending email. Note, however,
+that these have limited functionality without sending email.
+
+If your SMTP server requires authentication, you must provide the `username`
+and `smtp_password_secret` keys under `.enterprise.smtp.auth`.
+`smtp_password_secret` must be a Secret containing an `smtp_password` key whose
+value is your SMTP password.
+
+By default, SMTP uses `AUTH PLAIN` when you provide credentials. If your
+provider requires `AUTH LOGIN`, set `smtp_auth_type: login`.
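+
+A sketch of the resulting values.yaml, assuming `smtp_auth_type` sits under
+`.enterprise.smtp` alongside `auth` (the account and Secret names are
+illustrative):
+
+```yaml
+enterprise:
+  smtp:
+    disabled: false
+    smtp_auth_type: login                        # only if your provider requires AUTH LOGIN
+    auth:
+      username: kong-mailer                      # illustrative SMTP account
+      smtp_password_secret: kong-smtp-password   # Secret with an smtp_password key
+```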
+
+## Nethgate
+To run Nethgate, set `deployment.nethgate.enabled=true` in values.yaml.
\ No newline at end of file
diff --git a/charts/posmoni/README.md b/charts/posmoni/README.md
index 1ab95c5d6..0f992f8e6 100644
--- a/charts/posmoni/README.md
+++ b/charts/posmoni/README.md
@@ -57,4 +57,4 @@ A Helm chart for installing and configuring Posmoni
 | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ |

----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
+Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
diff --git a/charts/rpc-saas-secretStore/README.md b/charts/rpc-saas-secretStore/README.md
index d5422f54c..fcf21160c 100644
--- a/charts/rpc-saas-secretStore/README.md
+++ b/charts/rpc-saas-secretStore/README.md
@@ -23,4 +23,4 @@ A Helm chart for deploying ClusterSecretStore for RPC Saas Service
 | rpcSaas.clustersecretstore.serviceAccountnamespace | string | `"dummy"` | |

----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
+Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
diff --git a/charts/validator-ejector/README.md b/charts/validator-ejector/README.md
index bbe684a1c..206d3e48b 100644
--- a/charts/validator-ejector/README.md
+++ b/charts/validator-ejector/README.md
@@ -98,4 +98,4 @@ A Helm chart for installing and configuring Lido's validator-ejector
 | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ |

----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
+Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
diff --git a/charts/validator-kapi/README.md b/charts/validator-kapi/README.md
index cb8df0dfe..a05588cb1 100644
--- a/charts/validator-kapi/README.md
+++ b/charts/validator-kapi/README.md
@@ -86,4 +86,4 @@ A Helm chart for installing and configuring Lido's validator-kapi
 | tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ |

----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
+Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0)
diff --git a/charts/validators/README.md b/charts/validators/README.md
index df1da99d2..5bbee12ce 100644
--- a/charts/validators/README.md
+++ b/charts/validators/README.md
@@ -122,4 +122,4 @@ A Helm chart for installing validators with the web3signer.
| web3signerEndpoint | string | `""` | Web3Signer Endpoint | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/vouch/README.md b/charts/vouch/README.md index 15fca5ceb..246990eee 100644 --- a/charts/vouch/README.md +++ b/charts/vouch/README.md @@ -105,4 +105,4 @@ A Helm chart for installing and configuring large scale ETH staking infrastructu | vouchFullConfig | string | `nil` | use vouchFullConfig: to provide all vouch.yaml values use vouch: to populate good defaults and to do minimal changes | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/web3signer/README.md b/charts/web3signer/README.md index bf01ee9c1..40259664b 100644 --- a/charts/web3signer/README.md +++ b/charts/web3signer/README.md @@ -74,4 +74,4 @@ A Helm chart for installing and configuring Web3signer | web3signerJavaOpts | string | `"-Xmx1g -Xms1g"` | Java Opts | ---------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) From 109292fac9eff3f7bcf6ac984261add261943e64 Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 13 Jun 2024 21:25:28 -0700 Subject: [PATCH 17/27] Fix: change DB size -v --- charts/juno-node/templates/juno-data-backup-cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 5a8e36903..922c4cb0d 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -60,7 +60,7 @@ spec: storageClassName: premium-rwo resources: requests: - storage: 200Gi + storage: {{ .Values.backupJunoDataJob.storageSize }} --- # ConfigMap for cloning disk manifest apiVersion: v1 From 46699397198e24f80139aba436e6b950c34a975c Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 20 Jun 2024 12:46:58 -0700 Subject: [PATCH 18/27] Fix: test purpose to exclude sst files --- charts/juno-node/templates/juno-data-backup-cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 922c4cb0d..78f083340 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -123,7 +123,7 @@ data: - | rm -rf /mnt/juno-tar-backup/*.tar && rm -rf /mnt/data/*.tar && - tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . && sleep 10 + tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found --exclude=./*.sst -C /mnt/data . 
&& sleep 10 volumeMounts: - name: juno-data-volume mountPath: /mnt/data From 05f09465c27d1fae1b26a90d612e2495e07ab55b Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 20 Jun 2024 13:56:28 -0700 Subject: [PATCH 19/27] Fix: test purpose to exclude sst files --- .../juno-node/templates/externalsecret-common.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml index 1ceec4d62..dd4b1cdc8 100644 --- a/charts/juno-node/templates/externalsecret-common.yaml +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -6,15 +6,15 @@ metadata: name: {{ $.Values.deployment.projectName }} namespace: {{ $.Values.deployment.namespace }} spec: - refreshInterval: {{ .refreshInterval }} + refreshInterval: {{ $.Values.secret.data.refreshInterval }} secretStoreRef: - name: {{ .secretStoreName }} - kind: {{ .secretStoreKind }} + name: {{ $.Values.secret.data.secretStoreName }} + kind: {{ $.Values.secret.data.secretStoreKind }} target: - name: {{ .targetName }} - creationPolicy: {{ .targetCreationPolicy }} + name: {{ $.Values.secret.data.targetName }} + creationPolicy: {{ $.Values.secret.data.targetCreationPolicy }} dataFrom: - extract: - key: {{ .dataFromKey }} # name of the secret in secret manager (GCP secret manager) + key: {{ $.Values.secret.data.dataFromKey }} # name of the secret in secret manager (GCP secret manager) {{- end }} {{- end }} \ No newline at end of file From b3cb75304aff347f8ba79436f003909a37d1443c Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 20 Jun 2024 14:03:45 -0700 Subject: [PATCH 20/27] Fix: test purpose to exclude sst files --- charts/juno-node/templates/externalsecret-common.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml index dd4b1cdc8..ef76bd71a 100644 --- a/charts/juno-node/templates/externalsecret-common.yaml +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -11,7 +11,7 @@ spec: name: {{ $.Values.secret.data.secretStoreName }} kind: {{ $.Values.secret.data.secretStoreKind }} target: - name: {{ $.Values.secret.data.targetName }} + name: {{ $.Values.secret.data.targetName }}-new creationPolicy: {{ $.Values.secret.data.targetCreationPolicy }} dataFrom: - extract: From 80025c830812a3812c665348c06fce29d3fd67b8 Mon Sep 17 00:00:00 2001 From: PhilexWong <142860658+PhilexWong@users.noreply.github.com> Date: Thu, 20 Jun 2024 14:18:24 -0700 Subject: [PATCH 21/27] Update externalsecret-common.yaml Signed-off-by: PhilexWong <142860658+PhilexWong@users.noreply.github.com> --- charts/juno-node/templates/externalsecret-common.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml index ef76bd71a..3e4c2bb40 100644 --- a/charts/juno-node/templates/externalsecret-common.yaml +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -3,7 +3,7 @@ apiVersion: external-secrets.io/v1beta1 kind: ExternalSecret metadata: - name: {{ $.Values.deployment.projectName }} + name: {{ $.Values.deployment.projectName }}-new namespace: {{ $.Values.deployment.namespace }} spec: refreshInterval: {{ $.Values.secret.data.refreshInterval }} @@ -17,4 +17,4 @@ spec: - extract: key: {{ $.Values.secret.data.dataFromKey }} # name of the secret in secret manager (GCP secret manager) {{- end }} -{{- end 
}} \ No newline at end of file +{{- end }} From 6c251b8d8fd506054b3eee281d58f7ade71d4039 Mon Sep 17 00:00:00 2001 From: PhilexWong <142860658+PhilexWong@users.noreply.github.com> Date: Thu, 20 Jun 2024 14:20:23 -0700 Subject: [PATCH 22/27] Update externalsecret-common.yaml Signed-off-by: PhilexWong <142860658+PhilexWong@users.noreply.github.com> --- charts/juno-node/templates/externalsecret-common.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml index 3e4c2bb40..3d22a40d6 100644 --- a/charts/juno-node/templates/externalsecret-common.yaml +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -3,7 +3,7 @@ apiVersion: external-secrets.io/v1beta1 kind: ExternalSecret metadata: - name: {{ $.Values.deployment.projectName }}-new + name: {{ $.Values.deployment.projectName }} namespace: {{ $.Values.deployment.namespace }} spec: refreshInterval: {{ $.Values.secret.data.refreshInterval }} @@ -11,7 +11,7 @@ spec: name: {{ $.Values.secret.data.secretStoreName }} kind: {{ $.Values.secret.data.secretStoreKind }} target: - name: {{ $.Values.secret.data.targetName }}-new + name: {{ $.Values.secret.data.targetName }} creationPolicy: {{ $.Values.secret.data.targetCreationPolicy }} dataFrom: - extract: From 0845d6c12563d08dbd4fcb208569d586312dbbbb Mon Sep 17 00:00:00 2001 From: PhilexWong <142860658+PhilexWong@users.noreply.github.com> Date: Thu, 20 Jun 2024 14:30:46 -0700 Subject: [PATCH 23/27] Update externalsecret-common.yaml Signed-off-by: PhilexWong <142860658+PhilexWong@users.noreply.github.com> --- charts/juno-node/templates/externalsecret-common.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/externalsecret-common.yaml b/charts/juno-node/templates/externalsecret-common.yaml index 3d22a40d6..c877ce3b3 100644 --- a/charts/juno-node/templates/externalsecret-common.yaml +++ b/charts/juno-node/templates/externalsecret-common.yaml @@ -3,7 +3,7 @@ apiVersion: external-secrets.io/v1beta1 kind: ExternalSecret metadata: - name: {{ $.Values.deployment.projectName }} + name: {{ $.Values.deployment.projectName }}-external-secret namespace: {{ $.Values.deployment.namespace }} spec: refreshInterval: {{ $.Values.secret.data.refreshInterval }} From b2fb07d0c5aafb32aa54fe0a27efef4e04fbce02 Mon Sep 17 00:00:00 2001 From: Philex Date: Thu, 20 Jun 2024 15:26:18 -0700 Subject: [PATCH 24/27] Fix: test purpose to exclude sst files - revert --- charts/juno-node/templates/juno-data-backup-cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml index 78f083340..922c4cb0d 100644 --- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml +++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml @@ -123,7 +123,7 @@ data: - | rm -rf /mnt/juno-tar-backup/*.tar && rm -rf /mnt/data/*.tar && - tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found --exclude=./*.sst -C /mnt/data . && sleep 10 + tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . 
&& sleep 10
        volumeMounts:
        - name: juno-data-volume
          mountPath: /mnt/data

From c56f72e456d56997e5963db77432fc990640ecce Mon Sep 17 00:00:00 2001
From: Philex
Date: Mon, 8 Jul 2024 17:42:46 -0700
Subject: [PATCH 25/27] Fix: rename the tar file

---
 charts/juno-node/templates/juno-data-backup-cronjob.yaml | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
index 922c4cb0d..afd5bbe94 100644
--- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml
+++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
@@ -143,8 +143,13 @@ data:
              secretKeyRef:
                name: {{ .Values.secret.data.targetName }}
                key: r2_secret_access_key
-          command: ["/bin/sh"]
-          args: ["-c", "rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network }}"]
+          command: ["/bin/sh", "-c"]
+          args:
+            - |
+              latestBlockNumber=$(curl --location 'https://free-rpc.nethermind.io/mainnet-juno' --header 'Content-Type: application/json' --data '{ "jsonrpc": "2.0","method": "starknet_blockNumber", "id": 1}' | jq '.result') &&
+              echo "latestBlockNumber is $latestBlockNumber" &&
+              mv /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}*.tar /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$latestBlockNumber.tar
+              rclone copy /mnt/juno-tar-backup/*.tar R2:/juno-snapshot/{{ .Values.backupJunoDataJob.network }}
           volumeMounts:
           - name: {{ .Values.deployment.namespace }}-rclone-config
             mountPath: /config/rclone

From 8785e596cf928cab7ba4c8dfb4507ba18b7c8e91 Mon Sep 17 00:00:00 2001
From: Philex
Date: Mon, 8 Jul 2024 17:43:27 -0700
Subject: [PATCH 26/27] Fix: rename the tar file - exclude sst file for testing

---
 charts/juno-node/templates/juno-data-backup-cronjob.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
index afd5bbe94..8740d39fa 100644
--- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml
+++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
@@ -123,7 +123,7 @@ data:
        - |
          rm -rf /mnt/juno-tar-backup/*.tar &&
          rm -rf /mnt/data/*.tar &&
-          tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found -C /mnt/data . && sleep 10
+          tar -czvf /mnt/juno-tar-backup/juno_{{ .Values.backupJunoDataJob.network }}_{{ .Values.deployment.imagetag }}_$(date +\%Y\%m\%d).tar --exclude=./lost+found --exclude=*.sst -C /mnt/data . && sleep 10
        volumeMounts:
        - name: juno-data-volume
          mountPath: /mnt/data

From 652810bf452571e868352c8f030f49aa2718388b Mon Sep 17 00:00:00 2001
From: Philex
Date: Mon, 15 Jul 2024 02:21:33 -0700
Subject: [PATCH 27/27] feat: add retention function

---
 .../templates/juno-data-backup-cronjob.yaml   | 64 +++++++++++++++++++
 1 file changed, 64 insertions(+)

diff --git a/charts/juno-node/templates/juno-data-backup-cronjob.yaml b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
index 8740d39fa..54e227a12 100644
--- a/charts/juno-node/templates/juno-data-backup-cronjob.yaml
+++ b/charts/juno-node/templates/juno-data-backup-cronjob.yaml
@@ -199,7 +199,71 @@ spec:
            - name: cloning-juno-manifest-volume
              configMap:
                name: {{ .Values.deployment.namespace }}-cloning-juno-manifest
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: {{ .Values.deployment.namespace }}-r2-retention-cronjob
+  namespace: {{ .Values.deployment.namespace }}
+spec:
+  schedule: "0 0 * * *" # Run every day at midnight
+  jobTemplate:
+    spec:
+      completions: 1
+      ttlSecondsAfterFinished: 300
+      template:
+        spec:
+          containers:
+          - name: {{ .Values.deployment.namespace }}-r2-retention
+            image: ubuntu:latest
+            command:
+            - /bin/bash
+            - -c
+            - |
+              #!/bin/bash
+              mkdir -p /var/lib/apt/lists/partial
+              apt-get update && apt-get install -y curl jq
+              # Constants
+              API_TOKEN="$API_TOKEN"
+              RETENTION_LIMIT="$RETENTION_LIMIT"
+              ACCOUNT_ID="$ACCOUNT_ID"
+              BUCKET_NAME="$BUCKET_NAME"
+
+              # Construct the Cloudflare API URL with account ID and bucket name
+              CLOUDFLARE_API_URL="https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/r2/buckets/$BUCKET_NAME/objects?prefix={{ .Values.backupJunoDataJob.network }}/"
+              echo "CLOUDFLARE_API_URL is $CLOUDFLARE_API_URL"
+              # Get the list of objects with the specified prefix
+              objects=$(curl -s -X GET "$CLOUDFLARE_API_URL" -H "Authorization: Bearer $API_TOKEN" | jq -r '.result')
+              # Check if the number of objects exceeds the retention limit
+              object_count=$(echo "$objects" | jq length)
+              echo "total backup number is $object_count"
+
+              if [ "$object_count" -le "$RETENTION_LIMIT" ]; then
+                echo "exiting...."
+                exit 0
+              fi
+              delete_number=$((object_count - RETENTION_LIMIT))
+              # Sort the objects by last_modified date and delete the oldest ones
+              echo "$objects" | jq -r '.[] | [.key, .last_modified] | @tsv' | sort -k2 | head -n "$delete_number" | while IFS=$'\t' read -r key last_modified; do
+                delete_url="https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/r2/buckets/$BUCKET_NAME/objects/${key}tar"
+                echo "Deleting ${key}tar at $delete_url"
+                delete_response=$(curl -s -X DELETE "$delete_url" -H "Authorization: Bearer $API_TOKEN")
+                echo "Delete response: $delete_response"
+              done
+            env:
+            - name: API_TOKEN
+              valueFrom:
+                secretKeyRef:
+                  name: {{ .Values.secret.data.targetName }}
+                  key: r2_api_token
+            - name: RETENTION_LIMIT
+              value: "{{ .Values.backupJunoDataJob.retensionLimit }}"
+            - name: ACCOUNT_ID
+              value: "d1cc7d59ae8f8dc2b1aa530c41b5c6ec"
+            - name: BUCKET_NAME
+              value: "juno-snapshot"
+          restartPolicy: OnFailure
---
# CronJob for Cleaning up Completed Pods and PVCs
apiVersion: batch/v1