From e3eb1caffa6d42b74fcbf23b55444ee5e4a90085 Mon Sep 17 00:00:00 2001 From: Hugo Hervieux Date: Wed, 7 Dec 2022 09:01:31 -0500 Subject: [PATCH 01/14] helm: split auth and proxy, templates --- .../teleport-cluster/templates/_helpers.tpl | 61 +++- .../templates/auth/_config.aws.tpl | 25 ++ .../templates/auth/_config.common.tpl | 64 ++++ .../templates/auth/_config.gcp.tpl | 16 + .../templates/auth/_config.scratch.tpl | 13 + .../templates/auth/_config.standalone.tpl | 3 + .../templates/{ => auth}/clusterrole.yaml | 0 .../templates/auth/clusterrolebinding.yaml | 31 ++ .../templates/auth/config.yaml | 28 ++ .../teleport-cluster/templates/auth/pdb.yaml | 17 ++ .../teleport-cluster/templates/auth/pvc.yaml | 23 ++ .../auth/service-previous-version.yaml | 16 + .../templates/auth/service.yaml | 21 ++ .../templates/auth/serviceaccount.yaml | 12 + .../templates/auth/statefulset.yaml | 256 ++++++++++++++++ .../templates/clusterrolebinding.yaml | 14 - .../teleport-cluster/templates/config.yaml | 171 ----------- .../templates/deployment.yaml | 289 ------------------ .../chart/teleport-cluster/templates/pdb.yaml | 18 -- .../templates/proxy/_config.aws.tpl | 3 + .../templates/proxy/_config.common.tpl | 72 +++++ .../templates/proxy/_config.gcp.tpl | 3 + .../templates/proxy/_config.scratch.tpl | 13 + .../templates/proxy/_config.standalone.tpl | 3 + .../templates/{ => proxy}/certificate.yaml | 18 +- .../templates/proxy/config.yaml | 13 + .../templates/proxy/deployment.yaml | 224 ++++++++++++++ .../teleport-cluster/templates/proxy/pdb.yaml | 17 ++ .../templates/{ => proxy}/service.yaml | 31 +- .../templates/proxy/serviceaccount.yaml | 11 + .../chart/teleport-cluster/templates/psp.yaml | 3 + .../chart/teleport-cluster/templates/pvc.yaml | 16 - .../templates/serviceaccount.yaml | 11 - .../chart/teleport-cluster/values.schema.json | 15 +- examples/chart/teleport-cluster/values.yaml | 42 ++- 35 files changed, 1019 insertions(+), 554 deletions(-) create mode 100644 examples/chart/teleport-cluster/templates/auth/_config.aws.tpl create mode 100644 examples/chart/teleport-cluster/templates/auth/_config.common.tpl create mode 100644 examples/chart/teleport-cluster/templates/auth/_config.gcp.tpl create mode 100644 examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl create mode 100644 examples/chart/teleport-cluster/templates/auth/_config.standalone.tpl rename examples/chart/teleport-cluster/templates/{ => auth}/clusterrole.yaml (100%) create mode 100644 examples/chart/teleport-cluster/templates/auth/clusterrolebinding.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/config.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/pdb.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/pvc.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/service-previous-version.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/service.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/serviceaccount.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/statefulset.yaml delete mode 100644 examples/chart/teleport-cluster/templates/clusterrolebinding.yaml delete mode 100644 examples/chart/teleport-cluster/templates/config.yaml delete mode 100644 examples/chart/teleport-cluster/templates/deployment.yaml delete mode 100644 examples/chart/teleport-cluster/templates/pdb.yaml create mode 100644 examples/chart/teleport-cluster/templates/proxy/_config.aws.tpl create mode 100644 
examples/chart/teleport-cluster/templates/proxy/_config.common.tpl create mode 100644 examples/chart/teleport-cluster/templates/proxy/_config.gcp.tpl create mode 100644 examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl create mode 100644 examples/chart/teleport-cluster/templates/proxy/_config.standalone.tpl rename examples/chart/teleport-cluster/templates/{ => proxy}/certificate.yaml (51%) create mode 100644 examples/chart/teleport-cluster/templates/proxy/config.yaml create mode 100644 examples/chart/teleport-cluster/templates/proxy/deployment.yaml create mode 100644 examples/chart/teleport-cluster/templates/proxy/pdb.yaml rename examples/chart/teleport-cluster/templates/{ => proxy}/service.yaml (54%) create mode 100644 examples/chart/teleport-cluster/templates/proxy/serviceaccount.yaml delete mode 100644 examples/chart/teleport-cluster/templates/pvc.yaml delete mode 100644 examples/chart/teleport-cluster/templates/serviceaccount.yaml diff --git a/examples/chart/teleport-cluster/templates/_helpers.tpl b/examples/chart/teleport-cluster/templates/_helpers.tpl index c52cf8ba187cf..c603a922e5371 100644 --- a/examples/chart/teleport-cluster/templates/_helpers.tpl +++ b/examples/chart/teleport-cluster/templates/_helpers.tpl @@ -2,6 +2,65 @@ Create the name of the service account to use if serviceAccount is not defined or serviceAccount.name is empty, use .Release.Name */}} -{{- define "teleport.serviceAccountName" -}} +{{- define "teleport-cluster.auth.serviceAccountName" -}} {{- coalesce .Values.serviceAccount.name .Release.Name -}} {{- end -}} + +{{- define "teleport-cluster.proxy.serviceAccountName" -}} +{{- coalesce .Values.serviceAccount.name .Release.Name -}}-proxy +{{- end -}} + +{{- define "teleport-cluster.version" -}} +{{- if .Values.teleportVersionOverride }}{{ .Values.teleportVersionOverride }}{{ else }}{{ .Chart.Version }}{{ end -}} +{{- end -}} + +{{- define "teleport-cluster.majorVersion" -}} +{{- (semver (include "teleport-cluster.version" .)).Major -}} +{{- end -}} + +{{/* Proxy selector labels */}} +{{- define "teleport-cluster.proxy.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +app.kubernetes.io/component: 'proxy' +{{- end -}} + +{{/* Proxy all labels */}} +{{- define "teleport-cluster.proxy.labels" -}} +{{ include "teleport-cluster.proxy.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}' +{{- end -}} + +{{/* Auth pods selector labels */}} +{{- define "teleport-cluster.auth.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +app.kubernetes.io/component: 'auth' +{{- end -}} + +{{/* All pods all labels */}} +{{- define "teleport-cluster.labels" -}} +{{ include "teleport-cluster.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . 
}}' +{{- end -}} + +{{/* All pods selector labels */}} +{{- define "teleport-cluster.selectorLabels" -}} +app.kubernetes.io/name: '{{ default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}' +app.kubernetes.io/instance: '{{ .Release.Name }}' +{{- end -}} + +{{/* Auth pods all labels */}} +{{- define "teleport-cluster.auth.labels" -}} +{{ include "teleport-cluster.auth.selectorLabels" . }} +helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +app.kubernetes.io/managed-by: '{{ .Release.Service }}' +app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' +teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}' +{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/auth/_config.aws.tpl b/examples/chart/teleport-cluster/templates/auth/_config.aws.tpl new file mode 100644 index 0000000000000..d0919e77592c9 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/auth/_config.aws.tpl @@ -0,0 +1,25 @@ +{{- define "teleport-cluster.auth.config.aws" -}} +{{ include "teleport-cluster.auth.config.common" . }} + storage: + type: dynamodb + region: {{ required "aws.region is required in chart values" .Values.aws.region }} + table_name: {{ required "aws.backendTable is required in chart values" .Values.aws.backendTable }} + {{- if .Values.aws.auditLogMirrorOnStdout }} + audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}', 'stdout://'] + {{- else }} + audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}'] + {{- end }} + audit_sessions_uri: s3://{{ required "aws.sessionRecordingBucket is required in chart values" .Values.aws.sessionRecordingBucket }} + continuous_backups: {{ required "aws.backups is required in chart values" .Values.aws.backups }} + {{- if .Values.aws.dynamoAutoScaling }} + auto_scaling: true + read_min_capacity: {{ required "aws.readMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMinCapacity }} + read_max_capacity: {{ required "aws.readMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMaxCapacity }} + read_target_value: {{ required "aws.readTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.readTargetValue }} + write_min_capacity: {{ required "aws.writeMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMinCapacity }} + write_max_capacity: {{ required "aws.writeMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMaxCapacity }} + write_target_value: {{ required "aws.writeTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.writeTargetValue }} + {{- else }} + auto_scaling: false + {{- end }} +{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/auth/_config.common.tpl b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl new file mode 100644 index 0000000000000..0cd1cc1e73c7b --- /dev/null +++ b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl @@ -0,0 +1,64 @@ +{{- define "teleport-cluster.auth.config.common" -}} +{{- $authentication := mustMergeOverwrite .Values.authentication (default dict .Values.authenticationSecondFactor) -}} +{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}} +version: v3 +kubernetes_service: + enabled: true + listen_addr: 0.0.0.0:3026 + public_addr: "{{ .Release.Name }}-auth.{{ .Release.Namespace }}.svc.cluster.local:3026" +{{- if 
.Values.kubeClusterName }} + kube_cluster_name: {{ .Values.kubeClusterName }} +{{- else }} + kube_cluster_name: {{ .Values.clusterName }} +{{- end }} +{{- if .Values.labels }} + labels: {{- toYaml .Values.labels | nindent 8 }} +{{- end }} +proxy_service: + enabled: false +ssh_service: + enabled: false +auth_service: + enabled: true + cluster_name: {{ required "clusterName is required in chart values" .Values.clusterName }} +{{- if .Values.enterprise }} + license_file: '/var/lib/license/license.pem' +{{- end }} + authentication: + type: "{{ required "authentication.type is required in chart values" (coalesce .Values.authenticationType $authentication.type) }}" + local_auth: {{ $authentication.localAuth }} +{{- if $authentication.connectorName }} + connector_name: "{{ $authentication.connectorName }}" +{{- end }} +{{- if $authentication.lockingMode }} + locking_mode: "{{ $authentication.lockingMode }}" +{{- end }} +{{- if $authentication.secondFactor }} + second_factor: "{{ $authentication.secondFactor }}" + {{- if not (or (eq $authentication.secondFactor "off") (eq $authentication.secondFactor "otp")) }} + webauthn: + rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }} + {{- if $authentication.webauthn }} + {{- if $authentication.webauthn.attestationAllowedCas }} + attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }} + {{- end }} + {{- if $authentication.webauthn.attestationDeniedCas }} + attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} +{{- if .Values.sessionRecording }} + session_recording: {{ .Values.sessionRecording }} +{{- end }} +{{- if .Values.proxyListenerMode }} + proxy_listener_mode: {{ .Values.proxyListenerMode }} +{{- end }} +teleport: + log: + severity: {{ $logLevel }} + output: {{ .Values.log.output }} + format: + output: {{ .Values.log.format }} + extra_fields: {{ .Values.log.extraFields | toJson }} +{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/auth/_config.gcp.tpl b/examples/chart/teleport-cluster/templates/auth/_config.gcp.tpl new file mode 100644 index 0000000000000..f55743b61ffa3 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/auth/_config.gcp.tpl @@ -0,0 +1,16 @@ +{{- define "teleport-cluster.auth.config.gcp" -}} +{{ include "teleport-cluster.auth.config.common" . 
}}
+  storage:
+    type: firestore
+    project_id: {{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}
+    collection_name: {{ required "gcp.backendTable is required in chart values" .Values.gcp.backendTable }}
+    {{- if .Values.gcp.credentialSecretName }}
+    credentials_path: /etc/teleport-secrets/gcp-credentials.json
+    {{- end }}
+    {{- if .Values.gcp.auditLogMirrorOnStdout }}
+    audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}', 'stdout://']
+    {{- else }}
+    audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}']
+    {{- end }}
+    audit_sessions_uri: "gs://{{ required "gcp.sessionRecordingBucket is required in chart values" .Values.gcp.sessionRecordingBucket }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}"
+{{- end -}}
diff --git a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl
new file mode 100644
index 0000000000000..8c77cca276cfc
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl
@@ -0,0 +1,13 @@
+{{- define "teleport-cluster.auth.config.scratch" -}}
+{{- required "'auth.teleportConfig' is required in scratch mode" .Values.auth.teleportConfig }}
+proxy_service:
+  enabled: false
+ssh_service:
+  enabled: false
+auth_service:
+  enabled: true
+{{- end -}}
+
+{{- define "teleport-cluster.auth.config.custom" -}}
+{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }}
+{{- end -}}
diff --git a/examples/chart/teleport-cluster/templates/auth/_config.standalone.tpl b/examples/chart/teleport-cluster/templates/auth/_config.standalone.tpl
new file mode 100644
index 0000000000000..db5ff58945b95
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/_config.standalone.tpl
@@ -0,0 +1,3 @@
+{{- define "teleport-cluster.auth.config.standalone" -}}
+{{ include "teleport-cluster.auth.config.common" . }}
+{{- end -}}
diff --git a/examples/chart/teleport-cluster/templates/clusterrole.yaml b/examples/chart/teleport-cluster/templates/auth/clusterrole.yaml
similarity index 100%
rename from examples/chart/teleport-cluster/templates/clusterrole.yaml
rename to examples/chart/teleport-cluster/templates/auth/clusterrole.yaml
diff --git a/examples/chart/teleport-cluster/templates/auth/clusterrolebinding.yaml b/examples/chart/teleport-cluster/templates/auth/clusterrolebinding.yaml
new file mode 100644
index 0000000000000..ba39919d59abb
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/clusterrolebinding.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ .Release.Name }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ .Release.Name }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+---
+# This ClusterRoleBinding allows the auth service account to validate Kubernetes tokens
+# This is required for proxies to join using their Kubernetes tokens
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ .Release.Name }}-auth
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: {{ include "teleport-cluster.auth.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/examples/chart/teleport-cluster/templates/auth/config.yaml b/examples/chart/teleport-cluster/templates/auth/config.yaml
new file mode 100644
index 0000000000000..fde5e1d5ec5e9
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/config.yaml
@@ -0,0 +1,28 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Release.Name }}-auth
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+{{- if $auth.annotations.config }}
+  annotations: {{- toYaml $auth.annotations.config | nindent 4 }}
+{{- end }}
+data:
+{{- if $auth.createProxyToken }}
+  apply-on-startup.yaml: |2
+    kind: token
+    version: v2
+    metadata:
+      name: {{ .Release.Name }}-proxy
+      expires: "3000-01-01T00:00:00Z"
+    spec:
+      roles: [Proxy]
+      join_method: kubernetes
+      kubernetes:
+        allow:
+          - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}"
+{{- end }}
+  teleport.yaml: |2
+    {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}}
diff --git a/examples/chart/teleport-cluster/templates/auth/pdb.yaml b/examples/chart/teleport-cluster/templates/auth/pdb.yaml
new file mode 100644
index 0000000000000..84a72e4ed6cde
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/pdb.yaml
@@ -0,0 +1,17 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+{{- if $auth.highAvailability.podDisruptionBudget.enabled }}
+{{- if .Capabilities.APIVersions.Has "policy/v1" }}
+apiVersion: policy/v1
+{{- else }}
+apiVersion: policy/v1beta1
+{{- end }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ .Release.Name }}-auth
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+spec:
+  minAvailable: {{ $auth.highAvailability.podDisruptionBudget.minAvailable }}
+  selector:
+    matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
+{{- end }}
diff --git a/examples/chart/teleport-cluster/templates/auth/pvc.yaml b/examples/chart/teleport-cluster/templates/auth/pvc.yaml
new file mode 100644
index 0000000000000..3036ffb0572ba
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/pvc.yaml
@@ -0,0 +1,23 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+{{/* $persistence keeps the legacy "standalone" values block working for backward compatibility */}}
+{{- $persistence := (coalesce $auth.standalone $auth.persistence) -}}
+{{- if .Values.persistence.enabled }}
+  {{/* Disable persistence for aws and gcp modes */}}
+  {{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) }}
+  {{/* No need to create a PVC if we reuse an existing claim */}}
+  {{- if not $persistence.existingClaimName }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ .Release.Name }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: {{ required "persistence.volumeSize is required in chart values" $persistence.volumeSize }}
+  {{- end }}
+  {{- end }}
+{{- end }}
diff --git a/examples/chart/teleport-cluster/templates/auth/service-previous-version.yaml b/examples/chart/teleport-cluster/templates/auth/service-previous-version.yaml
new file mode 100644
index 0000000000000..1547e00951688
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/service-previous-version.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Release.Name }}-auth-old
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+spec:
+  # This is a headless service. Resolving it returns the list of all auth pods running the previous major version.
+  # Proxies must not connect to auth pods from the previous major version.
+  # Proxy rollout should be held until this headless service no longer matches any pods.
+  clusterIP: "None"
+  # Publishing not-ready addresses ensures that unhealthy or terminating pods are still accounted for
+  publishNotReadyAddresses: true
+  selector:
+    {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
+    teleport.dev/majorVersion: {{ sub (include "teleport-cluster.majorVersion" . | atoi ) 1 | quote }}
diff --git a/examples/chart/teleport-cluster/templates/auth/service.yaml b/examples/chart/teleport-cluster/templates/auth/service.yaml
new file mode 100644
index 0000000000000..b6eb0f993d75a
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/service.yaml
@@ -0,0 +1,21 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ .Release.Name }}-auth
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+{{- if $auth.annotations.service }}
+  annotations: {{- toYaml $auth.annotations.service | nindent 4 }}
+{{- end }}
+spec:
+  ports:
+  - name: auth
+    port: 3025
+    targetPort: 3025
+    protocol: TCP
+  - name: kube
+    port: 3026
+    targetPort: 3026
+    protocol: TCP
+  selector: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 4 }}
diff --git a/examples/chart/teleport-cluster/templates/auth/serviceaccount.yaml b/examples/chart/teleport-cluster/templates/auth/serviceaccount.yaml
new file mode 100644
index 0000000000000..a066365c74305
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+{{- if $auth.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "teleport-cluster.auth.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+{{- if $auth.annotations.serviceAccount }}
+  annotations:
+{{- toYaml $auth.annotations.serviceAccount | nindent 4 }}
+{{- end -}}
+{{- end }}
diff --git a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml
new file mode 100644
index 0000000000000..68901cf106d27
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml
@@ -0,0 +1,256 @@
+{{- $auth := mustMergeOverwrite .Values .Values.auth -}}
+{{- $persistence := (coalesce .Values.standalone .Values.persistence) -}}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ .Release.Name }}-auth
+  namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "teleport-cluster.auth.labels" . | nindent 4 }}
+    app: {{ .Release.Name }}
+{{- if $auth.annotations.deployment }}
+  annotations: {{- toYaml $auth.annotations.deployment | nindent 4 }}
+{{- end }}
+spec:
+{{- if not (eq $auth.chartMode "standalone") }}
+  replicas: {{ $auth.highAvailability.replicaCount }}
+  minReadySeconds: {{ $auth.highAvailability.minReadySeconds }}
+  {{- else }}
+  replicas: 1
+  {{- end }}
+  serviceName: {{ .Release.Name }}-auth
+  selector:
+    matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        # ConfigMap checksum, to recreate the pod on config changes.
+        checksum/config: {{ include (print $.Template.BasePath "/auth/config.yaml") . | sha256sum }}
+{{- if $auth.annotations.pod }}
+        {{- toYaml $auth.annotations.pod | nindent 8 }}
+{{- end }}
+      labels:
+        {{- include "teleport-cluster.auth.labels" . | nindent 8 }}
+        app: {{ .Release.Name }}
+    spec:
+      affinity:
+{{- if $auth.affinity }}
+  {{- if $auth.highAvailability.requireAntiAffinity }}
+    {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }}
+  {{- end }}
+  {{- toYaml $auth.affinity | nindent 8 }}
+{{- else }}
+        podAntiAffinity:
+  {{- if $auth.highAvailability.requireAntiAffinity }}
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: app.kubernetes.io/instance
+                operator: In
+                values:
+                - {{ .Release.Name }}
+              - key: app.kubernetes.io/component
+                operator: In
+                values:
+                - auth
+            topologyKey: "kubernetes.io/hostname"
+  {{- else if gt (int $auth.highAvailability.replicaCount) 1 }}
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 50
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: app.kubernetes.io/instance
+                  operator: In
+                  values:
+                  - {{ .Release.Name }}
+                - key: app.kubernetes.io/component
+                  operator: In
+                  values:
+                  - auth
+              topologyKey: "kubernetes.io/hostname"
+  {{- end }}
+{{- end }}
+{{- if $auth.tolerations }}
+      tolerations: {{- toYaml $auth.tolerations | nindent 6 }}
+{{- end }}
+{{- if $auth.initContainers }}
+      initContainers:
+  {{- range $initContainer := $auth.initContainers }}
+    {{- if and (not $initContainer.resources) $auth.resources }}
+      {{- $_ := set $initContainer "resources" $auth.resources }}
+    {{- end }}
+    {{- list $initContainer | toYaml | nindent 8 }}
+    {{- /* Note: this will break if the user sets volumeMounts on their initContainers */}}
+          volumeMounts:
+  {{- if $auth.enterprise }}
+          - mountPath: /var/lib/license
+            name: "license"
+            readOnly: true
+  {{- end }}
+  {{- if and ($auth.gcp.credentialSecretName) (eq $auth.chartMode "gcp") }}
+          - mountPath: /etc/teleport-secrets
+            name: "gcp-credentials"
+            readOnly: true
+  {{- end }}
+          - 
mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" + {{- if $auth.extraVolumeMounts }} + {{- toYaml $auth.extraVolumeMounts | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} + containers: + - name: "teleport" + image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + imagePullPolicy: {{ $auth.imagePullPolicy }} + {{- if or $auth.extraEnv $auth.tls.existingCASecretName }} + env: + {{- if (gt (len $auth.extraEnv) 0) }} + {{- toYaml $auth.extraEnv | nindent 8 }} + {{- end }} + {{- if $auth.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} + {{- end }} + args: + - "--diag-addr=0.0.0.0:3000" + {{- if $auth.insecureSkipProxyTLSVerify }} + - "--insecure" + {{- end }} + {{- if $auth.createProxyToken }} + - "--apply-on-startup=/etc/teleport/apply-on-startup.yaml" + {{- end }} + {{- if $auth.extraArgs }} + {{- toYaml $auth.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: diag + containerPort: 3000 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to start + periodSeconds: 5 # poll health every 5s + failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s) + timeoutSeconds: {{ .Values.probeTimeoutSeconds }} + readinessProbe: + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 # wait 5s for agent to register + periodSeconds: 5 # poll health every 5s + failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s) + timeoutSeconds: {{ .Values.probeTimeoutSeconds }} +{{- if .Values.postStart.command }} + lifecycle: + postStart: + exec: + command: {{ toYaml .Values.postStart.command | nindent 14 }} +{{- end }} +{{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 10 }} +{{- end }} +{{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if .Values.enterprise }} + - mountPath: /var/lib/license + name: "license" + readOnly: true +{{- end }} + {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} + - mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 8 }} +{{- end }} +{{ if .Values.operator.enabled }} + - name: "operator" + image: '{{ .Values.operator.image }}:{{ include "teleport-cluster.version" . 
}}' + imagePullPolicy: {{ .Values.imagePullPolicy }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 +{{- if .Values.operator.resources }} + resources: + {{- toYaml .Values.operator.resources | nindent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.highAvailability.certManager.enabled }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + {{- else if .Values.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + {{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{ end }} + volumes: +{{- if .Values.enterprise }} + - name: license + secret: + secretName: "license" +{{- end }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} + - name: gcp-credentials + secret: + secretName: {{ .Values.gcp.credentialSecretName | quote }} +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ .Values.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-auth + - name: "data" + {{- if and (.Values.persistence.enabled) ( and (not (eq .Values.chartMode "gcp")) (not (eq .Values.chartMode "aws"))) }} + persistentVolumeClaim: + claimName: {{ if $persistence.existingClaimName }}{{ $persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }} + {{- else }} + emptyDir: {} + {{- end }} +{{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 6 }} +{{- end }} +{{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} +{{- end }} + serviceAccountName: {{ include "teleport-cluster.auth.serviceAccountName" . }} diff --git a/examples/chart/teleport-cluster/templates/clusterrolebinding.yaml b/examples/chart/teleport-cluster/templates/clusterrolebinding.yaml deleted file mode 100644 index 0a0c5e21ead8c..0000000000000 --- a/examples/chart/teleport-cluster/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ .Release.Name }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ .Release.Name }} -subjects: -- kind: ServiceAccount - name: {{ template "teleport.serviceAccountName" . 
}} - namespace: {{ .Release.Namespace }} -{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/config.yaml b/examples/chart/teleport-cluster/templates/config.yaml deleted file mode 100644 index 1d26381b7ee25..0000000000000 --- a/examples/chart/teleport-cluster/templates/config.yaml +++ /dev/null @@ -1,171 +0,0 @@ -{{- if not (eq .Values.chartMode "custom") -}} - {{- $authentication := mustMergeOverwrite .Values.authentication (default dict .Values.authenticationSecondFactor) -}} - {{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} - {{- if .Values.annotations.config }} - annotations: - {{- toYaml .Values.annotations.config | nindent 4 }} - {{- end }} -data: - teleport.yaml: | - {{- if eq .Values.proxyListenerMode "multiplex" }} - version: v2 - {{- end }} - teleport: - log: - severity: {{ $logLevel }} - output: {{ .Values.log.output }} - format: - output: {{ .Values.log.format }} - extra_fields: {{ .Values.log.extraFields | toJson }} - {{- if eq .Values.chartMode "aws" }} - storage: - type: dynamodb - region: {{ required "aws.region is required in chart values" .Values.aws.region }} - table_name: {{ required "aws.backendTable is required in chart values" .Values.aws.backendTable }} - {{- if .Values.aws.auditLogMirrorOnStdout }} - audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}', 'stdout://'] - {{- else }} - audit_events_uri: ['dynamodb://{{ required "aws.auditLogTable is required in chart values" .Values.aws.auditLogTable }}'] - {{- end }} - audit_sessions_uri: s3://{{ required "aws.sessionRecordingBucket is required in chart values" .Values.aws.sessionRecordingBucket }} - continuous_backups: {{ required "aws.backups is required in chart values" .Values.aws.backups }} - {{- if .Values.aws.dynamoAutoScaling }} - auto_scaling: true - read_min_capacity: {{ required "aws.readMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMinCapacity }} - read_max_capacity: {{ required "aws.readMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.readMaxCapacity }} - read_target_value: {{ required "aws.readTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.readTargetValue }} - write_min_capacity: {{ required "aws.writeMinCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMinCapacity }} - write_max_capacity: {{ required "aws.writeMaxCapacity is required when aws.dynamoAutoScaling is true" .Values.aws.writeMaxCapacity }} - write_target_value: {{ required "aws.writeTargetValue is required when aws.dynamoAutoScaling is true" .Values.aws.writeTargetValue }} - {{- else }} - auto_scaling: false - {{- end }} - - {{- else if eq .Values.chartMode "gcp" }} - storage: - type: firestore - project_id: {{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }} - collection_name: {{ required "gcp.backendTable is required in chart values" .Values.gcp.backendTable }} - {{- if .Values.gcp.credentialSecretName }} - credentials_path: /etc/teleport-secrets/gcp-credentials.json - {{- end }} - {{- if .Values.gcp.auditLogMirrorOnStdout }} - audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" 
"&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}', 'stdout://'] - {{- else }} - audit_events_uri: ['firestore://{{ required "gcp.auditLogTable is required in chart values" .Values.gcp.auditLogTable }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}'] - {{- end }} - audit_sessions_uri: "gs://{{ required "gcp.sessionRecordingBucket is required in chart values" .Values.gcp.sessionRecordingBucket }}?projectID={{ required "gcp.projectId is required in chart values" .Values.gcp.projectId }}{{ empty .Values.gcp.credentialSecretName | ternary "" "&credentialsPath=/etc/teleport-secrets/gcp-credentials.json"}}" - {{- end }} - auth_service: - enabled: true - cluster_name: {{ required "clusterName is required in chart values" .Values.clusterName }} - {{- if .Values.enterprise }} - license_file: '/var/lib/license/license.pem' - {{- end }} - authentication: - type: "{{ required "authentication.type is required in chart values" (coalesce .Values.authenticationType $authentication.type) }}" - local_auth: {{ $authentication.localAuth }} - {{- if $authentication.connectorName }} - connector_name: "{{ $authentication.connectorName }}" - {{- end }} - {{- if $authentication.lockingMode }} - locking_mode: "{{ $authentication.lockingMode }}" - {{- end }} - {{- if $authentication.secondFactor }} - second_factor: "{{ $authentication.secondFactor }}" - {{- if not (or (eq $authentication.secondFactor "off") (eq $authentication.secondFactor "otp")) }} - webauthn: - rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }} - {{- if $authentication.webauthn }} - {{- if $authentication.webauthn.attestationAllowedCas }} - attestation_allowed_cas: - {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }} - {{- end }} - {{- if $authentication.webauthn.attestationDeniedCas }} - attestation_denied_cas: - {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.sessionRecording }} - session_recording: {{ .Values.sessionRecording }} - {{- end }} - {{- if eq .Values.proxyListenerMode "multiplex" }} - proxy_listener_mode: multiplex - {{- end }} - kubernetes_service: - enabled: true - {{- if eq .Values.proxyListenerMode "multiplex" }} - listen_addr: 0.0.0.0:443 - {{- else if not .Values.proxyListenerMode }} - listen_addr: 0.0.0.0:3027 - {{- end }} - {{- if .Values.kubeClusterName }} - kube_cluster_name: {{ .Values.kubeClusterName }} - {{- else }} - kube_cluster_name: {{ .Values.clusterName }} - {{- end }} - {{- if .Values.labels }} - labels: - {{- toYaml .Values.labels | nindent 8 }} - {{- end }} - proxy_service: - {{- if .Values.publicAddr }} - public_addr: {{- toYaml .Values.publicAddr | nindent 8 }} - {{- else }} - public_addr: '{{ required "clusterName is required in chart values" .Values.clusterName }}:443' - {{- end }} - {{- if .Values.sshPublicAddr }} - ssh_public_addr: {{- toYaml .Values.sshPublicAddr | nindent 8 }} - {{- end }} - {{- if .Values.tunnelPublicAddr }} - tunnel_public_addr: {{- toYaml .Values.tunnelPublicAddr | nindent 8 }} - {{- end }} - {{- if not .Values.proxyListenerMode }} - kube_listen_addr: 0.0.0.0:3026 - {{- if .Values.kubePublicAddr }} - kube_public_addr: {{- toYaml .Values.kubePublicAddr | nindent 8 }} - {{- end }} - mysql_listen_addr: 0.0.0.0:3036 - {{- if .Values.mysqlPublicAddr }} - 
mysql_public_addr: {{- toYaml .Values.mysqlPublicAddr | nindent 8 }} - {{- end }} - {{- if .Values.separatePostgresListener }} - postgres_listen_addr: 0.0.0.0:5432 - {{- if .Values.postgresPublicAddr }} - postgres_public_addr: {{- toYaml .Values.postgresPublicAddr | nindent 8 }} - {{- else }} - postgres_public_addr: {{ .Values.clusterName }}:5432 - {{- end }} - {{- end }} - {{- if .Values.separateMongoListener }} - mongo_listen_addr: 0.0.0.0:27017 - {{- if .Values.mongoPublicAddr }} - mongo_public_addr: {{- toYaml .Values.mongoPublicAddr | nindent 8 }} - {{- else }} - mongo_public_addr: {{ .Values.clusterName }}:27017 - {{- end }} - {{- end }} - {{- end }} - enabled: true - {{- if or .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }} - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - {{- else if .Values.acme }} - acme: - enabled: {{ .Values.acme }} - email: {{ required "acmeEmail is required in chart values" .Values.acmeEmail }} - {{- if .Values.acmeURI }} - uri: {{ .Values.acmeURI }} - {{- end }} - {{- end }} - ssh_service: - enabled: false -{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/deployment.yaml b/examples/chart/teleport-cluster/templates/deployment.yaml deleted file mode 100644 index 07df27e5cb190..0000000000000 --- a/examples/chart/teleport-cluster/templates/deployment.yaml +++ /dev/null @@ -1,289 +0,0 @@ -{{- if and (.Values.acme) (gt (int .Values.highAvailability.replicaCount) 1) }} -{{- fail "Cannot enable built-in ACME support with more than one replica, use highAvailability.certManager.enabled or tls.existingSecretName instead" }} -{{- end }} -{{- if and (eq .Values.chartMode "standalone") (gt (int .Values.highAvailability.replicaCount) 1) }} -{{- fail "Cannot enable multiple replicas in standalone mode, use a different chartMode which supports high availability - see README and docs" }} -{{- end }} -{{- if and .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }} -{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }} -{{- end }} -{{- if and .Values.acme .Values.tls.existingSecretName }} -{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }} -{{- end }} -{{- define "imageVersion" -}} -{{ if .Values.teleportVersionOverride }}{{ .Values.teleportVersionOverride }}{{ else }}{{ .Chart.Version }}{{ end }} -{{- end }} - {{- $persistence := (coalesce .Values.standalone .Values.persistence) -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ .Release.Name }} - {{- if .Values.annotations.deployment }} - annotations: - {{- toYaml .Values.annotations.deployment | nindent 4 }} - {{- end }} -spec: - {{- if not (eq .Values.chartMode "standalone") }} - replicas: {{ .Values.highAvailability.replicaCount }} - minReadySeconds: {{ .Values.highAvailability.minReadySeconds }} - {{- else }} - replicas: 1 - {{- end }} - {{- if eq .Values.chartMode "standalone" }} - strategy: - type: Recreate - {{- end }} - selector: - matchLabels: - app: {{ .Release.Name }} - template: - metadata: - annotations: - # ConfigMap checksum, to recreate the pod on config changes. - checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . 
| sha256sum }} -{{- if .Values.annotations.pod }} - {{- toYaml .Values.annotations.pod | nindent 8 }} -{{- end }} - labels: - app: {{ .Release.Name }} - spec: - {{- if or .Values.affinity (gt (int .Values.highAvailability.replicaCount) 1) }} - affinity: - {{- if .Values.affinity }} - {{- if .Values.highAvailability.requireAntiAffinity }} - {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }} - {{- end }} - {{- toYaml .Values.affinity | nindent 8 }} - {{- else }} - podAntiAffinity: - {{- if .Values.highAvailability.requireAntiAffinity }} - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - {{ .Release.Name }} - topologyKey: "kubernetes.io/hostname" - {{- else if gt (int .Values.highAvailability.replicaCount) 1 }} - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 50 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - {{ .Release.Name }} - topologyKey: "kubernetes.io/hostname" - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.tolerations }} - tolerations: - {{- toYaml .Values.tolerations | nindent 6 }} - {{- end }} -{{- if .Values.initContainers }} - initContainers: {{- toYaml .Values.initContainers | nindent 6 }} - {{- if .Values.resources }} - resources: - {{- toYaml .Values.resources | nindent 10 }} - {{- end }} - volumeMounts: - {{- if .Values.enterprise }} - - mountPath: /var/lib/license - name: "license" - readOnly: true - {{- end }} - {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - - mountPath: /etc/teleport-secrets - name: "gcp-credentials" - readOnly: true - {{- end }} - {{- if .Values.highAvailability.certManager.enabled }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - {{- else if .Values.tls.existingSecretName }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - {{- end }} - - mountPath: /etc/teleport - name: "config" - readOnly: true - - mountPath: /var/lib/teleport - name: "data" - {{- if .Values.extraVolumeMounts }} - {{- toYaml .Values.extraVolumeMounts | nindent 8 }} - {{- end }} -{{- end }} - containers: - - name: "teleport" - image: '{{ if .Values.enterprise }}{{ .Values.enterpriseImage }}{{ else }}{{ .Values.image }}{{ end }}:{{ template "imageVersion" . 
}}' - imagePullPolicy: {{ .Values.imagePullPolicy }} - {{- if or .Values.extraEnv .Values.tls.existingCASecretName }} - env: - {{- if (gt (len .Values.extraEnv) 0) }} - {{- toYaml .Values.extraEnv | nindent 8 }} - {{- end }} - {{- if .Values.tls.existingCASecretName }} - - name: SSL_CERT_FILE - value: /etc/teleport-tls-ca/ca.pem - {{- end }} - {{- end }} - args: - - "--diag-addr=0.0.0.0:3000" - {{- if .Values.insecureSkipProxyTLSVerify }} - - "--insecure" - {{- end }} - {{- if .Values.extraArgs }} - {{- toYaml .Values.extraArgs | nindent 8 }} - {{- end }} - ports: - - name: diag - containerPort: 3000 - protocol: TCP - livenessProbe: - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 # wait 5s for agent to start - periodSeconds: 5 # poll health every 5s - failureThreshold: 6 # consider agent unhealthy after 30s (6 * 5s) - timeoutSeconds: {{ .Values.probeTimeoutSeconds }} - readinessProbe: - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 # wait 5s for agent to register - periodSeconds: 5 # poll health every 5s - failureThreshold: 12 # consider agent unhealthy after 60s (12 * 5s) - timeoutSeconds: {{ .Values.probeTimeoutSeconds }} -{{- if .Values.postStart.command }} - lifecycle: - postStart: - exec: - command: {{ toYaml .Values.postStart.command | nindent 14 }} -{{- end }} -{{- if .Values.resources }} - resources: - {{- toYaml .Values.resources | nindent 10 }} -{{- end }} -{{- if .Values.securityContext }} - securityContext: {{- toYaml .Values.securityContext | nindent 10 }} -{{- end }} - volumeMounts: -{{- if .Values.enterprise }} - - mountPath: /var/lib/license - name: "license" - readOnly: true -{{- end }} - {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - - mountPath: /etc/teleport-secrets - name: "gcp-credentials" - readOnly: true -{{- end }} -{{- if .Values.highAvailability.certManager.enabled }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true -{{- else if .Values.tls.existingSecretName }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true -{{- end }} -{{- if .Values.tls.existingCASecretName }} - - mountPath: /etc/teleport-tls-ca - name: "teleport-tls-ca" - readOnly: true -{{- end }} - - mountPath: /etc/teleport - name: "config" - readOnly: true - - mountPath: /var/lib/teleport - name: "data" -{{- if .Values.extraVolumeMounts }} - {{- toYaml .Values.extraVolumeMounts | nindent 8 }} -{{- end }} -{{ if .Values.operator.enabled }} - - name: "operator" - image: '{{ .Values.operator.image }}:{{ template "imageVersion" . 
}}' - imagePullPolicy: {{ .Values.imagePullPolicy }} - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 -{{- if .Values.operator.resources }} - resources: - {{- toYaml .Values.operator.resources | nindent 10 }} -{{- end }} - volumeMounts: - {{- if .Values.highAvailability.certManager.enabled }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - {{- else if .Values.tls.existingSecretName }} - - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - {{- end }} - - mountPath: /etc/teleport - name: "config" - readOnly: true - - mountPath: /var/lib/teleport - name: "data" -{{ end }} - volumes: -{{- if .Values.enterprise }} - - name: license - secret: - secretName: "license" -{{- end }} -{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - - name: gcp-credentials - secret: - secretName: {{ .Values.gcp.credentialSecretName }} -{{- end }} -{{- if .Values.highAvailability.certManager.enabled }} - - name: teleport-tls - secret: - secretName: teleport-tls -{{- else if .Values.tls.existingSecretName }} - - name: teleport-tls - secret: - secretName: {{ .Values.tls.existingSecretName }} -{{- end }} -{{- if .Values.tls.existingCASecretName }} - - name: teleport-tls-ca - secret: - secretName: {{ .Values.tls.existingCASecretName }} -{{- end }} - - name: "config" - configMap: - name: {{ .Release.Name }} - - name: "data" - {{- if and (.Values.persistence.enabled) (or (eq .Values.chartMode "standalone") (eq .Values.chartMode "custom")) }} - persistentVolumeClaim: - claimName: {{ if $persistence.existingClaimName }}{{ $persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }} - {{- else }} - emptyDir: {} - {{- end }} -{{- if .Values.extraVolumes }} - {{- toYaml .Values.extraVolumes | nindent 6 }} -{{- end }} -{{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName }} -{{- end }} - serviceAccountName: {{ template "teleport.serviceAccountName" . }} diff --git a/examples/chart/teleport-cluster/templates/pdb.yaml b/examples/chart/teleport-cluster/templates/pdb.yaml deleted file mode 100644 index 6e3f5c3f117a4..0000000000000 --- a/examples/chart/teleport-cluster/templates/pdb.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if .Values.highAvailability.podDisruptionBudget.enabled }} -{{- if .Capabilities.APIVersions.Has "policy/v1" }} -apiVersion: policy/v1 -{{- else }} -apiVersion: policy/v1beta1 -{{- end }} -kind: PodDisruptionBudget -metadata: - name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ .Release.Name }} -spec: - minAvailable: {{ .Values.highAvailability.podDisruptionBudget.minAvailable }} - selector: - matchLabels: - app: {{ .Release.Name }} -{{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.aws.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.aws.tpl new file mode 100644 index 0000000000000..3e4d97a34770c --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/_config.aws.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.aws" -}} +{{ include "teleport-cluster.proxy.config.common" . 
}} +{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl new file mode 100644 index 0000000000000..cab6d8295bdcd --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl @@ -0,0 +1,72 @@ +{{- define "teleport-cluster.proxy.config.common" -}} +{{- $logLevel := (coalesce .Values.logLevel .Values.log.level "INFO") -}} +version: v3 +teleport: + join_params: + method: kubernetes + token_name: "{{.Release.Name}}-proxy" + auth_server: "{{ .Release.Name }}-auth.{{ .Release.Namespace }}.svc.cluster.local:3025" + log: + severity: {{ $logLevel }} + output: {{ .Values.log.output }} + format: + output: {{ .Values.log.format }} + extra_fields: {{ .Values.log.extraFields | toJson }} +ssh_service: + enabled: false +auth_service: + enabled: false +proxy_service: + enabled: true +{{- if .Values.publicAddr }} + public_addr: {{- toYaml .Values.publicAddr | nindent 8 }} +{{- else }} + public_addr: '{{ required "clusterName is required in chart values" .Values.clusterName }}:443' +{{- end }} +{{- if eq .Values.proxyListenerMode "separate" }} + listen_addr: 0.0.0.0:3023 +{{- if .Values.sshPublicAddr }} + ssh_public_addr: {{- toYaml .Values.sshPublicAddr | nindent 8 }} +{{- end }} + tunnel_listen_addr: 0.0.0.0:3024 +{{- if .Values.tunnelPublicAddr }} + tunnel_public_addr: {{- toYaml .Values.tunnelPublicAddr | nindent 8 }} +{{- end }} + kube_listen_addr: 0.0.0.0:3026 + {{- if .Values.kubePublicAddr }} + kube_public_addr: {{- toYaml .Values.kubePublicAddr | nindent 8 }} + {{- end }} + mysql_listen_addr: 0.0.0.0:3036 + {{- if .Values.mysqlPublicAddr }} + mysql_public_addr: {{- toYaml .Values.mysqlPublicAddr | nindent 8 }} + {{- end }} + {{- if .Values.separatePostgresListener }} + postgres_listen_addr: 0.0.0.0:5432 + {{- if .Values.postgresPublicAddr }} + postgres_public_addr: {{- toYaml .Values.postgresPublicAddr | nindent 8 }} + {{- else }} + postgres_public_addr: {{ .Values.clusterName }}:5432 + {{- end }} + {{- end }} + {{- if .Values.separateMongoListener }} + mongo_listen_addr: 0.0.0.0:27017 + {{- if .Values.mongoPublicAddr }} + mongo_public_addr: {{- toYaml .Values.mongoPublicAddr | nindent 8 }} + {{- else }} + mongo_public_addr: {{ .Values.clusterName }}:27017 + {{- end }} + {{- end }} +{{- end }} +{{- if or .Values.highAvailability.certManager.enabled .Values.tls.existingSecretName }} + https_keypairs: + - key_file: /etc/teleport-tls/tls.key + cert_file: /etc/teleport-tls/tls.crt +{{- else if .Values.acme }} + acme: + enabled: {{ .Values.acme }} + email: {{ required "acmeEmail is required in chart values" .Values.acmeEmail }} + {{- if .Values.acmeURI }} + uri: {{ .Values.acmeURI }} + {{- end }} +{{- end }} +{{- end -}} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.gcp.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.gcp.tpl new file mode 100644 index 0000000000000..cf9c79d6949bc --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/_config.gcp.tpl @@ -0,0 +1,3 @@ +{{- define "teleport-cluster.proxy.config.gcp" -}} +{{ include "teleport-cluster.proxy.config.common" . 
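}}
+{{- end -}}

For illustration, a minimal values sketch exercising this split (all key names are taken from this patch; the cluster name is a placeholder, and running more than one proxy replica requires certManager or tls.existingSecretName, as the deployment below enforces). Component-level blocks are merged over the top-level values with mustMergeOverwrite, so proxy.* keys win:

clusterName: teleport.example.com
proxyListenerMode: separate
highAvailability:
  replicaCount: 2        # shared default, inherited by both components
proxy:
  highAvailability:
    replicaCount: 3      # proxy-specific override, takes precedence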
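Under those values, templates/proxy/config.yaml would render roughly as follows for a release named "teleport" in the "teleport" namespace (an abridged, hand-written sketch, not verbatim chart output; key order and quoting may differ after the fromYaml/toYaml round trip):

apiVersion: v1
kind: ConfigMap
metadata:
  name: teleport-proxy
  namespace: teleport
data:
  teleport.yaml: |
    version: v3
    teleport:
      join_params:
        method: kubernetes
        token_name: "teleport-proxy"
        auth_server: "teleport-auth.teleport.svc.cluster.local:3025"
    proxy_service:
      enabled: true
      public_addr: 'teleport.example.com:443'
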
diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl
new file mode 100644
index 0000000000000..fd43387b956d9
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl
@@ -0,0 +1,13 @@
+{{- define "teleport-cluster.proxy.config.scratch" -}}
+{{- required "'proxy.teleportConfig' is required in scratch mode" .Values.proxy.teleportConfig }}
+ssh_service:
+  enabled: false
+auth_service:
+  enabled: false
+proxy_service:
+  enabled: true
+{{- end -}}
+
+{{- define "teleport-cluster.proxy.config.custom" -}}
+{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }}
+{{- end -}}
\ No newline at end of file
diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.standalone.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.standalone.tpl
new file mode 100644
index 0000000000000..7355813cd5ec7
--- /dev/null
+++ b/examples/chart/teleport-cluster/templates/proxy/_config.standalone.tpl
@@ -0,0 +1,3 @@
+{{- define "teleport-cluster.proxy.config.standalone" -}}
+{{ include "teleport-cluster.proxy.config.common" . }}
+{{- end -}}
diff --git a/examples/chart/teleport-cluster/templates/certificate.yaml b/examples/chart/teleport-cluster/templates/proxy/certificate.yaml
similarity index 51%
rename from examples/chart/teleport-cluster/templates/certificate.yaml
rename to examples/chart/teleport-cluster/templates/proxy/certificate.yaml
index 7741b2cf15061..af5a464bfbfb0 100644
--- a/examples/chart/teleport-cluster/templates/certificate.yaml
+++ b/examples/chart/teleport-cluster/templates/proxy/certificate.yaml
@@ -1,24 +1,26 @@
-{{- if .Values.highAvailability.certManager.enabled }}
-  {{- $domain := (required "clusterName is required in chartValues when certManager is enabled" .Values.clusterName) }}
-  {{- $domainWildcard := printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" .Values.clusterName) }}
+{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}}
+{{- if $proxy.highAvailability.certManager.enabled }}
+  {{- $domain := (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }}
+  {{- $domainWildcard := printf "*.%s" (required "clusterName is required in chartValues when certManager is enabled" $proxy.clusterName) }}
 apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
   name: {{ .Release.Name }}
   namespace: {{ .Release.Namespace }}
+  labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }}
| nindent 4 }} spec: secretName: teleport-tls - {{- if .Values.highAvailability.certManager.addCommonName }} + {{- if $proxy.highAvailability.certManager.addCommonName }} commonName: {{ quote $domain }} {{- end }} dnsNames: - {{ quote $domain }} - {{ quote $domainWildcard }} issuerRef: - name: {{ required "highAvailability.certManager.issuerName is required in chart values" .Values.highAvailability.certManager.issuerName }} - kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" .Values.highAvailability.certManager.issuerKind }} - group: {{ required "highAvailability.certManager.issuerGroup is required in chart values" .Values.highAvailability.certManager.issuerGroup }} - {{- with .Values.annotations.certSecret }} + name: {{ required "highAvailability.certManager.issuerName is required in chart values" $proxy.highAvailability.certManager.issuerName }} + kind: {{ required "highAvailability.certManager.issuerKind is required in chart values" $proxy.highAvailability.certManager.issuerKind }} + group: {{ required "highAvailability.certManager.issuerGroup is required in chart values" $proxy.highAvailability.certManager.issuerGroup }} + {{- with $proxy.annotations.certSecret }} secretTemplate: annotations: {{- toYaml . | nindent 6 }} {{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/config.yaml b/examples/chart/teleport-cluster/templates/proxy/config.yaml new file mode 100644 index 0000000000000..e87b4a8e6e252 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/config.yaml @@ -0,0 +1,13 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- $configTemplate := printf "teleport-cluster.proxy.config.%s" $proxy.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} +{{- if $proxy.annotations.config }} + annotations: {{- toYaml $proxy.annotations.config | nindent 4 }} +{{- end }} +data: + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}} diff --git a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml new file mode 100644 index 0000000000000..6388a344c3648 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml @@ -0,0 +1,224 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- $replicable := or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName -}} +# Deployment is {{ if not $replicable }}not {{end}}replicable +{{- if and $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} +{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }} +{{- end }} +{{- if and $proxy.acme $proxy.tls.existingSecretName }} +{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }} +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +{{- if $proxy.annotations.deployment }} + annotations: {{- toYaml $proxy.annotations.deployment | nindent 4 }} +{{- end }} +spec: +{{- /* + If proxies cannot be replicated we use a single replica. + By default we want to upgrade all users to at least 2 replicas, if they had a higher replica count we take it. 
diff --git a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml new file mode 100644 index 0000000000000..6388a344c3648 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml @@ -0,0 +1,224 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- $replicable := or $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName -}} +# Deployment is {{ if not $replicable }}not {{end}}replicable +{{- if and $proxy.highAvailability.certManager.enabled $proxy.tls.existingSecretName }} +{{- fail "Cannot set both highAvailability.certManager.enabled and tls.existingSecretName, choose one or the other" }} +{{- end }} +{{- if and $proxy.acme $proxy.tls.existingSecretName }} +{{- fail "Cannot set both acme.enabled and tls.existingSecretName, choose one or the other" }} +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +{{- if $proxy.annotations.deployment }} + annotations: {{- toYaml $proxy.annotations.deployment | nindent 4 }} +{{- end }} spec: +{{- /* + If proxies cannot be replicated we use a single replica. + By default we want to upgrade all users to at least 2 replicas; if they had a higher replica count, we keep it. + If a user wants to force a single proxy, they can use the `proxy` specific override. + + $proxySpecificHA is a hack to avoid .Values.proxy.highAvailability being nil, which would cause a failure when + accessing .Values.proxy.highAvailability.replicaCount. +*/}}
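+{{- /*
+  For example (hypothetical values), a single proxy can be forced even when
+  certManager or an existing TLS secret would allow replication:
+    proxy:
+      highAvailability:
+        replicaCount: 1
+*/}}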
+{{- if $replicable }} + {{- $proxySpecificHA := default (dict) .Values.proxy.highAvailability }} + {{- if $proxySpecificHA.replicaCount }} + replicas: {{ $proxySpecificHA.replicaCount }} + {{- else }} + replicas: {{ max .Values.highAvailability.replicaCount 2 }} + {{- end }} + minReadySeconds: {{ $proxy.highAvailability.minReadySeconds }} +{{- else }} + replicas: 1 +{{- end }} + selector: + matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + # ConfigMap checksum, to recreate the pod on config changes. + checksum/config: {{ include (print $.Template.BasePath "/proxy/config.yaml") . | sha256sum }} +{{- if $proxy.annotations.pod }} + {{- toYaml $proxy.annotations.pod | nindent 8 }} +{{- end }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 8 }} + spec: + affinity: +{{- if $proxy.affinity }} + {{- if $proxy.highAvailability.requireAntiAffinity }} + {{- fail "Cannot use highAvailability.requireAntiAffinity when affinity is also set in chart values - unset one or the other" }} + {{- end }} + {{- toYaml $proxy.affinity | nindent 8 }} +{{- else }} + podAntiAffinity: + {{- if $proxy.highAvailability.requireAntiAffinity }} + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: "kubernetes.io/hostname" + {{- else if gt (int $proxy.highAvailability.replicaCount) 1 }} + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: "kubernetes.io/hostname" + {{- end }} +{{- end }} +{{- if $proxy.tolerations }} + tolerations: {{- toYaml $proxy.tolerations | nindent 6 }} +{{- end }} +{{- if $proxy.initContainers }} + initContainers: + {{- range $initContainer := $proxy.initContainers }} + {{- if and (not $initContainer.resources) $proxy.resources }} + {{- $_ := set $initContainer "resources" $proxy.resources }} + {{- end }} + {{- list $initContainer | toYaml | nindent 8 }} + {{- /* Note: this will break if the user sets volumeMounts on their initContainers */}} + volumeMounts: + {{- if $proxy.highAvailability.certManager.enabled }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + {{- else if $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + {{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" + {{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} + containers: + - name: "teleport" + image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + imagePullPolicy: {{ $proxy.imagePullPolicy }} + {{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }} + env: + {{- if (gt (len $proxy.extraEnv) 0) }} + {{- toYaml $proxy.extraEnv | nindent 8 }} + {{- end }} + {{- if $proxy.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} + {{- end }} + args: + - "--diag-addr=0.0.0.0:3000" + {{- if $proxy.insecureSkipProxyTLSVerify }} + - "--insecure" + {{- end }} + {{- if $proxy.extraArgs }} + {{- toYaml $proxy.extraArgs | nindent 8 }} + {{- end }} + ports: + - name: diag + containerPort: 3000 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 # wait 5s for the proxy to start + periodSeconds: 5 # poll health every 5s + failureThreshold: 6 # consider the proxy unhealthy after 30s (6 * 5s) + timeoutSeconds: {{ $proxy.probeTimeoutSeconds }} + readinessProbe: + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 # wait 5s for the proxy to register + periodSeconds: 5 # poll health every 5s + failureThreshold: 12 # consider the proxy unhealthy after 60s (12 * 5s) + timeoutSeconds: {{ $proxy.probeTimeoutSeconds }} +{{- if $proxy.postStart.command }} + lifecycle: + postStart: + exec: + command: {{ toYaml $proxy.postStart.command | nindent 14 }} +{{- end }} +{{- if $proxy.resources }} + resources: + {{- toYaml $proxy.resources | nindent 10 }} +{{- end }} +{{- if $proxy.securityContext }} + securityContext: {{- toYaml $proxy.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if $proxy.highAvailability.certManager.enabled }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- else if $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} +{{- end }} + volumes: +{{- if $proxy.highAvailability.certManager.enabled }} + - name: teleport-tls + secret: + secretName: teleport-tls +{{- else if $proxy.tls.existingSecretName }} + - name: teleport-tls + secret: + secretName: {{ $proxy.tls.existingSecretName }} +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ $proxy.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-proxy + - name: "data" + emptyDir: {} +{{- if $proxy.extraVolumes }} + {{- toYaml $proxy.extraVolumes | nindent 6 }} +{{- end }} +{{- if $proxy.priorityClassName }} + priorityClassName: {{ $proxy.priorityClassName }} +{{- end }} + serviceAccountName: {{ include "teleport-cluster.proxy.serviceAccountName" . 
}} diff --git a/examples/chart/teleport-cluster/templates/proxy/pdb.yaml b/examples/chart/teleport-cluster/templates/proxy/pdb.yaml new file mode 100644 index 0000000000000..dd6dddf09e323 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/pdb.yaml @@ -0,0 +1,17 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- if $proxy.highAvailability.podDisruptionBudget.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ .Release.Name }}-proxy + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} +spec: + minAvailable: {{ $proxy.highAvailability.podDisruptionBudget.minAvailable }} + selector: + matchLabels: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/examples/chart/teleport-cluster/templates/service.yaml b/examples/chart/teleport-cluster/templates/proxy/service.yaml similarity index 54% rename from examples/chart/teleport-cluster/templates/service.yaml rename to examples/chart/teleport-cluster/templates/proxy/service.yaml index 6adabec3e66d4..0a5f4b0f3a20d 100644 --- a/examples/chart/teleport-cluster/templates/service.yaml +++ b/examples/chart/teleport-cluster/templates/proxy/service.yaml @@ -1,33 +1,33 @@ -{{- $backendProtocol := ternary "ssl" "tcp" (hasKey .Values.annotations.service "service.beta.kubernetes.io/aws-load-balancer-ssl-cert") -}} +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- $backendProtocol := ternary "ssl" "tcp" (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-ssl-cert") -}} apiVersion: v1 kind: Service metadata: name: {{ .Release.Name }} namespace: {{ .Release.Namespace }} - labels: - app: {{ .Release.Name }} - {{- if (or (.Values.annotations.service) (eq .Values.chartMode "aws")) }} + labels: {{- include "teleport-cluster.proxy.labels" . | nindent 4 }} + {{- if (or ($proxy.annotations.service) (eq $proxy.chartMode "aws")) }} annotations: - {{- if eq .Values.chartMode "aws" }} + {{- if eq $proxy.chartMode "aws" }} service.beta.kubernetes.io/aws-load-balancer-backend-protocol: {{ $backendProtocol }} service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" service.beta.kubernetes.io/aws-load-balancer-type: nlb {{- end }} - {{- if .Values.annotations.service }} - {{- toYaml .Values.annotations.service | nindent 4 }} + {{- if $proxy.annotations.service }} + {{- toYaml $proxy.annotations.service | nindent 4 }} {{- end }} {{- end }} spec: - type: {{ default "LoadBalancer" .Values.service.type }} - {{- with .Values.service.spec }} + type: {{ default "LoadBalancer" $proxy.service.type }} +{{- with $proxy.service.spec }} {{- toYaml . 
| nindent 2 }} - {{- end }} +{{- end }} ports: - name: https port: 443 targetPort: 3080 protocol: TCP - {{- if not .Values.proxyListenerMode }} +{{- if eq $proxy.proxyListenerMode "separate" }} - name: sshproxy port: 3023 targetPort: 3023 @@ -44,18 +44,17 @@ spec: port: 3036 targetPort: 3036 protocol: TCP - {{- if .Values.separatePostgresListener }} + {{- if $proxy.separatePostgresListener }} - name: postgres port: 5432 targetPort: 5432 protocol: TCP {{- end }} - {{- if .Values.separateMongoListener }} + {{- if $proxy.separateMongoListener }} - name: mongo port: 27017 targetPort: 27017 protocol: TCP {{- end }} - {{- end }} - selector: - app: {{ .Release.Name }} +{{- end }} + selector: {{- include "teleport-cluster.proxy.selectorLabels" . | nindent 4 }} diff --git a/examples/chart/teleport-cluster/templates/proxy/serviceaccount.yaml b/examples/chart/teleport-cluster/templates/proxy/serviceaccount.yaml new file mode 100644 index 0000000000000..fb772df1f68ff --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- if $proxy.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "teleport-cluster.proxy.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if $proxy.annotations.serviceAccount }} + annotations: {{- toYaml $proxy.annotations.serviceAccount | nindent 4 }} +{{- end -}} +{{- end }} diff --git a/examples/chart/teleport-cluster/templates/psp.yaml b/examples/chart/teleport-cluster/templates/psp.yaml index b70d4cb173f6c..6540a05333c8e 100644 --- a/examples/chart/teleport-cluster/templates/psp.yaml +++ b/examples/chart/teleport-cluster/templates/psp.yaml @@ -3,6 +3,7 @@ apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' @@ -38,6 +39,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: {{ .Release.Name }}-psp + namespace: {{ .Release.Namespace }} rules: - apiGroups: - policy @@ -52,6 +54,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: {{ .Release.Name }}-psp + namespace: {{ .Release.Namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/examples/chart/teleport-cluster/templates/pvc.yaml b/examples/chart/teleport-cluster/templates/pvc.yaml deleted file mode 100644 index 57df7e18fca2d..0000000000000 --- a/examples/chart/teleport-cluster/templates/pvc.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- $persistence := (coalesce .Values.standalone .Values.persistence) -}} -{{- if and (and (or (eq .Values.chartMode "standalone") (eq .Values.chartMode "custom")) (.Values.persistence.enabled)) (not $persistence.existingClaimName) }} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} - labels: - app: {{ .Release.Name }} -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: {{ required "persistence.volumeSize is required in chart values" $persistence.volumeSize }} -{{- end }} diff --git a/examples/chart/teleport-cluster/templates/serviceaccount.yaml b/examples/chart/teleport-cluster/templates/serviceaccount.yaml deleted file mode 100644 index 3b1aa42983240..0000000000000 --- 
a/examples/chart/teleport-cluster/templates/serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "teleport.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -{{- if .Values.annotations.serviceAccount }} - annotations: -{{- toYaml .Values.annotations.serviceAccount | nindent 4 }} -{{- end -}} -{{- end }} diff --git a/examples/chart/teleport-cluster/values.schema.json b/examples/chart/teleport-cluster/values.schema.json index 1bdfad85d23c9..97c2f1bbba76e 100644 --- a/examples/chart/teleport-cluster/values.schema.json +++ b/examples/chart/teleport-cluster/values.schema.json @@ -30,6 +30,19 @@ "type": "string", "default": "" }, + "auth": { + "$id": "#/properties/auth", + "type": "object" + }, + "proxy": { + "$id": "#/properties/proxy", + "type": "object" + }, + "createProxyToken": { + "$id": "#/properties/createProxyToken", + "type": "boolean", + "default": true + }, "authentication": { "$id": "#/properties/authentication", "type": "object", @@ -267,7 +280,7 @@ "standalone", "aws", "gcp", - "custom" + "scratch" ], "default": "standalone" }, diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index 3bdea9aae314b..d024900aa66f2 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -18,6 +18,28 @@ kubeClusterName: "" # Version of teleport image, if different from chart version in Chart.yaml. teleportVersionOverride: "" +# auth contains values specific to the auth pods +# You can override chart-scoped values, for example +# auth: +#   postStart: ["curl", "http://hook"] +#   imagePullPolicy: Always +auth: + # auth.teleportConfig contains YAML teleport configuration for auth pods + # The configuration will be merged with the chart-generated configuration + # and will take precedence in case of conflict + teleportConfig: {} + +# proxy contains values specific to the proxy pods +# You can override chart-scoped values, for example +# proxy: +#   postStart: ["curl", "http://hook"] +#   imagePullPolicy: Always +proxy: + # proxy.teleportConfig contains YAML teleport configuration for proxy pods + # The configuration will be merged with the chart-generated configuration + # and will take precedence in case of conflict + teleportConfig: {} +
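+# For example (a hypothetical override), raising proxy log verbosity without
+# forking the chart-generated configuration:
+# proxy:
+#   teleportConfig:
+#     teleport:
+#       log:
+#         severity: DEBUG
+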
 authentication: # Default authentication type. Possible values are 'local' and 'github' for OSS, plus 'oidc' and 'saml' for Enterprise. type: local @@ -67,8 +89,8 @@ authentication: # Teleport supports TLS routing. In this mode, all client connections are wrapped in TLS and multiplexed on one Teleport proxy port. # Default mode will not utilize TLS routing and operate in backwards-compatibility mode. -# Possible values are 'multiplex' -proxyListenerMode: "" +# Possible values are 'separate' and 'multiplex' +proxyListenerMode: "separate" # Optional setting for configuring session recording. # See `session_recording` under https://goteleport.com/docs/setup/reference/config/#teleportyaml @@ -139,7 +161,6 @@ operator: # limits: # memory: "1Gi" - # If true, create & use Pod Security Policy resources # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ # Note: the PSP won't be deployed if Kubernetes does not support the resource (Kubernetes >= 1.25) @@ -151,16 +172,19 @@ labels: {} # Mode to deploy the chart in. The default is "standalone". Options: # - "standalone": will deploy a Teleport container running auth and proxy services with a PersistentVolumeClaim for storage. -# - "aws": will deploy a Teleport container running auth and proxy services using DynamoDB for backend/audit log storage and S3 for session recordings. (1) -# - "gcp": will deploy a Teleport container running auth and proxy services using Firestore for backend/audit log storage and Google Cloud storage for session recordings. (2) -# - "custom": will deploy a Teleport container using a teleport.yaml config file that you provide. (3) +# - "aws": will deploy Teleport using DynamoDB for backend/audit log storage and S3 for session recordings. (1) +# - "gcp": will deploy Teleport using Firestore for backend/audit log storage and Google Cloud storage for session recordings. (2) +# - "scratch": will deploy a Teleport container without a default configuration file. You must provide your own configuration. (3) # (1) To use "aws" mode, you must also configure the "aws" section below. # (2) To use "gcp" mode, you must also configure the "gcp" section below. -# (3) When set to "custom", you must create a ConfigMap containing a 'teleport.yaml' key with an inline Teleport YAML config, -# give it the same name as the Helm release and place it in the chart namespace. -# kubectl -n ${TELEPORT_NAMESPACE?} create configmap ${HELM_RELEASE_NAME?} --from-file=teleport.yaml +# (3) When set to "scratch", you must write the teleport configuration in auth.teleportConfig and proxy.teleportConfig. chartMode: standalone +# Whether the chart should create a Teleport ProvisionToken for the proxies to join the Teleport cluster. +# Disabling this flag will prevent the proxies from joining the auth pods. In this case, the +# Helm chart user is responsible for configuring working join_params on the proxy. 
+createProxyToken: true + ###################################################################### # Persistence settings (only used in "standalone" and "custom" modes) # NOTE: Changes in Kubernetes 1.23+ mean that persistent volumes will not automatically be provisioned in AWS EKS clusters From 95c4da0b25c74e4682e8930a573777461fd866bc Mon Sep 17 00:00:00 2001 From: Hugo Hervieux Date: Wed, 7 Dec 2022 09:02:09 -0500 Subject: [PATCH 02/14] helm: split auth and proxy, lint files --- examples/chart/teleport-cluster/.lint/cert-manager.yaml | 7 ++++++- examples/chart/teleport-cluster/.lint/cert-secret.yaml | 7 ++++++- .../chart/teleport-cluster/.lint/custom-customsize.yaml | 9 --------- .../chart/teleport-cluster/.lint/custom-existingpvc.yaml | 9 --------- .../.lint/example-minimal-standalone.yaml | 7 +++++++ .../chart/teleport-cluster/.lint/initcontainers.yaml | 3 +++ examples/chart/teleport-cluster/.lint/pdb.yaml | 7 ++++++- ...ener-mode.yaml => proxy-listener-mode-multiplex.yaml} | 0 .../.lint/proxy-listener-mode-separate.yaml | 2 ++ 9 files changed, 30 insertions(+), 21 deletions(-) delete mode 100644 examples/chart/teleport-cluster/.lint/custom-customsize.yaml delete mode 100644 examples/chart/teleport-cluster/.lint/custom-existingpvc.yaml create mode 100644 examples/chart/teleport-cluster/.lint/example-minimal-standalone.yaml rename examples/chart/teleport-cluster/.lint/{proxy-listener-mode.yaml => proxy-listener-mode-multiplex.yaml} (100%) create mode 100644 examples/chart/teleport-cluster/.lint/proxy-listener-mode-separate.yaml diff --git a/examples/chart/teleport-cluster/.lint/cert-manager.yaml b/examples/chart/teleport-cluster/.lint/cert-manager.yaml index 51bc3612362ee..7748890c24297 100644 --- a/examples/chart/teleport-cluster/.lint/cert-manager.yaml +++ b/examples/chart/teleport-cluster/.lint/cert-manager.yaml @@ -1,5 +1,10 @@ clusterName: test-cluster -chartMode: custom +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket highAvailability: replicaCount: 3 certManager: diff --git a/examples/chart/teleport-cluster/.lint/cert-secret.yaml b/examples/chart/teleport-cluster/.lint/cert-secret.yaml index 040f434713f63..d86eb31f31e06 100644 --- a/examples/chart/teleport-cluster/.lint/cert-secret.yaml +++ b/examples/chart/teleport-cluster/.lint/cert-secret.yaml @@ -1,5 +1,10 @@ clusterName: test-cluster -chartMode: custom +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket annotations: certSecret: kubernetes.io/cert-secret: value diff --git a/examples/chart/teleport-cluster/.lint/custom-customsize.yaml b/examples/chart/teleport-cluster/.lint/custom-customsize.yaml deleted file mode 100644 index 90579e01a0228..0000000000000 --- a/examples/chart/teleport-cluster/.lint/custom-customsize.yaml +++ /dev/null @@ -1,9 +0,0 @@ -clusterName: test-custom-cluster -chartMode: custom -persistence: - enabled: true - volumeSize: 50Gi -acme: true -acmeEmail: test@email.com -labels: - env: custom diff --git a/examples/chart/teleport-cluster/.lint/custom-existingpvc.yaml b/examples/chart/teleport-cluster/.lint/custom-existingpvc.yaml deleted file mode 100644 index 731ba719fff4d..0000000000000 --- a/examples/chart/teleport-cluster/.lint/custom-existingpvc.yaml +++ /dev/null @@ -1,9 +0,0 @@ -clusterName: test-custom-cluster -chartMode: custom 
-persistence: - enabled: true - existingClaimName: teleport-storage -acme: true -acmeEmail: test@email.com -labels: - env: custom diff --git a/examples/chart/teleport-cluster/.lint/example-minimal-standalone.yaml b/examples/chart/teleport-cluster/.lint/example-minimal-standalone.yaml new file mode 100644 index 0000000000000..9cdba9ad3fcab --- /dev/null +++ b/examples/chart/teleport-cluster/.lint/example-minimal-standalone.yaml @@ -0,0 +1,7 @@ +# This setup is not safe for production because the proxy will self-sign its certificate. +# Use these values for testing only. + +# The chart should deploy and work with only a clusterName set. +# This setup can also cause redirection issues if the proxy is contacted via a hostname instead of an IP address +# as it is not aware of its external hostname and will attempt to perform a redirection. +clusterName: helm-lint diff --git a/examples/chart/teleport-cluster/.lint/initcontainers.yaml b/examples/chart/teleport-cluster/.lint/initcontainers.yaml index 24de49e5ee2ee..a558e451c18c7 100644 --- a/examples/chart/teleport-cluster/.lint/initcontainers.yaml +++ b/examples/chart/teleport-cluster/.lint/initcontainers.yaml @@ -3,3 +3,6 @@ initContainers: - name: "teleport-init" image: "alpine" args: ["echo test"] +- name: "teleport-init2" + image: "alpine" + args: ["echo test2"] diff --git a/examples/chart/teleport-cluster/.lint/pdb.yaml b/examples/chart/teleport-cluster/.lint/pdb.yaml index f3105c5766eb0..0504d09d21cc2 100644 --- a/examples/chart/teleport-cluster/.lint/pdb.yaml +++ b/examples/chart/teleport-cluster/.lint/pdb.yaml @@ -1,5 +1,10 @@ clusterName: helm-lint -chartMode: custom +chartMode: aws +aws: + region: us-west-2 + backendTable: test-dynamodb-backend-table + auditLogTable: test-dynamodb-auditlog-table + sessionRecordingBucket: test-s3-session-storage-bucket highAvailability: replicaCount: 3 podDisruptionBudget: diff --git a/examples/chart/teleport-cluster/.lint/proxy-listener-mode.yaml b/examples/chart/teleport-cluster/.lint/proxy-listener-mode-multiplex.yaml similarity index 100% rename from examples/chart/teleport-cluster/.lint/proxy-listener-mode.yaml rename to examples/chart/teleport-cluster/.lint/proxy-listener-mode-multiplex.yaml diff --git a/examples/chart/teleport-cluster/.lint/proxy-listener-mode-separate.yaml b/examples/chart/teleport-cluster/.lint/proxy-listener-mode-separate.yaml new file mode 100644 index 0000000000000..3be257a8a4a38 --- /dev/null +++ b/examples/chart/teleport-cluster/.lint/proxy-listener-mode-separate.yaml @@ -0,0 +1,2 @@ +clusterName: test-proxy-listener-mode +proxyListenerMode: separate From e54d2adaef9c3985d505d2c89b12ff6ec01d308b Mon Sep 17 00:00:00 2001 From: Hugo Hervieux Date: Wed, 7 Dec 2022 09:04:54 -0500 Subject: [PATCH 03/14] helm: split auth and proxy, tests --- ...l.snap => auth_clusterrole_test.yaml.snap} | 27 - .../auth_clusterrolebinding_test.yaml.snap | 1 + .../__snapshot__/auth_config_test.yaml.snap | 1392 ++++++++++ .../auth_statefulset_test.yaml.snap | 288 ++ .../clusterrolebinding_test.yaml.snap | 14 - .../__snapshot__/deployment_test.yaml.snap | 2456 ----------------- .../tests/__snapshot__/pdb_test.yaml.snap | 14 - ....snap => proxy_certificate_test.yaml.snap} | 0 .../proxy_deployment_test.yaml.snap | 171 ++ .../__snapshot__/proxy_service_test.yaml.snap | 50 + .../tests/__snapshot__/psp_test.yaml.snap | 3 + .../tests/__snapshot__/pvc_test.yaml.snap | 60 - .../tests/__snapshot__/service_test.yaml.snap | 297 -- .../serviceaccount_test.yaml.snap | 19 - ...e_test.yaml => 
auth_clusterrole_test.yaml} | 5 +- .../tests/auth_clusterrolebinding_test.yaml | 20 + ...config_test.yaml => auth_config_test.yaml} | 175 +- .../teleport-cluster/tests/auth_pdb_test.yaml | 23 + .../{pvc_test.yaml => auth_pvc_test.yaml} | 59 +- ...est.yaml => auth_serviceaccount_test.yaml} | 9 +- ...t_test.yaml => auth_statefulset_test.yaml} | 461 +--- .../tests/clusterrolebinding_test.yaml | 11 - .../teleport-cluster/tests/pdb_test.yaml | 13 - ..._test.yaml => proxy_certificate_test.yaml} | 4 +- .../tests/proxy_deployment_test.yaml | 551 ++++ .../tests/proxy_pdb_test.yaml | 23 + ...vice_test.yaml => proxy_service_test.yaml} | 33 +- .../tests/proxy_serviceaccount_test.yaml | 22 + .../teleport-cluster/tests/psp_test.yaml | 2 +- 29 files changed, 2767 insertions(+), 3436 deletions(-) rename examples/chart/teleport-cluster/tests/__snapshot__/{clusterrole_test.yaml.snap => auth_clusterrole_test.yaml.snap} (70%) create mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap create mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap create mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/clusterrolebinding_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/deployment_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/pdb_test.yaml.snap rename examples/chart/teleport-cluster/tests/__snapshot__/{certificate_test.yaml.snap => proxy_certificate_test.yaml.snap} (100%) create mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap create mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/pvc_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/service_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/serviceaccount_test.yaml.snap rename examples/chart/teleport-cluster/tests/{clusterrole_test.yaml => auth_clusterrole_test.yaml} (84%) create mode 100644 examples/chart/teleport-cluster/tests/auth_clusterrolebinding_test.yaml rename examples/chart/teleport-cluster/tests/{config_test.yaml => auth_config_test.yaml} (75%) create mode 100644 examples/chart/teleport-cluster/tests/auth_pdb_test.yaml rename examples/chart/teleport-cluster/tests/{pvc_test.yaml => auth_pvc_test.yaml} (51%) rename examples/chart/teleport-cluster/tests/{serviceaccount_test.yaml => auth_serviceaccount_test.yaml} (76%) rename examples/chart/teleport-cluster/tests/{deployment_test.yaml => auth_statefulset_test.yaml} (59%) delete mode 100644 examples/chart/teleport-cluster/tests/clusterrolebinding_test.yaml delete mode 100644 examples/chart/teleport-cluster/tests/pdb_test.yaml rename examples/chart/teleport-cluster/tests/{certificate_test.yaml => proxy_certificate_test.yaml} (93%) create mode 100644 examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml create mode 100644 examples/chart/teleport-cluster/tests/proxy_pdb_test.yaml rename examples/chart/teleport-cluster/tests/{service_test.yaml => proxy_service_test.yaml} (83%) create mode 100644 examples/chart/teleport-cluster/tests/proxy_serviceaccount_test.yaml diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/clusterrole_test.yaml.snap 
b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap similarity index 70% rename from examples/chart/teleport-cluster/tests/__snapshot__/clusterrole_test.yaml.snap rename to examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap index ae8088b23b8aa..94885ae7ee164 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/clusterrole_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrole_test.yaml.snap @@ -58,30 +58,3 @@ adds operator permissions to ClusterRole: - events verbs: - create -creates a ClusterRole: - 1: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: RELEASE-NAME - rules: - - apiGroups: - - "" - resources: - - users - - groups - - serviceaccounts - verbs: - - impersonate - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - authorization.k8s.io - resources: - - selfsubjectaccessreviews - verbs: - - create diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap new file mode 100644 index 0000000000000..0967ef424bce6 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap @@ -0,0 +1 @@ +{} diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap new file mode 100644 index 0000000000000..2d24e3ce51ced --- /dev/null +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap @@ -0,0 +1,1392 @@ +matches snapshot for acme-off.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-on.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-acme-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-acme-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for acme-uri-staging.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-acme-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-acme-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + 
version: v3 +matches snapshot for auth-connector-name.yaml: + 1: | + |- + auth_service: + authentication: + connector_name: okta + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-disable-local.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: false + second_factor: "off" + type: github + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-locking-mode.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + locking_mode: strict + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-passwordless.yaml: + 1: | + |- + auth_service: + authentication: + connector_name: passwordless + local_auth: true + second_factor: webauthn + type: local + webauthn: + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-type-legacy.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: github + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-type.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: github + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: 
+ enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for auth-webauthn-legacy.yaml: + 1: | + |- + Error: 'error converting YAML to JSON: yaml: line 20: mapping values are not allowed + in this context' +matches snapshot for auth-webauthn.yaml: + 1: | + |- + Error: 'error converting YAML to JSON: yaml: line 20: mapping values are not allowed + in this context' +matches snapshot for aws-dynamodb-autoscaling.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: true + continuous_backups: false + read_max_capacity: 100 + read_min_capacity: 5 + read_target_value: 50 + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + write_max_capacity: 100 + write_min_capacity: 5 + write_target_value: 50 + version: v3 +matches snapshot for aws-ha-acme.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha-antiaffinity.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha-log.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: 
true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + - stdout:// + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws-ha.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for aws.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + labels: + env: aws + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for existing-tls-secret-with-ca.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for existing-tls-secret.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + 
proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for gcp-ha-acme.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp-ha-antiaffinity.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp-ha-log.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + - stdout:// + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: 
/etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for gcp.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-gcp-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-gcp-cluster + labels: + env: gcp + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + audit_sessions_uri: gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json + collection_name: test-teleport-firestore-storage-collection + credentials_path: /etc/teleport-secrets/gcp-credentials.json + project_id: gcpproj-123456 + type: firestore + version: v3 +matches snapshot for initcontainers.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for kube-cluster-name.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-kube-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for log-basic.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-log-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: json + output: stderr + severity: INFO + version: v3 +matches snapshot for log-extra.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-log-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + 
extra_fields: + - level + - timestamp + - component + - caller + output: json + output: /var/lib/teleport/test.log + severity: DEBUG + version: v3 +matches snapshot for log-legacy.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-log-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-log-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: DEBUG + version: v3 +matches snapshot for priority-class-name.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for proxy-listener-mode-multiplex.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-proxy-listener-mode + enabled: true + proxy_listener_mode: multiplex + kubernetes_service: + enabled: true + kube_cluster_name: test-proxy-listener-mode + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for proxy-listener-mode-separate.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-proxy-listener-mode + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-proxy-listener-mode + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for public-addresses.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-mongo-listener.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: 
helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for separate-postgres-listener.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for service.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for session-recording.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + session_recording: node-sync + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for standalone-customsize.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-standalone-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-standalone-cluster + labels: + env: standalone + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for standalone-existingpvc.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-standalone-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-standalone-cluster + labels: + env: standalone + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for tolerations.yaml: + 1: | 
+ |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-aws-cluster + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-aws-cluster + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + storage: + audit_events_uri: + - dynamodb://test-dynamodb-auditlog-table + audit_sessions_uri: s3://test-s3-session-storage-bucket + auto_scaling: false + continuous_backups: false + region: us-west-2 + table_name: test-dynamodb-backend-table + type: dynamodb + version: v3 +matches snapshot for version-override.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: test-cluster-name + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: test-cluster-name + labels: + env: test + version: 5.2.1 + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 +matches snapshot for volumes.yaml: + 1: | + |- + auth_service: + authentication: + local_auth: true + second_factor: otp + type: local + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap new file mode 100644 index 0000000000000..1b8742dcde1db --- /dev/null +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap @@ -0,0 +1,288 @@ +should add an operator side-car when operator is enabled: + 1: | + image: public.ecr.aws/gravitational/teleport-operator:12.0.0-dev + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: operator + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data +? 
should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName + is set and persistence.enabled is false +: 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport:12.0.0-dev + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME + volumes: + - configMap: + name: RELEASE-NAME-auth + name: config + - emptyDir: {} + name: data +should provision initContainer correctly when set in values: + 1: | + - args: + - echo test + image: alpine + name: teleport-init + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + - args: + - echo test2 + image: alpine + name: teleport-init2 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data +should set affinity when set in values: + 1: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport +should set required affinity when highAvailability.requireAntiAffinity is set: + 1: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - RELEASE-NAME + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: kubernetes.io/hostname +should set resources when set in values: + 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport:12.0.0-dev + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME + volumes: + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME +should set securityContext when set in values: + 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport:12.0.0-dev + imagePullPolicy: IfNotPresent + 
livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME + volumes: + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME +should set tolerations when set in values: + 1: | + - effect: NoExecute + key: dedicated + operator: Equal + value: teleport + - effect: NoSchedule + key: dedicated + operator: Equal + value: teleport +should use OSS image and not mount license when enterprise is not set in values: + 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + - --apply-on-startup=/etc/teleport/apply-on-startup.yaml + image: public.ecr.aws/gravitational/teleport:8.3.4 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME + volumes: + - configMap: + name: RELEASE-NAME-auth + name: config + - name: data + persistentVolumeClaim: + claimName: RELEASE-NAME diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/clusterrolebinding_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/clusterrolebinding_test.yaml.snap deleted file mode 100644 index 408ec5f4556e2..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/clusterrolebinding_test.yaml.snap +++ /dev/null @@ -1,14 +0,0 @@ -creates a ClusterRoleBinding: - 1: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: RELEASE-NAME - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: RELEASE-NAME - subjects: - - kind: ServiceAccount - name: RELEASE-NAME - namespace: NAMESPACE diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/deployment_test.yaml.snap deleted file mode 100644 index 93855014bdf66..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/deployment_test.yaml.snap +++ /dev/null @@ -1,2456 +0,0 @@ -sets Deployment annotations when specified: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - 
periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -sets Pod annotations when specified: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should add PersistentVolumeClaim as volume when in custom mode and persistence.enabled is true: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is true: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should add an operator side-car when operator is enabled: - 1: | - image: public.ecr.aws/gravitational/teleport-operator:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - name: operator - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data -should add emptyDir for data in AWS mode: - 1: | - affinity: - 
podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should add emptyDir for data in GCP mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should add insecureSkipProxyTLSVerify to args when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - - --insecure - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should add named PersistentVolumeClaim as volume when in custom mode and persistence.existingClaimName is set: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: 
diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: teleport-storage -? should add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName - is set and persistence.enabled is true -: 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: teleport-storage -should do enterprise things when when enterprise is set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport-ent:8.3.4 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /var/lib/license - name: license - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: license - secret: - secretName: license - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should expose diag port: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should have Recreate strategy in standalone mode: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 
- timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should have multiple replicas when replicaCount is set: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount ConfigMap for config in AWS mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should mount ConfigMap for config in GCP mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: 
/etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should mount ConfigMap for config in custom mode: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount ConfigMap for config in standalone mode: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount GCP credentials for initContainer in GCP mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - initContainers: - - args: - - echo test - image: alpine - name: teleport-init - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - configMap: - name: RELEASE-NAME - 
name: config - - emptyDir: {} - name: data -should mount GCP credentials in GCP mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should mount TLS certs for initContainer when cert-manager is enabled: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - initContainers: - - args: - - echo test - image: alpine - name: teleport-init - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - name: teleport-tls - secret: - secretName: teleport-tls - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should mount TLS certs when cert-manager is enabled: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - 
timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - name: teleport-tls - secret: - secretName: teleport-tls - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should mount cert-manager TLS secret when highAvailability.certManager.enabled is true: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: teleport-tls - secret: - secretName: teleport-tls - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount extraVolumes and extraVolumeMounts: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - - mountPath: /path/to/mount - name: my-mount - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME - - name: my-mount - secret: - secretName: mySecret -should mount tls.existingCASecretName and set environment when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - env: - - name: SSL_CERT_FILE - value: /etc/teleport-tls-ca/ca.pem - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - 
failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport-tls-ca - name: teleport-tls-ca - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - - name: teleport-tls-ca - secret: - secretName: helm-lint-existing-tls-secret-ca - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount tls.existingCASecretName and set extra environment when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - env: - - name: SOME_ENVIRONMENT_VARIABLE - value: some-value - - name: SSL_CERT_FILE - value: /etc/teleport-tls-ca/ca.pem - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport-tls-ca - name: teleport-tls-ca - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - - name: teleport-tls-ca - secret: - secretName: helm-lint-existing-tls-secret-ca - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should mount tls.existingSecretName when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should not add PersistentVolumeClaim as volume when in custom mode and persistence.enabled is false: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - 
failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should not add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is false: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -? should not add named PersistentVolumeClaim as volume when in custom mode, persistence.existingClaimName - is set and persistence.enabled is false -: 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -? 
should not add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName - is set but persistence.enabled is false -: 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should not do enterprise things when when enterprise is not set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:8.3.4 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should not have more than one replica in standalone mode: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should not have strategy in AWS mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: 
/var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should not have strategy in GCP mode: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport-secrets - name: gcp-credentials - readOnly: true - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - name: gcp-credentials - secret: - secretName: teleport-gcp-credentials - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should not have strategy in custom mode: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should not mount TLS secrets when when highAvailability.certManager.enabled is false and tls.existingSecretName is not set: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should not mount secret when credentialSecretName is blank in values: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: 
public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should not set securityContext when is empty object (default value): - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should provision initContainer correctly when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - env: - - name: SOME_ENVIRONMENT_VARIABLE - value: some-value - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - resources: - limits: - cpu: 2 - memory: 4Gi - requests: - cpu: 1 - memory: 2Gi - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - initContainers: - - args: - - echo test - image: alpine - name: teleport-init - resources: - limits: - cpu: 2 - memory: 4Gi - requests: - cpu: 1 - memory: 2Gi - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set affinity when set in values: - 1: | - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: gravitational.io/dedicated - operator: In - values: - - teleport - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: 
diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set environment when extraEnv set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - env: - - name: SOME_ENVIRONMENT_VARIABLE - value: some-value - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set imagePullPolicy when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: Always - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set postStart command if set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /bin/echo - - test - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set priorityClassName when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - 
initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - priorityClassName: system-cluster-critical - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set probeTimeoutSeconds when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 5 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set required affinity when highAvailability.requireAntiAffinity is set: - 1: | - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data -should set resources when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - resources: - limits: - cpu: 2 - memory: 4Gi - requests: - cpu: 1 - memory: 2Gi - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set securityContext when set in values: - 1: | - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - 
periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - securityContext: - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: false - runAsGroup: 99 - runAsNonRoot: true - runAsUser: 99 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - volumes: - - configMap: - name: RELEASE-NAME - name: config - - name: data - persistentVolumeClaim: - claimName: RELEASE-NAME -should set tolerations when set in values: - 1: | - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - RELEASE-NAME - topologyKey: kubernetes.io/hostname - weight: 50 - containers: - - args: - - --diag-addr=0.0.0.0:3000 - image: public.ecr.aws/gravitational/teleport:12.0.0-dev - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 6 - httpGet: - path: /healthz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - name: teleport - ports: - - containerPort: 3000 - name: diag - protocol: TCP - readinessProbe: - failureThreshold: 12 - httpGet: - path: /readyz - port: diag - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/teleport - name: config - readOnly: true - - mountPath: /var/lib/teleport - name: data - serviceAccountName: RELEASE-NAME - tolerations: - - effect: NoExecute - key: dedicated - operator: Equal - value: teleport - - effect: NoSchedule - key: dedicated - operator: Equal - value: teleport - volumes: - - configMap: - name: RELEASE-NAME - name: config - - emptyDir: {} - name: data diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/pdb_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/pdb_test.yaml.snap deleted file mode 100644 index a8d3bf87b8092..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/pdb_test.yaml.snap +++ /dev/null @@ -1,14 +0,0 @@ -should create a PDB when enabled in values (pdb.yaml): - 1: | - apiVersion: policy/v1beta1 - kind: PodDisruptionBudget - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - minAvailable: 2 - selector: - matchLabels: - app: RELEASE-NAME diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/certificate_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap similarity index 100% rename from examples/chart/teleport-cluster/tests/__snapshot__/certificate_test.yaml.snap rename to examples/chart/teleport-cluster/tests/__snapshot__/proxy_certificate_test.yaml.snap diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap new file mode 100644 index 0000000000000..437e02ead7c9a --- /dev/null +++ b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_deployment_test.yaml.snap @@ -0,0 +1,171 @@ +should provision initContainer correctly when set in values: + 1: | + - args: + - echo test + image: alpine + name: teleport-init + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + 
readOnly: true + - mountPath: /var/lib/teleport + name: data + - args: + - echo test2 + image: alpine + name: teleport-init2 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data +should set affinity when set in values: + 1: | + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport +should set required affinity when highAvailability.requireAntiAffinity is set: + 1: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - RELEASE-NAME + - key: app.kubernetes.io/component + operator: In + values: + - proxy + topologyKey: kubernetes.io/hostname +should set resources when set in values: + 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport:12.0.0-dev + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + resources: + limits: + cpu: 2 + memory: 4Gi + requests: + cpu: 1 + memory: 2Gi + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME-proxy + volumes: + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set securityContext when set in values: + 1: | + affinity: + podAntiAffinity: null + containers: + - args: + - --diag-addr=0.0.0.0:3000 + image: public.ecr.aws/gravitational/teleport:12.0.0-dev + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 6 + httpGet: + path: /healthz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + name: teleport + ports: + - containerPort: 3000 + name: diag + protocol: TCP + readinessProbe: + failureThreshold: 12 + httpGet: + path: /readyz + port: diag + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: false + runAsGroup: 99 + runAsNonRoot: true + runAsUser: 99 + volumeMounts: + - mountPath: /etc/teleport + name: config + readOnly: true + - mountPath: /var/lib/teleport + name: data + serviceAccountName: RELEASE-NAME-proxy + volumes: + - configMap: + name: RELEASE-NAME-proxy + name: config + - emptyDir: {} + name: data +should set tolerations when set in values: + 1: | + - effect: NoExecute + key: dedicated + operator: Equal + value: teleport + - effect: NoSchedule + key: dedicated + operator: Equal + value: teleport diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap new file mode 100644 index 0000000000000..cf98a5e600665 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/__snapshot__/proxy_service_test.yaml.snap @@ -0,0 +1,50 @@ +exposes a single port when running in multiplex mode: + 1: | + - name: https + port: 443 + 
protocol: TCP + targetPort: 3080 +exposes separate listener ports by default: + 1: | + - name: https + port: 443 + protocol: TCP + targetPort: 3080 + - name: sshproxy + port: 3023 + protocol: TCP + targetPort: 3023 + - name: k8s + port: 3026 + protocol: TCP + targetPort: 3026 + - name: sshtun + port: 3024 + protocol: TCP + targetPort: 3024 + - name: mysql + port: 3036 + protocol: TCP + targetPort: 3036 +exposes separate listener ports when running in separate mode: + 1: | + - name: https + port: 443 + protocol: TCP + targetPort: 3080 + - name: sshproxy + port: 3023 + protocol: TCP + targetPort: 3023 + - name: k8s + port: 3026 + protocol: TCP + targetPort: 3026 + - name: sshtun + port: 3024 + protocol: TCP + targetPort: 3024 + - name: mysql + port: 3036 + protocol: TCP + targetPort: 3036 diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap index 4a76c564b3cd9..4b30b331fc4e6 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap @@ -7,6 +7,7 @@ creates a PodSecurityPolicy when enabled in values and supported: seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default name: RELEASE-NAME + namespace: NAMESPACE spec: allowPrivilegeEscalation: false fsGroup: @@ -37,6 +38,7 @@ creates a PodSecurityPolicy when enabled in values and supported: kind: Role metadata: name: RELEASE-NAME-psp + namespace: NAMESPACE rules: - apiGroups: - policy @@ -51,6 +53,7 @@ creates a PodSecurityPolicy when enabled in values and supported: kind: RoleBinding metadata: name: RELEASE-NAME-psp + namespace: NAMESPACE roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/pvc_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/pvc_test.yaml.snap deleted file mode 100644 index 2eef45611fa09..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/pvc_test.yaml.snap +++ /dev/null @@ -1,60 +0,0 @@ -creates a PersistentVolumeClaim when chartMode=custom: - 1: | - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi -creates a PersistentVolumeClaim when chartMode=standalone: - 1: | - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi -creates a PersistentVolumeClaim with values from custom-customsize.yaml: - 1: | - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi -creates a PersistentVolumeClaim with values from standalone-customsize.yaml: - 1: | - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/service_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/service_test.yaml.snap deleted file mode 100644 index 
4d38452469101..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/service_test.yaml.snap +++ /dev/null @@ -1,297 +0,0 @@ -adds a separate Mongo listener port when separateMongoListener is true: - 1: | - apiVersion: v1 - kind: Service - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - - name: mongo - port: 27017 - protocol: TCP - targetPort: 27017 - selector: - app: RELEASE-NAME - type: LoadBalancer -adds a separate Postgres listener port when separatePostgresListener is true: - 1: | - apiVersion: v1 - kind: Service - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - - name: postgres - port: 5432 - protocol: TCP - targetPort: 5432 - selector: - app: RELEASE-NAME - type: LoadBalancer -sets AWS annotations when chartMode=aws: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" - service.beta.kubernetes.io/aws-load-balancer-type: nlb - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: LoadBalancer -sets AWS backend protocol annotation to ssl when in AWS mode and ACM annotation is set: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: ssl - service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" - service.beta.kubernetes.io/aws-load-balancer-ssl-cert: arn:aws:acm:us-east-1:1234567890:certificate/a857a76c-51d0-4d3d-8000-465bb3e9829b - service.beta.kubernetes.io/aws-load-balancer-ssl-ports: 443 - service.beta.kubernetes.io/aws-load-balancer-type: nlb - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: LoadBalancer -sets AWS backend protocol annotation to tcp when in AWS mode and ACM annotation is not set: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - 
service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" - service.beta.kubernetes.io/aws-load-balancer-type: nlb - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: LoadBalancer -sets service annotations when specified: - 1: | - apiVersion: v1 - kind: Service - metadata: - annotations: - kubernetes.io/service: test-annotation - kubernetes.io/service-different: 5 - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: LoadBalancer -uses a ClusterIP when service.type=ClusterIP: - 1: | - apiVersion: v1 - kind: Service - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: ClusterIP -uses a LoadBalancer by default: - 1: | - apiVersion: v1 - kind: Service - metadata: - labels: - app: RELEASE-NAME - name: RELEASE-NAME - namespace: NAMESPACE - spec: - ports: - - name: https - port: 443 - protocol: TCP - targetPort: 3080 - - name: sshproxy - port: 3023 - protocol: TCP - targetPort: 3023 - - name: k8s - port: 3026 - protocol: TCP - targetPort: 3026 - - name: sshtun - port: 3024 - protocol: TCP - targetPort: 3024 - - name: mysql - port: 3036 - protocol: TCP - targetPort: 3036 - selector: - app: RELEASE-NAME - type: LoadBalancer diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/serviceaccount_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/serviceaccount_test.yaml.snap deleted file mode 100644 index fd33a98a02919..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/serviceaccount_test.yaml.snap +++ /dev/null @@ -1,19 +0,0 @@ -changes ServiceAccount name when specified: - 1: | - apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - kubernetes.io/serviceaccount: test-annotation - name: helm-lint - namespace: NAMESPACE -sets ServiceAccount annotations when specified: - 1: | - apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - kubernetes.io/serviceaccount: test-annotation - kubernetes.io/serviceaccount-different: 6 - name: RELEASE-NAME - namespace: NAMESPACE diff --git a/examples/chart/teleport-cluster/tests/clusterrole_test.yaml b/examples/chart/teleport-cluster/tests/auth_clusterrole_test.yaml similarity index 84% rename from examples/chart/teleport-cluster/tests/clusterrole_test.yaml rename to examples/chart/teleport-cluster/tests/auth_clusterrole_test.yaml index bae7ebfcf5cca..6e26d74d710a7 100644 --- 
a/examples/chart/teleport-cluster/tests/clusterrole_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_clusterrole_test.yaml @@ -1,6 +1,6 @@ -suite: ClusterRole +suite: Auth ClusterRole templates: - - clusterrole.yaml + - auth/clusterrole.yaml tests: - it: creates a ClusterRole asserts: @@ -8,7 +8,6 @@ tests: count: 1 - isKind: of: ClusterRole - - matchSnapshot: {} - it: adds operator permissions to ClusterRole values: - ../.lint/operator.yaml diff --git a/examples/chart/teleport-cluster/tests/auth_clusterrolebinding_test.yaml b/examples/chart/teleport-cluster/tests/auth_clusterrolebinding_test.yaml new file mode 100644 index 0000000000000..45117b15a6604 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/auth_clusterrolebinding_test.yaml @@ -0,0 +1,20 @@ +suite: Auth ClusterRoleBinding +templates: + - auth/clusterrolebinding.yaml +tests: + - it: creates a ClusterRoleBinding + asserts: + - hasDocuments: + count: 2 + - isKind: + of: ClusterRoleBinding + - it: uses the provided serviceAccount name + values: + - ../.lint/service-account.yaml + asserts: + - contains: + path: subjects + any: true + content: + kind: ServiceAccount + name: "helm-lint" diff --git a/examples/chart/teleport-cluster/tests/config_test.yaml b/examples/chart/teleport-cluster/tests/auth_config_test.yaml similarity index 75% rename from examples/chart/teleport-cluster/tests/config_test.yaml rename to examples/chart/teleport-cluster/tests/auth_config_test.yaml index ff99d943eb4a6..c829f3be4511e 100644 --- a/examples/chart/teleport-cluster/tests/config_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_config_test.yaml @@ -1,6 +1,6 @@ suite: ConfigMap templates: - - config.yaml + - auth/config.yaml tests: - it: matches snapshot for acme-off.yaml values: @@ -10,7 +10,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for acme-on.yaml values: @@ -20,7 +21,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for acme-uri-staging.yaml values: @@ -30,19 +32,10 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - - it: matches snapshot for affinity.yaml - values: - - ../.lint/affinity.yaml - asserts: - - hasDocuments: - count: 1 - - isKind: - of: ConfigMap - - matchSnapshot: {} - - - it: matches snapshot and tests for annotations.yaml + - it: wears annotations (annotations.yaml) values: - ../.lint/annotations.yaml asserts: @@ -56,7 +49,6 @@ tests: - equal: path: metadata.annotations.kubernetes\.io/config-different value: 2 - - matchSnapshot: {} - it: matches snapshot for auth-connector-name.yaml values: @@ -66,7 +58,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-disable-local.yaml values: @@ -76,7 +69,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-locking-mode.yaml values: @@ -86,7 +80,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-passwordless.yaml values: @@ -96,7 +91,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-type.yaml values: @@ -106,7 +102,8 @@ tests: count: 1 - isKind: of: ConfigMap 
- - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-type-legacy.yaml values: @@ -116,7 +113,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-webauthn.yaml values: @@ -126,7 +124,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for auth-webauthn-legacy.yaml values: @@ -136,7 +135,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws.yaml values: @@ -146,17 +146,19 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws-dynamodb-autoscaling.yaml values: - ../.lint/aws-dynamodb-autoscaling.yaml asserts: - - hasDocuments: - count: 1 - - isKind: - of: ConfigMap - - matchSnapshot: {} + - hasDocuments: + count: 1 + - isKind: + of: ConfigMap + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws-ha.yaml values: @@ -166,7 +168,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws-ha-acme.yaml values: @@ -176,7 +179,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws-ha-antiaffinity.yaml values: @@ -186,7 +190,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for aws-ha-log.yaml values: @@ -196,21 +201,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} - - - it: does not generate a config for cert-manager.yaml - values: - - ../.lint/cert-manager.yaml - asserts: - - hasDocuments: - count: 0 - - - it: does not generate a config for cert-secret.yaml - values: - - ../.lint/cert-secret.yaml - asserts: - - hasDocuments: - count: 0 + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for existing-tls-secret.yaml values: @@ -220,7 +212,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for existing-tls-secret-with-ca.yaml values: @@ -230,7 +223,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for gcp-ha-acme.yaml values: @@ -240,7 +234,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for gcp-ha-antiaffinity.yaml values: @@ -250,7 +245,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for gcp-ha-log.yaml values: @@ -260,7 +256,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for gcp.yaml values: @@ -270,7 +267,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for initcontainers.yaml values: @@ -280,7 +278,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for kube-cluster-name.yaml values: @@ -290,7 +289,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - 
matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for log-basic.yaml values: @@ -300,7 +300,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for log-extra.yaml values: @@ -310,7 +311,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for log-legacy.yaml values: @@ -320,14 +322,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} - - - it: does not generate a config for pdb.yaml - values: - - ../.lint/pdb.yaml - asserts: - - hasDocuments: - count: 0 + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for priority-class-name.yaml values: @@ -337,27 +333,30 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - - it: matches snapshot for proxy-listener-mode.yaml + - it: matches snapshot for proxy-listener-mode-multiplex.yaml values: - - ../.lint/proxy-listener-mode.yaml + - ../.lint/proxy-listener-mode-multiplex.yaml asserts: - hasDocuments: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - - it: matches snapshot for resources.yaml + - it: matches snapshot for proxy-listener-mode-separate.yaml values: - - ../.lint/resources.yaml + - ../.lint/proxy-listener-mode-separate.yaml asserts: - hasDocuments: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for service.yaml values: @@ -367,7 +366,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for separate-mongo-listener.yaml values: @@ -377,7 +377,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for separate-postgres-listener.yaml values: @@ -387,7 +388,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for public-addresses.yaml values: @@ -397,7 +399,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for session-recording.yaml values: @@ -407,7 +410,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for standalone-customsize.yaml values: @@ -417,7 +421,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for standalone-existingpvc.yaml values: @@ -427,7 +432,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for tolerations.yaml values: @@ -437,7 +443,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for version-override.yaml values: @@ -447,7 +454,8 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml - it: matches snapshot for volumes.yaml values: @@ -457,4 +465,5 @@ tests: count: 1 - isKind: of: ConfigMap - - matchSnapshot: {} + - matchSnapshot: + path: data.teleport\.yaml \ No newline at end of file diff --git a/examples/chart/teleport-cluster/tests/auth_pdb_test.yaml 
b/examples/chart/teleport-cluster/tests/auth_pdb_test.yaml new file mode 100644 index 0000000000000..0ef9aad75e7cf --- /dev/null +++ b/examples/chart/teleport-cluster/tests/auth_pdb_test.yaml @@ -0,0 +1,23 @@ +suite: Auth PodDisruptionBudget +templates: + - auth/pdb.yaml +tests: + - it: should not create a PDB when disabled in values + set: + highAvailability: + podDisruptionBudget: + enabled: false + asserts: + - hasDocuments: + count: 0 + - it: should create a PDB when enabled in values (pdb.yaml) + values: + - ../.lint/pdb.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodDisruptionBudget + - equal: + path: spec.minAvailable + value: 2 diff --git a/examples/chart/teleport-cluster/tests/pvc_test.yaml b/examples/chart/teleport-cluster/tests/auth_pvc_test.yaml similarity index 51% rename from examples/chart/teleport-cluster/tests/pvc_test.yaml rename to examples/chart/teleport-cluster/tests/auth_pvc_test.yaml index b40b16d9bed79..4653fd348909a 100644 --- a/examples/chart/teleport-cluster/tests/pvc_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_pvc_test.yaml @@ -1,47 +1,31 @@ -suite: PersistentVolumeClaim +suite: Auth PersistentVolumeClaim templates: - - pvc.yaml + - auth/pvc.yaml tests: - - it: creates a PersistentVolumeClaim when chartMode=standalone + - it: creates a PersistentVolumeClaim when chartMode=standalone with default size set: chartMode: standalone asserts: - - template: pvc.yaml - hasDocuments: + - hasDocuments: count: 1 - - template: pvc.yaml - isKind: + - isKind: of: PersistentVolumeClaim - - matchSnapshot: {} + - equal: + path: spec.resources.requests.storage + value: "10Gi" - - it: creates a PersistentVolumeClaim when chartMode=custom + - it: creates a PersistentVolumeClaim when chartMode=scratch set: - chartMode: custom - asserts: - - template: pvc.yaml - hasDocuments: - count: 1 - - template: pvc.yaml - isKind: - of: PersistentVolumeClaim - - matchSnapshot: {} - - - it: creates a PersistentVolumeClaim with values from standalone-customsize.yaml - values: - - ../.lint/standalone-customsize.yaml + chartMode: scratch asserts: - hasDocuments: count: 1 - isKind: of: PersistentVolumeClaim - - equal: - path: spec.resources.requests.storage - value: 50Gi - - matchSnapshot: {} - - it: creates a PersistentVolumeClaim with values from custom-customsize.yaml + - it: uses a custom size when set values: - - ../.lint/custom-customsize.yaml + - ../.lint/standalone-customsize.yaml asserts: - hasDocuments: count: 1 @@ -50,7 +34,6 @@ tests: - equal: path: spec.resources.requests.storage value: 50Gi - - matchSnapshot: {} - it: does not create a PersistentVolumeClaim when chartMode=standalone and existingClaimName is not blank set: @@ -61,29 +44,15 @@ tests: - hasDocuments: count: 0 - - it: does not create a PersistentVolumeClaim when chartMode=custom and existingClaimName is not blank + - it: does not create a PersistentVolumeClaim when chartMode=scratch and existingClaimName is not blank set: - chartMode: custom + chartMode: scratch persistence: existingClaimName: test-claim asserts: - hasDocuments: count: 0 - - it: does not create a PersistentVolumeClaim with values from standalone-existingpvc.yaml - values: - - ../.lint/standalone-existingpvc.yaml - asserts: - - hasDocuments: - count: 0 - - - it: does not create a PersistentVolumeClaim with values from custom-existingpvc.yaml - values: - - ../.lint/custom-existingpvc.yaml - asserts: - - hasDocuments: - count: 0 - - it: does not create a PersistentVolumeClaim when chartMode=aws set: chartMode: aws diff --git 
a/examples/chart/teleport-cluster/tests/serviceaccount_test.yaml b/examples/chart/teleport-cluster/tests/auth_serviceaccount_test.yaml similarity index 76% rename from examples/chart/teleport-cluster/tests/serviceaccount_test.yaml rename to examples/chart/teleport-cluster/tests/auth_serviceaccount_test.yaml index ea7c0c6b4aa0e..7a64ed016c19d 100644 --- a/examples/chart/teleport-cluster/tests/serviceaccount_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_serviceaccount_test.yaml @@ -1,6 +1,6 @@ -suite: ServiceAccount +suite: Auth ServiceAccount templates: - - serviceaccount.yaml + - auth/serviceaccount.yaml tests: - it: sets ServiceAccount annotations when specified values: @@ -12,12 +12,11 @@ tests: - equal: path: metadata.annotations.kubernetes\.io/serviceaccount-different value: 6 - - matchSnapshot: {} - - it: changes ServiceAccount name when specified + + - it: changes ServiceAccount name when specified values: - ../.lint/service-account.yaml asserts: - equal: path: metadata.name value: "helm-lint" - - matchSnapshot: {} diff --git a/examples/chart/teleport-cluster/tests/deployment_test.yaml b/examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml similarity index 59% rename from examples/chart/teleport-cluster/tests/deployment_test.yaml rename to examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml index 8d7674cb44c95..3c49b61d2c83c 100644 --- a/examples/chart/teleport-cluster/tests/deployment_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml @@ -1,10 +1,10 @@ -suite: Deployment +suite: Auth StatefulSet templates: - - deployment.yaml - - config.yaml + - auth/statefulset.yaml + - auth/config.yaml tests: - - it: sets Deployment annotations when specified - template: deployment.yaml + - it: sets Statefulset annotations when specified + template: auth/statefulset.yaml values: - ../.lint/annotations.yaml asserts: @@ -14,11 +14,9 @@ tests: - equal: path: metadata.annotations.kubernetes\.io/deployment-different value: 3 - - matchSnapshot: - path: spec.template.spec - it: sets Pod annotations when specified - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/annotations.yaml asserts: @@ -28,11 +26,9 @@ tests: - equal: path: spec.template.metadata.annotations.kubernetes\.io/pod-different value: 4 - - matchSnapshot: - path: spec.template.spec - it: should not have more than one replica in standalone mode - template: deployment.yaml + template: auth/statefulset.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -40,13 +36,11 @@ tests: - equal: path: spec.replicas value: 1 - - matchSnapshot: - path: spec.template.spec - it: should have multiple replicas when replicaCount is set - template: deployment.yaml + template: auth/statefulset.yaml set: - chartMode: custom + chartMode: scratch clusterName: helm-lint.example.com highAvailability: replicaCount: 3 @@ -54,16 +48,11 @@ tests: - equal: path: spec.replicas value: 3 - - matchSnapshot: - path: spec.template.spec - it: should set affinity when set in values - template: deployment.yaml + template: auth/statefulset.yaml set: - chartMode: custom clusterName: helm-lint.example.com - highAvailability: - replicaCount: 3 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -77,10 +66,10 @@ tests: - isNotNull: path: spec.template.spec.affinity - matchSnapshot: - path: spec.template.spec + path: spec.template.spec.affinity - it: should set required affinity when highAvailability.requireAntiAffinity is set - template: 
deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/aws-ha-antiaffinity.yaml asserts: @@ -91,20 +80,20 @@ tests: - isNotNull: path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution - matchSnapshot: - path: spec.template.spec + path: spec.template.spec.affinity - it: should set tolerations when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/tolerations.yaml asserts: - isNotNull: path: spec.template.spec.tolerations - matchSnapshot: - path: spec.template.spec + path: spec.template.spec.tolerations - it: should set resources when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/resources.yaml asserts: @@ -124,7 +113,7 @@ tests: path: spec.template.spec - it: should set securityContext when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/security-context.yaml asserts: @@ -150,20 +139,18 @@ tests: path: spec.template.spec - it: should not set securityContext when is empty object (default value) - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/security-context-empty.yaml asserts: - isNull: path: spec.template.spec.containers[0].securityContext - - matchSnapshot: - path: spec.template.spec # we can't use the dynamic chart version or appVersion as a variable in the tests, # so we override it manually and check that gets set instead # this saves us having to update the test every time we cut a new release - - it: should do enterprise things when when enterprise is set in values - template: deployment.yaml + - it: should use enterprise image and mount license when enterprise is set in values + template: auth/statefulset.yaml set: clusterName: helm-lint.example.com enterprise: true @@ -184,11 +171,9 @@ tests: name: license secret: secretName: license - - matchSnapshot: - path: spec.template.spec - - it: should not do enterprise things when when enterprise is not set in values - template: deployment.yaml + - it: should use OSS image and not mount license when enterprise is not set in values + template: auth/statefulset.yaml set: clusterName: helm-lint teleportVersionOverride: 8.3.4 @@ -212,7 +197,7 @@ tests: path: spec.template.spec - it: should mount GCP credentials in GCP mode - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/gcp-ha.yaml asserts: @@ -228,8 +213,6 @@ tests: name: gcp-credentials secret: secretName: teleport-gcp-credentials - - matchSnapshot: - path: spec.template.spec - it: should not mount secret when credentialSecretName is blank in values template: deployment.yaml @@ -248,11 +231,9 @@ tests: name: gcp-credentials secret: secretName: teleport-gcp-credentials - - matchSnapshot: - path: spec.template.spec - it: should mount GCP credentials for initContainer in GCP mode - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/gcp-ha.yaml - ../.lint/initcontainers.yaml @@ -263,109 +244,10 @@ tests: mountPath: /etc/teleport-secrets name: "gcp-credentials" readOnly: true - - matchSnapshot: - path: spec.template.spec - - - it: should mount TLS certs when cert-manager is enabled - template: deployment.yaml - values: - - ../.lint/gcp-ha-acme.yaml - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - - contains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: 
teleport-tls - - matchSnapshot: - path: spec.template.spec - - - it: should mount TLS certs for initContainer when cert-manager is enabled - template: deployment.yaml - values: - - ../.lint/gcp-ha-acme.yaml - - ../.lint/initcontainers.yaml - asserts: - - contains: - path: spec.template.spec.initContainers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: "teleport-tls" - readOnly: true - - matchSnapshot: - path: spec.template.spec - - - it: should mount ConfigMap for config in AWS mode - template: deployment.yaml - values: - - ../.lint/aws-ha.yaml - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport - name: "config" - readOnly: true - - contains: - path: spec.template.spec.volumes - content: - name: config - configMap: - name: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - - it: should mount ConfigMap for config in GCP mode - template: deployment.yaml - values: - - ../.lint/gcp-ha.yaml - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport - name: "config" - readOnly: true - - contains: - path: spec.template.spec.volumes - content: - name: config - configMap: - name: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - - it: should mount ConfigMap for config in standalone mode - template: deployment.yaml - set: - chartMode: standalone - clusterName: helm-lint.example.com - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport - name: "config" - readOnly: true - - contains: - path: spec.template.spec.volumes - content: - name: config - configMap: - name: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - it: should mount ConfigMap for config in custom mode - template: deployment.yaml + - it: should mount ConfigMap containing Teleport config + template: auth/statefulset.yaml set: - chartMode: custom clusterName: helm-lint.example.com asserts: - contains: @@ -379,57 +261,13 @@ tests: content: name: config configMap: - name: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - - it: should have Recreate strategy in standalone mode - template: deployment.yaml - set: - chartMode: standalone - clusterName: helm-lint.example.com - asserts: - - equal: - path: spec.strategy.type - value: Recreate - - matchSnapshot: - path: spec.template.spec - - - it: should not have strategy in AWS mode - template: deployment.yaml - values: - - ../.lint/aws-ha.yaml - asserts: - - isNull: - path: spec.template.spec.strategy - - matchSnapshot: - path: spec.template.spec + name: RELEASE-NAME-auth - - it: should not have strategy in GCP mode - template: deployment.yaml - values: - - ../.lint/gcp-ha.yaml - asserts: - - isNull: - path: spec.template.spec.strategy - - matchSnapshot: - path: spec.template.spec - - - it: should not have strategy in custom mode - template: deployment.yaml - set: - chartMode: custom - clusterName: helm-lint.example.com - asserts: - - isNull: - path: spec.template.spec.strategy - - matchSnapshot: - path: spec.template.spec - - - it: should mount extraVolumes and extraVolumeMounts - template: deployment.yaml + - it: should mount extraVolumes and extraVolumeMounts on container and initContainers + template: auth/statefulset.yaml values: - ../.lint/volumes.yaml + - ../.lint/initcontainers.yaml asserts: - contains: path: spec.template.spec.containers[0].volumeMounts @@ -437,40 +275,12 @@ tests: mountPath: /path/to/mount name: my-mount - contains: - 
path: spec.template.spec.volumes - content: - name: my-mount - secret: - secretName: mySecret - - matchSnapshot: - path: spec.template.spec - - - it: should mount extraVolumes and extraVolumeMounts - template: deployment.yaml - values: - - ../.lint/volumes.yaml - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts + path: spec.template.spec.initContainers[0].volumeMounts content: mountPath: /path/to/mount name: my-mount - contains: - path: spec.template.spec.volumes - content: - name: my-mount - secret: - secretName: mySecret - - matchSnapshot: - path: spec.template.spec - - - it: should mount extraVolumes and extraVolumeMounts - template: deployment.yaml - values: - - ../.lint/volumes.yaml - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts + path: spec.template.spec.initContainers[1].volumeMounts content: mountPath: /path/to/mount name: my-mount @@ -480,11 +290,8 @@ tests: name: my-mount secret: secretName: mySecret - - matchSnapshot: - path: spec.template.spec - - it: should set imagePullPolicy when set in values - template: deployment.yaml + template: auth/statefulset.yaml set: clusterName: helm-lint.example.com imagePullPolicy: Always @@ -492,11 +299,9 @@ tests: - equal: path: spec.template.spec.containers[0].imagePullPolicy value: Always - - matchSnapshot: - path: spec.template.spec - it: should set environment when extraEnv set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/extra-env.yaml asserts: @@ -505,11 +310,9 @@ tests: content: name: SOME_ENVIRONMENT_VARIABLE value: "some-value" - - matchSnapshot: - path: spec.template.spec - it: should provision initContainer correctly when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/initcontainers.yaml - ../.lint/resources.yaml @@ -536,11 +339,32 @@ tests: - equal: path: spec.template.spec.initContainers[0].resources.requests.memory value: 2Gi + - contains: + path: spec.template.spec.initContainers[1].args + content: "echo test2" + - equal: + path: spec.template.spec.initContainers[1].name + value: "teleport-init2" + - equal: + path: spec.template.spec.initContainers[1].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[1].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[1].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[1].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[1].resources.requests.memory + value: 2Gi - matchSnapshot: - path: spec.template.spec + path: spec.template.spec.initContainers - it: should add insecureSkipProxyTLSVerify to args when set in values - template: deployment.yaml + template: auth/statefulset.yaml set: clusterName: helm-lint.example.com insecureSkipProxyTLSVerify: true @@ -548,11 +372,9 @@ tests: - contains: path: spec.template.spec.containers[0].args content: "--insecure" - - matchSnapshot: - path: spec.template.spec - it: should expose diag port - template: deployment.yaml + template: auth/statefulset.yaml set: clusterName: helm-lint.example.com asserts: @@ -562,11 +384,9 @@ tests: name: diag containerPort: 3000 protocol: TCP - - matchSnapshot: - path: spec.template.spec - it: should set postStart command if set in values - template: deployment.yaml + template: auth/statefulset.yaml set: clusterName: helm-lint.example.com postStart: @@ -575,11 +395,9 @@ tests: - equal: path: 
spec.template.spec.containers[0].lifecycle.postStart.exec.command value: ["/bin/echo", "test"] - - matchSnapshot: - path: spec.template.spec - it: should add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is true - template: deployment.yaml + template: auth/statefulset.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -592,11 +410,9 @@ tests: name: data persistentVolumeClaim: claimName: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - it: should not add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is false - template: deployment.yaml + template: auth/statefulset.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -609,13 +425,11 @@ tests: name: data persistentVolumeClaim: claimName: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - it: should add PersistentVolumeClaim as volume when in custom mode and persistence.enabled is true - template: deployment.yaml + - it: should add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is true + template: auth/statefulset.yaml set: - chartMode: custom + chartMode: scratch clusterName: helm-lint.example.com persistence: enabled: true @@ -626,13 +440,11 @@ tests: name: data persistentVolumeClaim: claimName: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - - it: should not add PersistentVolumeClaim as volume when in custom mode and persistence.enabled is false - template: deployment.yaml + - it: should not add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is false + template: auth/statefulset.yaml set: - chartMode: custom + chartMode: scratch clusterName: helm-lint.example.com persistence: enabled: false @@ -643,11 +455,9 @@ tests: name: data persistentVolumeClaim: claimName: RELEASE-NAME - - matchSnapshot: - path: spec.template.spec - it: should add an operator side-car when operator is enabled - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/operator.yaml asserts: @@ -658,7 +468,7 @@ tests: path: spec.template.spec.containers[1] - it: should add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set and persistence.enabled is true - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/standalone-existingpvc.yaml asserts: @@ -668,11 +478,9 @@ tests: name: data persistentVolumeClaim: claimName: teleport-storage - - matchSnapshot: - path: spec.template.spec - it: should not add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set but persistence.enabled is false - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/standalone-existingpvc.yaml set: @@ -685,11 +493,9 @@ tests: name: data persistentVolumeClaim: claimName: teleport-storage - - matchSnapshot: - path: spec.template.spec - - it: should add named PersistentVolumeClaim as volume when in custom mode and persistence.existingClaimName is set - template: deployment.yaml + - it: should add named PersistentVolumeClaim as volume when in scratch mode and persistence.existingClaimName is set + template: auth/statefulset.yaml values: - ../.lint/standalone-existingpvc.yaml asserts: @@ -699,11 +505,9 @@ tests: name: data persistentVolumeClaim: claimName: teleport-storage - - matchSnapshot: - path: spec.template.spec - - it: should not add named PersistentVolumeClaim as volume when in custom mode, persistence.existingClaimName is set and 
persistence.enabled is false - template: deployment.yaml + - it: should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName is set and persistence.enabled is false + template: auth/statefulset.yaml values: - ../.lint/standalone-existingpvc.yaml set: @@ -720,7 +524,7 @@ tests: path: spec.template.spec - it: should add emptyDir for data in AWS mode - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/aws-ha.yaml asserts: @@ -729,11 +533,9 @@ tests: content: name: data emptyDir: {} - - matchSnapshot: - path: spec.template.spec - it: should add emptyDir for data in GCP mode - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/gcp-ha.yaml asserts: @@ -742,22 +544,18 @@ tests: content: name: data emptyDir: {} - - matchSnapshot: - path: spec.template.spec - it: should set priorityClassName when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/priority-class-name.yaml asserts: - equal: path: spec.template.spec.priorityClassName value: system-cluster-critical - - matchSnapshot: - path: spec.template.spec - it: should set probeTimeoutSeconds when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/probe-timeout-seconds.yaml asserts: @@ -767,92 +565,18 @@ tests: - equal: path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds value: 5 - - matchSnapshot: - path: spec.template.spec - - - it: should not mount TLS secrets when when highAvailability.certManager.enabled is false and tls.existingSecretName is not set - template: deployment.yaml - set: - clusterName: helm-lint-test-cluster - asserts: - - notContains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: teleport-tls - - notContains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - matchSnapshot: - path: spec.template.spec - - - it: should mount cert-manager TLS secret when highAvailability.certManager.enabled is true - template: deployment.yaml - values: - - ../.lint/cert-manager.yaml - asserts: - - contains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: teleport-tls - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - matchSnapshot: - path: spec.template.spec - - - it: should mount tls.existingSecretName when set in values - template: deployment.yaml - values: - - ../.lint/existing-tls-secret.yaml - asserts: - - contains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - - matchSnapshot: - path: spec.template.spec - it: should mount tls.existingCASecretName and set environment when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/existing-tls-secret-with-ca.yaml asserts: - - contains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - contains: path: spec.template.spec.volumes content: name: teleport-tls-ca secret: secretName: helm-lint-existing-tls-secret-ca - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - 
mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - contains: path: spec.template.spec.containers[0].volumeMounts content: @@ -864,33 +588,19 @@ tests: content: name: SSL_CERT_FILE value: /etc/teleport-tls-ca/ca.pem - - matchSnapshot: - path: spec.template.spec - it: should mount tls.existingCASecretName and set extra environment when set in values - template: deployment.yaml + template: auth/statefulset.yaml values: - ../.lint/existing-tls-secret-with-ca.yaml - ../.lint/extra-env.yaml asserts: - - contains: - path: spec.template.spec.volumes - content: - name: teleport-tls - secret: - secretName: helm-lint-existing-tls-secret - contains: path: spec.template.spec.volumes content: name: teleport-tls-ca secret: secretName: helm-lint-existing-tls-secret-ca - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - mountPath: /etc/teleport-tls - name: teleport-tls - readOnly: true - contains: path: spec.template.spec.containers[0].volumeMounts content: @@ -907,15 +617,14 @@ tests: content: name: SOME_ENVIRONMENT_VARIABLE value: some-value - - matchSnapshot: - path: spec.template.spec - - it: should set minReadySeconds in non-standalone mode - template: deployment.yaml + - it: should set minReadySeconds when replicaCount > 1 + template: auth/statefulset.yaml set: - chartMode: custom + chartMode: scratch highAvailability: minReadySeconds: 60 + replicaCount: 3 asserts: - equal: path: spec.minReadySeconds diff --git a/examples/chart/teleport-cluster/tests/clusterrolebinding_test.yaml b/examples/chart/teleport-cluster/tests/clusterrolebinding_test.yaml deleted file mode 100644 index b8f30d8cf4fc2..0000000000000 --- a/examples/chart/teleport-cluster/tests/clusterrolebinding_test.yaml +++ /dev/null @@ -1,11 +0,0 @@ -suite: ClusterRoleBinding -templates: - - clusterrolebinding.yaml -tests: - - it: creates a ClusterRoleBinding - asserts: - - hasDocuments: - count: 1 - - isKind: - of: ClusterRoleBinding - - matchSnapshot: {} diff --git a/examples/chart/teleport-cluster/tests/pdb_test.yaml b/examples/chart/teleport-cluster/tests/pdb_test.yaml deleted file mode 100644 index b673ce8400ce3..0000000000000 --- a/examples/chart/teleport-cluster/tests/pdb_test.yaml +++ /dev/null @@ -1,13 +0,0 @@ -suite: PodDisruptionBudget -templates: - - pdb.yaml -tests: - - it: should create a PDB when enabled in values (pdb.yaml) - values: - - ../.lint/pdb.yaml - asserts: - - hasDocuments: - count: 1 - - isKind: - of: PodDisruptionBudget - - matchSnapshot: {} diff --git a/examples/chart/teleport-cluster/tests/certificate_test.yaml b/examples/chart/teleport-cluster/tests/proxy_certificate_test.yaml similarity index 93% rename from examples/chart/teleport-cluster/tests/certificate_test.yaml rename to examples/chart/teleport-cluster/tests/proxy_certificate_test.yaml index b2cb8cc737301..d1d8f0c3c7770 100644 --- a/examples/chart/teleport-cluster/tests/certificate_test.yaml +++ b/examples/chart/teleport-cluster/tests/proxy_certificate_test.yaml @@ -1,6 +1,6 @@ -suite: Certificate +suite: Proxy Certificate templates: - - certificate.yaml + - proxy/certificate.yaml tests: - it: should request a certificate for cluster name when cert-manager is enabled (cert-manager.yaml) values: diff --git a/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml new file mode 100644 index 0000000000000..e0392f1f5e12b --- /dev/null +++ b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml @@ -0,0 +1,551 @@ +suite: Proxy 
Deployment +templates: + - proxy/deployment.yaml + - proxy/config.yaml +tests: + - it: sets Deployment annotations when specified + template: proxy/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/deployment + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/deployment-different + value: 3 + + - it: sets Pod annotations when specified + template: proxy/deployment.yaml + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: spec.template.metadata.annotations.kubernetes\.io/pod + value: test-annotation + - equal: + path: spec.template.metadata.annotations.kubernetes\.io/pod-different + value: 4 + + - it: should not have more than one replica if no certificate is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - equal: + path: spec.replicas + value: 1 + + - it: should have multiple replicas by default when a certificate is passed through a secret + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + tls: + existingSecretName: my-certs + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should have multiple replicas by default when certManager is configured + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + certManager: + enabled: true + asserts: + - equal: + path: spec.replicas + value: 2 + + - it: should have multiple replicas when global replicaCount is set and a certificate is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + replicaCount: 3 + certManager: + enabled: true + asserts: + - equal: + path: spec.replicas + value: 3 + + - it: should have a single replica when proxy-specific replicaCount is set to 1 and a cert is passed + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + certManager: + enabled: true + proxy: + highAvailability: + replicaCount: 1 + asserts: + - equal: + path: spec.replicas + value: 1 + + - it: should set affinity when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + highAvailability: + replicaCount: 3 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: gravitational.io/dedicated + operator: In + values: + - teleport + asserts: + - isNotNull: + path: spec.template.spec.affinity + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set required affinity when highAvailability.requireAntiAffinity is set + template: proxy/deployment.yaml + values: + - ../.lint/aws-ha-antiaffinity.yaml + asserts: + - isNotNull: + path: spec.template.spec.affinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity + - isNotNull: + path: spec.template.spec.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution + - matchSnapshot: + path: spec.template.spec.affinity + + - it: should set tolerations when set in values + template: proxy/deployment.yaml + values: + - ../.lint/tolerations.yaml + asserts: + - isNotNull: + path: spec.template.spec.tolerations + - matchSnapshot: + path: spec.template.spec.tolerations + + - it: should set resources when set in values + template: proxy/deployment.yaml + values: + - ../.lint/resources.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].resources.limits.cpu + value: 2 + - equal: + path: 
spec.template.spec.containers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.containers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.containers[0].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec + + - it: should set securityContext when set in values + template: proxy/deployment.yaml + values: + - ../.lint/security-context.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.privileged + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem + value: false + - equal: + path: spec.template.spec.containers[0].securityContext.runAsGroup + value: 99 + - equal: + path: spec.template.spec.containers[0].securityContext.runAsNonRoot + value: true + - equal: + path: spec.template.spec.containers[0].securityContext.runAsUser + value: 99 + - matchSnapshot: + path: spec.template.spec + + - it: should not set securityContext when it is an empty object (default value) + template: proxy/deployment.yaml + values: + - ../.lint/security-context-empty.yaml + asserts: + - isNull: + path: spec.template.spec.containers[0].securityContext + + # we can't use the dynamic chart version or appVersion as a variable in the tests, + # so we override it manually and check that it gets set instead + # this saves us having to update the test every time we cut a new release + - it: should use enterprise image when enterprise is set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + enterprise: true + teleportVersionOverride: 8.3.4 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport-ent:8.3.4 + + - it: should use OSS image when enterprise is not set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint + teleportVersionOverride: 8.3.4 + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: public.ecr.aws/gravitational/teleport:8.3.4 + + - it: should mount TLS certs when cert-manager is enabled + template: proxy/deployment.yaml + values: + - ../.lint/gcp-ha-acme.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - contains: + path: spec.template.spec.initContainers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + - contains: + path: spec.template.spec.initContainers[1].volumeMounts + content: + mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true + + - it: should mount ConfigMap containing Teleport config + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport + name: "config" + readOnly: true + - contains: + path: spec.template.spec.volumes + content: + name: config + configMap: + name: RELEASE-NAME-proxy + + - it: should mount extraVolumes and extraVolumeMounts on container and initContainers + template: proxy/deployment.yaml + values: + - ../.lint/volumes.yaml + - ../.lint/initcontainers.yaml + asserts: + - contains: + path: 
spec.template.spec.containers[0].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.initContainers[0].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.initContainers[1].volumeMounts + content: + mountPath: /path/to/mount + name: my-mount + - contains: + path: spec.template.spec.volumes + content: + name: my-mount + secret: + secretName: mySecret + + - it: should set imagePullPolicy when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + imagePullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: should set environment when extraEnv set in values + template: proxy/deployment.yaml + values: + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: "some-value" + + - it: should provision initContainer correctly when set in values + template: proxy/deployment.yaml + values: + - ../.lint/initcontainers.yaml + - ../.lint/resources.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.initContainers[0].args + content: "echo test" + - equal: + path: spec.template.spec.initContainers[0].name + value: "teleport-init" + - equal: + path: spec.template.spec.initContainers[0].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[0].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[0].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[0].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[0].resources.requests.memory + value: 2Gi + - contains: + path: spec.template.spec.initContainers[1].args + content: "echo test2" + - equal: + path: spec.template.spec.initContainers[1].name + value: "teleport-init2" + - equal: + path: spec.template.spec.initContainers[1].image + value: "alpine" + - equal: + path: spec.template.spec.initContainers[1].resources.limits.cpu + value: 2 + - equal: + path: spec.template.spec.initContainers[1].resources.limits.memory + value: 4Gi + - equal: + path: spec.template.spec.initContainers[1].resources.requests.cpu + value: 1 + - equal: + path: spec.template.spec.initContainers[1].resources.requests.memory + value: 2Gi + - matchSnapshot: + path: spec.template.spec.initContainers + + - it: should add insecureSkipProxyTLSVerify to args when set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + insecureSkipProxyTLSVerify: true + asserts: + - contains: + path: spec.template.spec.containers[0].args + content: "--insecure" + + - it: should expose diag port + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: diag + containerPort: 3000 + protocol: TCP + + - it: should set postStart command if set in values + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + postStart: + command: ["/bin/echo", "test"] + asserts: + - equal: + path: spec.template.spec.containers[0].lifecycle.postStart.exec.command + value: ["/bin/echo", "test"] + + - it: should add and mount emptyDir for data + template: proxy/deployment.yaml + set: + clusterName: helm-lint.example.com + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + 
content: + mountPath: /var/lib/teleport + name: data + - contains: + path: spec.template.spec.volumes + content: + name: data + emptyDir: {} + + - it: should set priorityClassName when set in values + template: proxy/deployment.yaml + values: + - ../.lint/priority-class-name.yaml + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: system-cluster-critical + + - it: should set probeTimeoutSeconds when set in values + template: proxy/deployment.yaml + values: + - ../.lint/probe-timeout-seconds.yaml + asserts: + - equal: + path: spec.template.spec.containers[0].livenessProbe.timeoutSeconds + value: 5 + - equal: + path: spec.template.spec.containers[0].readinessProbe.timeoutSeconds + value: 5 + + - it: should not mount TLS secrets when highAvailability.certManager.enabled is false and tls.existingSecretName is not set + template: proxy/deployment.yaml + set: + clusterName: helm-lint-test-cluster + asserts: + - notContains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - notContains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount cert-manager TLS secret when highAvailability.certManager.enabled is true + template: proxy/deployment.yaml + values: + - ../.lint/cert-manager.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: teleport-tls + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount tls.existingSecretName when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls + secret: + secretName: helm-lint-existing-tls-secret + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls + name: teleport-tls + readOnly: true + + - it: should mount tls.existingCASecretName and set environment when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + + - it: should mount tls.existingCASecretName and set extra environment when set in values + template: proxy/deployment.yaml + values: + - ../.lint/existing-tls-secret-with-ca.yaml + - ../.lint/extra-env.yaml + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: teleport-tls-ca + secret: + secretName: helm-lint-existing-tls-secret-ca + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + mountPath: /etc/teleport-tls-ca + name: teleport-tls-ca + readOnly: true + - contains: + path: spec.template.spec.containers[0].env + content: + name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + - contains: + path: spec.template.spec.containers[0].env + content: + name: SOME_ENVIRONMENT_VARIABLE + value: some-value + + - it: should set minReadySeconds when replicaCount > 1 + template: proxy/deployment.yaml + set: + clusterName: helm-lint + highAvailability: + certManager: + enabled: true + replicaCount: 3 + minReadySeconds: 60 + asserts: + - equal: + path: spec.minReadySeconds + value: 60
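All of the replica-count assertions in this suite exercise the same values tree; a minimal sketch of the shape they set, taken directly from the set: blocks in the tests above (only the comments are added here):

    clusterName: helm-lint.example.com
    highAvailability:
      replicaCount: 3        # global replica count, honoured once a cert source exists
      minReadySeconds: 60
      certManager:
        enabled: true        # a certificate source unlocks multiple replicas
    proxy:
      highAvailability:
        replicaCount: 1      # proxy-specific override, takes precedence over the global value

Without a certificate source (cert-manager or tls.existingSecretName), the proxy Deployment stays at a single replica, as the first replica tests assert.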
diff --git a/examples/chart/teleport-cluster/tests/proxy_pdb_test.yaml b/examples/chart/teleport-cluster/tests/proxy_pdb_test.yaml new file mode 100644 index 0000000000000..851a0a751163d --- /dev/null +++ b/examples/chart/teleport-cluster/tests/proxy_pdb_test.yaml @@ -0,0 +1,23 @@ +suite: Proxy PodDisruptionBudget +templates: + - proxy/pdb.yaml +tests: + - it: should not create a PDB when disabled in values + set: + highAvailability: + podDisruptionBudget: + enabled: false + asserts: + - hasDocuments: + count: 0 + - it: should create a PDB when enabled in values (pdb.yaml) + values: + - ../.lint/pdb.yaml + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodDisruptionBudget + - equal: + path: spec.minAvailable + value: 2 diff --git a/examples/chart/teleport-cluster/tests/service_test.yaml b/examples/chart/teleport-cluster/tests/proxy_service_test.yaml similarity index 83% rename from examples/chart/teleport-cluster/tests/service_test.yaml rename to examples/chart/teleport-cluster/tests/proxy_service_test.yaml index 8af9ef83bdabd..ee74b3f3d0982 100644 --- a/examples/chart/teleport-cluster/tests/service_test.yaml +++ b/examples/chart/teleport-cluster/tests/proxy_service_test.yaml @@ -1,6 +1,6 @@ -suite: Service +suite: Proxy Service templates: - - service.yaml + - proxy/service.yaml tests: - it: uses a LoadBalancer by default set: @@ -13,7 +13,6 @@ tests: - equal: path: spec.type value: LoadBalancer - - matchSnapshot: {} - it: uses a ClusterIP when service.type=ClusterIP set: @@ -28,7 +27,6 @@ tests: - equal: path: spec.type value: ClusterIP - - matchSnapshot: {} - it: sets AWS annotations when chartMode=aws set: @@ -51,7 +49,6 @@ tests: - equal: path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-cross-zone-load-balancing-enabled value: "true" - - matchSnapshot: {} - it: sets service annotations when specified values: @@ -63,7 +60,6 @@ tests: - equal: path: metadata.annotations.kubernetes\.io/service-different value: 5 - - matchSnapshot: {} - it: adds a separate Postgres listener port when separatePostgresListener is true values: @@ -76,7 +72,6 @@ tests: port: 5432 targetPort: 5432 protocol: TCP - - matchSnapshot: {} - it: adds a separate Mongo listener port when separateMongoListener is true values: @@ -89,7 +84,6 @@ tests: port: 27017 targetPort: 27017 protocol: TCP - - matchSnapshot: {} - it: sets AWS backend protocol annotation to ssl when in AWS mode and ACM annotation is set values: @@ -103,7 +97,6 @@ tests: - equal: path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol value: ssl - - matchSnapshot: {} - it: sets AWS backend protocol annotation to tcp when in AWS mode and ACM annotation is not set values: @@ -112,4 +105,24 @@ tests: - equal: path: metadata.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-backend-protocol value: tcp - - matchSnapshot: {} + + - it: exposes separate listener ports by default + values: + - ../.lint/example-minimal-standalone.yaml + asserts: + - matchSnapshot: + path: spec.ports + + - it: exposes separate listener ports when running in separate mode + values: + - ../.lint/proxy-listener-mode-separate.yaml + asserts: + - matchSnapshot: + path: spec.ports + + - it: exposes a single port when running in multiplex mode + values: + - ../.lint/proxy-listener-mode-multiplex.yaml + asserts: + - matchSnapshot: + path: spec.ports
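The three listener-mode tests reference .lint fixtures that are not part of this diff; a plausible minimal fixture for the multiplex case, assuming the chart exposes the mode through a top-level proxyListenerMode value (mirroring Teleport's proxy_listener_mode setting), would be:

    # ../.lint/proxy-listener-mode-multiplex.yaml (hypothetical contents)
    clusterName: helm-lint.example.com
    proxyListenerMode: multiplex

In multiplex mode every proxy protocol is served on the single TLS port, so the spec.ports snapshot should contain one entry, whereas separate mode keeps the per-protocol listener ports.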
diff --git a/examples/chart/teleport-cluster/tests/proxy_serviceaccount_test.yaml b/examples/chart/teleport-cluster/tests/proxy_serviceaccount_test.yaml new file mode 100644 index 0000000000000..14ec87f31e645 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/proxy_serviceaccount_test.yaml @@ -0,0 +1,22 @@ +suite: Proxy ServiceAccount +templates: + - proxy/serviceaccount.yaml +tests: + - it: sets ServiceAccount annotations when specified + values: + - ../.lint/annotations.yaml + asserts: + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount + value: test-annotation + - equal: + path: metadata.annotations.kubernetes\.io/serviceaccount-different + value: 6 + + - it: changes ServiceAccount name when specified and appends "-proxy" + values: + - ../.lint/service-account.yaml + asserts: + - equal: + path: metadata.name + value: "helm-lint-proxy" diff --git a/examples/chart/teleport-cluster/tests/psp_test.yaml b/examples/chart/teleport-cluster/tests/psp_test.yaml index 4e9e1b2eaead5..d255b9f88ada4 100644 --- a/examples/chart/teleport-cluster/tests/psp_test.yaml +++ b/examples/chart/teleport-cluster/tests/psp_test.yaml @@ -29,4 +29,4 @@ tests: enabled: true asserts: - hasDocuments: - count: 0 \ No newline at end of file + count: 0 From 0e848f58bb0e748989447e19a914ac75d40a1879 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Tue, 13 Dec 2022 15:14:22 -0500 Subject: [PATCH 04/14] Apply suggestions from code review Co-authored-by: Gus Luxton --- .../chart/teleport-cluster/templates/auth/_config.scratch.tpl | 2 +- .../teleport-cluster/templates/proxy/_config.scratch.tpl | 2 +- examples/chart/teleport-cluster/templates/psp.yaml | 1 - examples/chart/teleport-cluster/values.yaml | 4 ++-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl index 8c77cca276cfc..73a4cd0209d93 100644 --- a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl @@ -9,5 +9,5 @@ auth_service: {{- end -}} {{- define "teleport-cluster.auth.config.custom" -}} -{{ fail "'custom' mode has been depreacted with chart v12 because of the proxy/auth split, see http://link" }} +{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }} {{- end -}} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl index fd43387b956d9..1880d8ee0dc6c 100644 --- a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl @@ -9,5 +9,5 @@ proxy_service: {{- end -}} {{- define "teleport-cluster.proxy.config.custom" -}} -{{ fail "'custom' mode has been depreacted with chart v12 because of the proxy/auth split, see http://link" }} +{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }} {{- end -}} \ No newline at end of file diff --git a/examples/chart/teleport-cluster/templates/psp.yaml b/examples/chart/teleport-cluster/templates/psp.yaml index 6540a05333c8e..bc7b87c302ec9 100644 --- a/examples/chart/teleport-cluster/templates/psp.yaml +++ b/examples/chart/teleport-cluster/templates/psp.yaml @@ -3,7 +3,6 @@ apiVersion: policy/v1beta1 
kind: PodSecurityPolicy metadata: name: {{ .Release.Name }} - namespace: {{ .Release.Namespace }} annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index d024900aa66f2..80927f64a25a4 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -174,7 +174,7 @@ labels: {} # - "standalone": will deploy a Teleport container running auth and proxy services with a PersistentVolumeClaim for storage. # - "aws": will deploy Teleport using DynamoDB for backend/audit log storage and S3 for session recordings. (1) # - "gcp": will deploy Teleport using Firestore for backend/audit log storage and Google Cloud storage for session recordings. (2) -# - "scratch": will deploy Teleport container but will not provide default configuration file. You must pass your own configuration. (3) +# - "scratch": will deploy Teleport containers but will not provide a default configuration file. You must pass your own configuration. (3) # (1) To use "aws" mode, you must also configure the "aws" section below. # (2) To use "gcp" mode, you must also configure the "gcp" section below. # (3) When set to "scratch", you must write the teleport configuration in auth.teleportConfig and proxy.teleportConfig. @@ -182,7 +182,7 @@ chartMode: standalone # Whether the chart should create a Teleport ProvisionToken for the proxies to join the Teleport cluster. # Disabling this flag will cause the proxies not to be able to join the auth pods. In this case, the -# Helm chart user is responsible to configure working join_params on the proxy. +# Helm chart user is responsible for configuring working join_params on the proxy. createProxyToken: true ###################################################################### From 9c67245784ab88eae36c1da1d5f8447c8475b113 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Wed, 14 Dec 2022 14:51:02 -0500 Subject: [PATCH 05/14] Allow standalone auth to run with multiple auth pods. Support AWS LB controller --- .../chart/teleport-cluster/templates/auth/statefulset.yaml | 5 ----- .../chart/teleport-cluster/templates/proxy/service.yaml | 6 ++++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml index 68901cf106d27..b8bdc247f5c63 100644 --- a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml +++ b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml @@ -12,12 +12,7 @@ metadata: annotations: {{- toYaml $auth.annotations.deployment | nindent 4 }} {{- end }} spec: -{{- if not (eq $auth.chartMode "standalone") }} replicas: {{ $auth.highAvailability.replicaCount }} - minReadySeconds: {{ $auth.highAvailability.minReadySeconds }} - {{- else }} - replicas: 1 - {{- end }} serviceName: {{ .Release.Name }}-auth selector: matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . | nindent 6 }}
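As a usage sketch for the scratch mode documented in the values.yaml hunk above: per note (3), both halves of the configuration must be written out, roughly along these lines (the cluster name and address are placeholders, and only the minimum is shown):

    chartMode: scratch
    clusterName: teleport.example.com
    auth:
      teleportConfig:
        auth_service:
          enabled: true
    proxy:
      teleportConfig:
        proxy_service:
          enabled: true
          public_addr: teleport.example.com:443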
diff --git a/examples/chart/teleport-cluster/templates/proxy/service.yaml b/examples/chart/teleport-cluster/templates/proxy/service.yaml index 0a5f4b0f3a20d..0a2d548f725c0 100644 --- a/examples/chart/teleport-cluster/templates/proxy/service.yaml +++ b/examples/chart/teleport-cluster/templates/proxy/service.yaml @@ -9,9 +9,15 @@ metadata: {{- if (or ($proxy.annotations.service) (eq $proxy.chartMode "aws")) }} annotations: {{- if eq $proxy.chartMode "aws" }} + {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-backend-protocol")}} service.beta.kubernetes.io/aws-load-balancer-backend-protocol: {{ $backendProtocol }} + {{- end }} + {{- if not (or (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled") (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-attributes"))}} service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true" + {{- end }} + {{- if not (hasKey $proxy.annotations.service "service.beta.kubernetes.io/aws-load-balancer-type")}} service.beta.kubernetes.io/aws-load-balancer-type: nlb + {{- end }} {{- end }} {{- if $proxy.annotations.service }} {{- toYaml $proxy.annotations.service | nindent 4 }} From 8f847bfea81cc57f2733dccff2545f85979de09e Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Thu, 15 Dec 2022 15:07:39 -0500 Subject: [PATCH 06/14] Address Gus' feedback + avoid auth serviceName overflow --- .../chart/teleport-cluster/templates/_helpers.tpl | 12 ++++++++++++ .../templates/auth/_config.common.tpl | 2 +- .../chart/teleport-cluster/templates/auth/pvc.yaml | 8 +++----- .../teleport-cluster/templates/auth/service.yaml | 2 +- .../teleport-cluster/templates/auth/statefulset.yaml | 3 +-- .../templates/proxy/_config.common.tpl | 2 +- .../tests/__snapshot__/psp_test.yaml.snap | 1 - .../teleport-cluster/tests/auth_config_test.yaml | 2 +- 8 files changed, 20 insertions(+), 12 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/_helpers.tpl b/examples/chart/teleport-cluster/templates/_helpers.tpl index c603a922e5371..77f2403f73480 100644 --- a/examples/chart/teleport-cluster/templates/_helpers.tpl +++ b/examples/chart/teleport-cluster/templates/_helpers.tpl @@ -64,3 +64,15 @@ app.kubernetes.io/managed-by: '{{ .Release.Service }}' app.kubernetes.io/version: '{{ include "teleport-cluster.version" . }}' teleport.dev/majorVersion: '{{ include "teleport-cluster.majorVersion" . }}' {{- end -}} + +{{/* ServiceNames are limited to 63 characters; we might have to truncate the ReleaseName + to make sure the auth serviceName won't exceed this limit */}} +{{- define "teleport-cluster.auth.serviceName" -}} +{{- .Release.Name | trunc 58 | trimSuffix "-" -}}-auth +{{- end -}} + +{{/* In most places we want to use the FQDN instead of relying on Kubernetes ndots behaviour + for performance reasons */}} +{{- define "teleport-cluster.auth.serviceFQDN" -}} +{{ include "teleport-cluster.auth.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local +{{- end -}}
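To make the truncation concrete, here is what these helpers would render for a hypothetical release (names invented for illustration):

    {{/* with .Release.Name = "teleport" and .Release.Namespace = "prod":          */}}
    {{/* teleport-cluster.auth.serviceName -> teleport-auth                        */}}
    {{/* teleport-cluster.auth.serviceFQDN -> teleport-auth.prod.svc.cluster.local */}}

A release name longer than 58 characters is cut to 58 (with any trailing "-" stripped) before "-auth" is appended, so the Service name never exceeds the 63-character Kubernetes limit; using the fully qualified form also avoids the resolver's ndots-driven search-path lookups mentioned in the comment.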
diff --git a/examples/chart/teleport-cluster/templates/auth/_config.common.tpl b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl index 0cd1cc1e73c7b..d8796cd08151e 100644 --- a/examples/chart/teleport-cluster/templates/auth/_config.common.tpl +++ b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl @@ -5,7 +5,7 @@ version: v3 kubernetes_service: enabled: true listen_addr: 0.0.0.0:3026 - public_addr: "{{ .Release.Name }}-auth.{{ .Release.Namespace }}.svc.cluster.local:3026" + public_addr: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3026" {{- if .Values.kubeClusterName }} kube_cluster_name: {{ .Values.kubeClusterName }} {{- else }} diff --git a/examples/chart/teleport-cluster/templates/auth/pvc.yaml b/examples/chart/teleport-cluster/templates/auth/pvc.yaml index 3036ffb0572ba..751e120ad90fd 100644 --- a/examples/chart/teleport-cluster/templates/auth/pvc.yaml +++ b/examples/chart/teleport-cluster/templates/auth/pvc.yaml @@ -1,11 +1,9 @@ {{- $auth := mustMergeOverwrite .Values .Values.auth -}} -{{/* $persistence looks like some backward compatibility trick, is this still relevant? */}} -{{- $persistence := (coalesce $auth.standalone $auth.persistence) -}} -{{- if .Values.persistence.enabled }} +{{- if $auth.persistence.enabled }} {{/* Disable persistence for aws and gcp modes */}} {{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) }} {{/* No need to create a PVC if we reuse an existing claim */}} - {{- if not $persistence.existingClaimName }} + {{- if not $auth.persistence.existingClaimName }} apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -17,7 +15,7 @@ spec: - ReadWriteOnce resources: requests: - storage: {{ required "persistence.volumeSize is required in chart values" $persistence.volumeSize }} + storage: {{ required "persistence.volumeSize is required in chart values" $auth.persistence.volumeSize }} {{- end }} {{- end }} {{- end }} diff --git a/examples/chart/teleport-cluster/templates/auth/service.yaml b/examples/chart/teleport-cluster/templates/auth/service.yaml index b6eb0f993d75a..12779e90c4823 100644 --- a/examples/chart/teleport-cluster/templates/auth/service.yaml +++ b/examples/chart/teleport-cluster/templates/auth/service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Release.Name }}-auth + name: {{ import "teleport-cluster.auth.serviceName" . }} namespace: {{ .Release.Namespace }} labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} {{- if $auth.annotations.service }}
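For reference, the persistence options consolidated by the pvc.yaml hunk above now all live under a single tree; a short sketch of the values it reads (field names come from the hunk itself, the size is a placeholder):

    persistence:                      # may also be set under auth.persistence,
      enabled: true                   # thanks to the mustMergeOverwrite above
      existingClaimName: ""           # set to reuse a pre-provisioned PVC
      volumeSize: 10Gi                # required when no existing claim is given

In aws and gcp modes the chart skips the PVC entirely, since state lives in DynamoDB or Firestore rather than on a volume.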
diff --git a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml index b8bdc247f5c63..276d7e3606ce4 100644 --- a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml +++ b/examples/chart/teleport-cluster/templates/auth/statefulset.yaml @@ -1,5 +1,4 @@ {{- $auth := mustMergeOverwrite .Values .Values.auth -}} -{{- $persistence := (coalesce .Values.standalone .Values.persistence) -}} apiVersion: apps/v1 kind: StatefulSet metadata: @@ -238,7 +237,7 @@ spec: - name: "data" {{- if and (.Values.persistence.enabled) ( and (not (eq .Values.chartMode "gcp")) (not (eq .Values.chartMode "aws"))) }} persistentVolumeClaim: - claimName: {{ if $persistence.existingClaimName }}{{ $persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }} + claimName: {{ if $auth.persistence.existingClaimName }}{{ $auth.persistence.existingClaimName }}{{ else }}{{ .Release.Name }}{{ end }} {{- else }} emptyDir: {} {{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl index cab6d8295bdcd..f842ee037df66 100644 --- a/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl +++ b/examples/chart/teleport-cluster/templates/proxy/_config.common.tpl @@ -5,7 +5,7 @@ teleport: join_params: method: kubernetes token_name: "{{.Release.Name}}-proxy" - auth_server: "{{ .Release.Name }}-auth.{{ .Release.Namespace }}.svc.cluster.local:3025" + auth_server: "{{ include "teleport-cluster.auth.serviceFQDN" . }}:3025" log: severity: {{ $logLevel }} output: {{ .Values.log.output }} diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap index 4b30b331fc4e6..d950054835262 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/psp_test.yaml.snap @@ -7,7 +7,6 @@ creates a PodSecurityPolicy when enabled in values and supported: seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default name: RELEASE-NAME - namespace: NAMESPACE spec: allowPrivilegeEscalation: false fsGroup: diff --git a/examples/chart/teleport-cluster/tests/auth_config_test.yaml b/examples/chart/teleport-cluster/tests/auth_config_test.yaml index c829f3be4511e..7b073f0400cee 100644 --- a/examples/chart/teleport-cluster/tests/auth_config_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_config_test.yaml @@ -466,4 +466,4 @@ tests: - isKind: of: ConfigMap - matchSnapshot: - path: data.teleport\.yaml \ No newline at end of file + path: data.teleport\.yaml From 5e49dd6a763f0949a4a2094dfe1cf8daed4f06ff Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 16 Dec 2022 11:20:10 -0500 Subject: [PATCH 07/14] Fix webauthn templating, scratch proxy config + marco's feedback --- .../teleport-cluster/templates/_helpers.tpl | 2 +- .../templates/auth/_config.common.tpl | 4 +- .../templates/auth/_config.scratch.tpl | 2 +- .../templates/auth/service.yaml | 2 +- .../templates/proxy/_config.scratch.tpl | 6 +- .../auth_clusterrolebinding_test.yaml.snap | 1 - .../__snapshot__/auth_config_test.yaml.snap | 74 +- .../tests/__snapshot__/config_test.yaml.snap | 1660 ----------------- 8 files changed, 78 insertions(+), 1673 deletions(-) delete mode 100644 
examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap delete mode 100644 examples/chart/teleport-cluster/tests/__snapshot__/config_test.yaml.snap diff --git a/examples/chart/teleport-cluster/templates/_helpers.tpl b/examples/chart/teleport-cluster/templates/_helpers.tpl index 77f2403f73480..bdfb64eb6185e 100644 --- a/examples/chart/teleport-cluster/templates/_helpers.tpl +++ b/examples/chart/teleport-cluster/templates/_helpers.tpl @@ -11,7 +11,7 @@ if serviceAccount is not defined or serviceAccount.name is empty, use .Release.N {{- end -}} {{- define "teleport-cluster.version" -}} -{{- if .Values.teleportVersionOverride }}{{ .Values.teleportVersionOverride }}{{ else }}{{ .Chart.Version }}{{ end -}} +{{- coalesce .Values.teleportVersionOverride .Chart.Version }} {{- end -}} {{- define "teleport-cluster.majorVersion" -}} diff --git a/examples/chart/teleport-cluster/templates/auth/_config.common.tpl b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl index d8796cd08151e..60b6f3d03a5ec 100644 --- a/examples/chart/teleport-cluster/templates/auth/_config.common.tpl +++ b/examples/chart/teleport-cluster/templates/auth/_config.common.tpl @@ -40,10 +40,10 @@ auth_service: rp_id: {{ required "clusterName is required in chart values" .Values.clusterName }} {{- if $authentication.webauthn }} {{- if $authentication.webauthn.attestationAllowedCas }} - attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }} + attestation_allowed_cas: {{- toYaml $authentication.webauthn.attestationAllowedCas | nindent 12 }} {{- end }} {{- if $authentication.webauthn.attestationDeniedCas }} - attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }} + attestation_denied_cas: {{- toYaml $authentication.webauthn.attestationDeniedCas | nindent 12 }} {{- end }} {{- end }} {{- end }} diff --git a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl index 73a4cd0209d93..dfbb48bfe9a99 100644 --- a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl @@ -9,5 +9,5 @@ auth_service: {{- end -}} {{- define "teleport-cluster.auth.config.custom" -}} -{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }} +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see http://link" }} {{- end -}} diff --git a/examples/chart/teleport-cluster/templates/auth/service.yaml b/examples/chart/teleport-cluster/templates/auth/service.yaml index 12779e90c4823..69ceb834aa9f8 100644 --- a/examples/chart/teleport-cluster/templates/auth/service.yaml +++ b/examples/chart/teleport-cluster/templates/auth/service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: - name: {{ import "teleport-cluster.auth.serviceName" . }} + name: {{ include "teleport-cluster.auth.serviceName" . }} namespace: {{ .Release.Namespace }} labels: {{- include "teleport-cluster.auth.labels" . | nindent 4 }} {{- if $auth.annotations.service }}
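The auth/service.yaml hunk just above fixes a rendering bug introduced by the previous commit: import is not a function in Helm's template language (named templates are invoked with include), so helm template would fail on the old line. The corrected call is simply:

    name: {{ include "teleport-cluster.auth.serviceName" . }}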
diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl index 1880d8ee0dc6c..4fbc03e225a27 100644 --- a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl @@ -3,11 +3,11 @@ ssh_service: enabled: false auth_service: - enabled: true + enabled: false proxy_service: enabled: true {{- end -}} {{- define "teleport-cluster.proxy.config.custom" -}} -{{ fail "'custom' mode has been deprecated with chart v12 because of the proxy/auth split, see http://link" }} -{{- end -}} \ No newline at end of file +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see http://link" }} +{{- end -}} diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap deleted file mode 100644 index 0967ef424bce6..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_clusterrolebinding_test.yaml.snap +++ /dev/null @@ -1 +0,0 @@ -{} diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap index 2d24e3ce51ced..dd491f73aab30 100644 --- a/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap +++ b/examples/chart/teleport-cluster/tests/__snapshot__/auth_config_test.yaml.snap @@ -294,13 +294,79 @@ matches snapshot for auth-type.yaml: matches snapshot for auth-webauthn-legacy.yaml: 1: | |- - Error: 'error converting YAML to JSON: yaml: line 20: mapping values are not allowed - in this context' + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + attestation_allowed_cas: + - /etc/ssl/certs/ca-certificates.crt + attestation_denied_cas: + - /etc/ssl/certs/ca-certificates.crt + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 matches snapshot for auth-webauthn.yaml: 1: | |- - Error: 'error converting YAML to JSON: yaml: line 20: mapping values are not allowed - in this context' + auth_service: + authentication: + local_auth: true + second_factor: "on" + type: local + webauthn: + attestation_allowed_cas: + - /etc/ssl/certs/ca-certificates.crt + attestation_denied_cas: + - /etc/ssl/certs/ca-certificates.crt + rp_id: helm-lint + cluster_name: helm-lint + enabled: true + proxy_listener_mode: separate + kubernetes_service: + enabled: true + kube_cluster_name: helm-lint + listen_addr: 0.0.0.0:3026 + public_addr: RELEASE-NAME-auth.NAMESPACE.svc.cluster.local:3026 + proxy_service: + enabled: false + ssh_service: + enabled: false + teleport: + log: + format: + extra_fields: + - timestamp + - level + - component + - caller + output: text + output: stderr + severity: INFO + version: v3 matches snapshot for aws-dynamodb-autoscaling.yaml: 1: | |- diff --git 
a/examples/chart/teleport-cluster/tests/__snapshot__/config_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/config_test.yaml.snap deleted file mode 100644 index 7254ce3b9faa1..0000000000000 --- a/examples/chart/teleport-cluster/tests/__snapshot__/config_test.yaml.snap +++ /dev/null @@ -1,1660 +0,0 @@ -matches snapshot and tests for annotations.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - annotations: - kubernetes.io/config: test-annotation - kubernetes.io/config-different: 2 - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for acme-off.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-cluster-name - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-cluster-name - proxy_service: - public_addr: 'test-cluster-name:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for acme-on.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-acme-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-acme-cluster - proxy_service: - public_addr: 'test-acme-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for acme-uri-staging.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-acme-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-acme-cluster - proxy_service: - public_addr: 'test-acme-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for affinity.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - 
extra_fields: ["timestamp","level","component","caller"] - storage: - type: firestore - project_id: gcpproj-123456 - collection_name: test-teleport-firestore-storage-collection - credentials_path: /etc/teleport-secrets/gcp-credentials.json - audit_events_uri: ['firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json'] - audit_sessions_uri: "gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json" - auth_service: - enabled: true - cluster_name: test-gcp-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-gcp-cluster - proxy_service: - public_addr: 'test-gcp-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-connector-name.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - connector_name: "okta" - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-disable-local.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "github" - local_auth: false - second_factor: "off" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-locking-mode.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - locking_mode: "strict" - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-passwordless.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - 
connector_name: "passwordless" - second_factor: "webauthn" - webauthn: - rp_id: helm-lint - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-type-legacy.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "github" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-type.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "github" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-webauthn-legacy.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "on" - webauthn: - rp_id: helm-lint - attestation_allowed_cas: - - /etc/ssl/certs/ca-certificates.crt - attestation_denied_cas: - - /etc/ssl/certs/ca-certificates.crt - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for auth-webauthn.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "on" - webauthn: - rp_id: helm-lint - attestation_allowed_cas: - - /etc/ssl/certs/ca-certificates.crt - attestation_denied_cas: - - /etc/ssl/certs/ca-certificates.crt - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE 
-matches snapshot for aws-dynamodb-autoscaling.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: true - read_min_capacity: 5 - read_max_capacity: 100 - read_target_value: 50 - write_min_capacity: 5 - write_max_capacity: 100 - write_target_value: 50 - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for aws-ha-acme.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - labels: - env: aws - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for aws-ha-antiaffinity.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - labels: - env: aws - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for aws-ha-log.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: DEBUG - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: 
test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table', 'stdout://'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - labels: - env: aws - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for aws-ha.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - labels: - env: aws - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for aws.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - labels: - env: aws - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for existing-tls-secret-with-ca.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-cluster-name - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-cluster-name - proxy_service: - public_addr: 'test-cluster-name:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - 
cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for existing-tls-secret.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-cluster-name - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-cluster-name - proxy_service: - public_addr: 'test-cluster-name:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for gcp-ha-acme.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: firestore - project_id: gcpproj-123456 - collection_name: test-teleport-firestore-storage-collection - credentials_path: /etc/teleport-secrets/gcp-credentials.json - audit_events_uri: ['firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json'] - audit_sessions_uri: "gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json" - auth_service: - enabled: true - cluster_name: test-gcp-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-gcp-cluster - labels: - env: gcp - proxy_service: - public_addr: 'test-gcp-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for gcp-ha-antiaffinity.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: firestore - project_id: gcpproj-123456 - collection_name: test-teleport-firestore-storage-collection - credentials_path: /etc/teleport-secrets/gcp-credentials.json - audit_events_uri: ['firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json'] - audit_sessions_uri: "gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json" - auth_service: - enabled: true - cluster_name: test-gcp-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-gcp-cluster - labels: - env: gcp - proxy_service: - public_addr: 'test-gcp-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot 
for gcp-ha-log.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: DEBUG - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: firestore - project_id: gcpproj-123456 - collection_name: test-teleport-firestore-storage-collection - credentials_path: /etc/teleport-secrets/gcp-credentials.json - audit_events_uri: ['firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json', 'stdout://'] - audit_sessions_uri: "gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json" - auth_service: - enabled: true - cluster_name: test-gcp-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-gcp-cluster - labels: - env: gcp - proxy_service: - public_addr: 'test-gcp-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - https_keypairs: - - key_file: /etc/teleport-tls/tls.key - cert_file: /etc/teleport-tls/tls.crt - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for gcp.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: firestore - project_id: gcpproj-123456 - collection_name: test-teleport-firestore-storage-collection - credentials_path: /etc/teleport-secrets/gcp-credentials.json - audit_events_uri: ['firestore://test-teleport-firestore-auditlog-collection?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json'] - audit_sessions_uri: "gs://test-gcp-session-storage-bucket?projectID=gcpproj-123456&credentialsPath=/etc/teleport-secrets/gcp-credentials.json" - auth_service: - enabled: true - cluster_name: test-gcp-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-gcp-cluster - labels: - env: gcp - proxy_service: - public_addr: 'test-gcp-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for initcontainers.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for kube-cluster-name.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - 
enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-kube-cluster - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for log-basic.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: json - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-log-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-log-cluster - proxy_service: - public_addr: 'test-log-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for log-extra.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: DEBUG - output: /var/lib/teleport/test.log - format: - output: json - extra_fields: ["level","timestamp","component","caller"] - auth_service: - enabled: true - cluster_name: test-log-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-log-cluster - proxy_service: - public_addr: 'test-log-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for log-legacy.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: DEBUG - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-log-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-log-cluster - proxy_service: - public_addr: 'test-log-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for priority-class-name.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for proxy-listener-mode.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - version: v2 - teleport: - log: - severity: INFO - output: stderr - format: - output: text - 
extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-proxy-listener-mode - authentication: - type: "local" - local_auth: true - second_factor: "otp" - proxy_listener_mode: multiplex - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:443 - kube_cluster_name: test-proxy-listener-mode - proxy_service: - public_addr: 'test-proxy-listener-mode:443' - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for public-addresses.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: - - loadbalancer.example.com:443 - ssh_public_addr: - - loadbalancer.example.com:3023 - tunnel_public_addr: - - loadbalancer.example.com:3024 - kube_listen_addr: 0.0.0.0:3026 - kube_public_addr: - - loadbalancer.example.com:3026 - mysql_listen_addr: 0.0.0.0:3036 - mysql_public_addr: - - loadbalancer.example.com:3036 - postgres_listen_addr: 0.0.0.0:5432 - postgres_public_addr: - - loadbalancer.example.com:5432 - mongo_listen_addr: 0.0.0.0:27017 - mongo_public_addr: - - loadbalancer.example.com:27017 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for resources.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for separate-mongo-listener.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - mongo_listen_addr: 0.0.0.0:27017 - mongo_public_addr: helm-lint:27017 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for separate-postgres-listener.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - 
listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - postgres_listen_addr: 0.0.0.0:5432 - postgres_public_addr: helm-lint:5432 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for service.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for session-recording.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - session_recording: node-sync - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for standalone-customsize.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-standalone-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-standalone-cluster - labels: - env: standalone - proxy_service: - public_addr: 'test-standalone-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for standalone-existingpvc.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-standalone-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-standalone-cluster - labels: - env: standalone - proxy_service: - public_addr: 'test-standalone-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - acme: - enabled: true - email: test@email.com - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for tolerations.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - 
log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - storage: - type: dynamodb - region: us-west-2 - table_name: test-dynamodb-backend-table - audit_events_uri: ['dynamodb://test-dynamodb-auditlog-table'] - audit_sessions_uri: s3://test-s3-session-storage-bucket - continuous_backups: false - auto_scaling: false - auth_service: - enabled: true - cluster_name: test-aws-cluster - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-aws-cluster - proxy_service: - public_addr: 'test-aws-cluster:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for version-override.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: test-cluster-name - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: test-cluster-name - labels: - env: test - version: 5.2.1 - proxy_service: - public_addr: 'test-cluster-name:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE -matches snapshot for volumes.yaml: - 1: | - apiVersion: v1 - data: - teleport.yaml: |- - teleport: - log: - severity: INFO - output: stderr - format: - output: text - extra_fields: ["timestamp","level","component","caller"] - auth_service: - enabled: true - cluster_name: helm-lint - authentication: - type: "local" - local_auth: true - second_factor: "otp" - kubernetes_service: - enabled: true - listen_addr: 0.0.0.0:3027 - kube_cluster_name: helm-lint - proxy_service: - public_addr: 'helm-lint:443' - kube_listen_addr: 0.0.0.0:3026 - mysql_listen_addr: 0.0.0.0:3036 - enabled: true - ssh_service: - enabled: false - kind: ConfigMap - metadata: - name: RELEASE-NAME - namespace: NAMESPACE From ec07530deb9a27c5cf309469b4364a8ec9290b5a Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Mon, 19 Dec 2022 12:02:49 -0500 Subject: [PATCH 08/14] helm: add PodMonitor support (#19291) --- .../templates/podmonitor.yaml | 31 +++++++++++++++ .../tests/podmonitor_test.yaml | 39 +++++++++++++++++++ .../chart/teleport-cluster/values.schema.json | 24 ++++++++++++ examples/chart/teleport-cluster/values.yaml | 17 ++++++++ 4 files changed, 111 insertions(+) create mode 100644 examples/chart/teleport-cluster/templates/podmonitor.yaml create mode 100644 examples/chart/teleport-cluster/tests/podmonitor_test.yaml diff --git a/examples/chart/teleport-cluster/templates/podmonitor.yaml b/examples/chart/teleport-cluster/templates/podmonitor.yaml new file mode 100644 index 0000000000000..7201caec35b7c --- /dev/null +++ b/examples/chart/teleport-cluster/templates/podmonitor.yaml @@ -0,0 +1,31 @@ +{{- if .Values.podMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "teleport-cluster.labels" . | nindent 4 }} + {{- with .Values.podMonitor.additionalLabels }} + {{- toYaml .
| nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: {{- include "teleport-cluster.selectorLabels" . | nindent 6 }} + podMetricsEndpoints: + - port: diag + path: /metrics + {{- with .Values.podMonitor.interval }} + interval: {{ . | quote }} + {{- end }} + podTargetLabels: + - "app.kubernetes.io/name" + - "app.kubernetes.io/instance" + - "app.kubernetes.io/component" + - "app.kubernetes.io/version" + - "teleport.dev/majorVersion" +{{- end }} diff --git a/examples/chart/teleport-cluster/tests/podmonitor_test.yaml b/examples/chart/teleport-cluster/tests/podmonitor_test.yaml new file mode 100644 index 0000000000000..44b0ecb700426 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/podmonitor_test.yaml @@ -0,0 +1,39 @@ +suite: PodMonitor +templates: + - podmonitor.yaml +tests: + - it: does not create a PodMonitor by default + asserts: + - hasDocuments: + count: 0 + + - it: creates a PodMonitor when enabled + set: + podMonitor: + enabled: true + asserts: + - hasDocuments: + count: 1 + - isKind: + of: PodMonitor + + - it: configures scrape interval if provided + set: + podMonitor: + enabled: true + interval: 2m + asserts: + - equal: + path: spec.endpoints[0].interval + value: 2m + + - it: sets additional labels if provided + set: + podMonitor: + enabled: true + additionalLabels: + prometheus: teleport-only + asserts: + - equal: + path: metadata.labels.prometheus + value: teleport-only diff --git a/examples/chart/teleport-cluster/values.schema.json b/examples/chart/teleport-cluster/values.schema.json index 97c2f1bbba76e..e1bae8e4b0889 100644 --- a/examples/chart/teleport-cluster/values.schema.json +++ b/examples/chart/teleport-cluster/values.schema.json @@ -10,6 +10,7 @@ "labels", "chartMode", "highAvailability", + "podMonitor", "tls", "image", "enterpriseImage", @@ -43,6 +44,29 @@ "type": "boolean", "default": true }, + "podMonitor": { + "$id": "#/properties/podMonitor", + "type": "object", + "required": ["enabled"], + "properties": { + "enabled": { + "$id": "#/properties/podMonitor/enabled", + "type": "boolean", + "default": false + }, + "additionalLabels": { + "$id": "#/properties/podMonitor/additionalLabels", + "type": "object", + "default": {"prometheus": "default"}, + "additionalProperties": {"type": "string"} + }, + "interval": { + "$id": "#/properties/podMonitor/interval", + "type": "string", + "default": "30s" + } + } + }, "authentication": { "$id": "#/properties/authentication", "type": "object", diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index 80927f64a25a4..ea215e13ea13d 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -185,6 +185,23 @@ chartMode: standalone # Helm chart user is responsible for configuring working join_params on the proxy. createProxyToken: true +# podMonitor controls the PodMonitor CR (from monitoring.coreos.com/v1) +# This CRD is managed by the prometheus-operator and allows workloads to +# be monitored. A `prometheus-operator` must be running in the cluster +# for this value to take effect. +# See https://prometheus-operator.dev/docs/prologue/introduction/ +podMonitor: + # Whether the chart should deploy a PodMonitor. + # Disabled by default as it requires the PodMonitor CRD to be installed. + enabled: false + # additionalLabels to put on the PodMonitor.
+ # This is used so that a specific prometheus instance can select the PodMonitor. + # Defaults to {prometheus: default}, a common default prometheus selector. + additionalLabels: + prometheus: default + # interval is the time between two metric scrapes. Defaults to 30s. + interval: 30s + ###################################################################### # Persistence settings (only used in "standalone" and "custom" modes) # NOTE: Changes in Kubernetes 1.23+ mean that persistent volumes will not automatically be provisioned in AWS EKS clusters From 3ec9f8fd3c347f010e49cf9ea07be8ffb1ca2428 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Mon, 19 Dec 2022 12:30:21 -0500 Subject: [PATCH 09/14] helm: add job validating configuration on deploy (#19333) Part of [RFD-0096](https://github.com/gravitational/teleport/pull/18274) This PR adds Helm hooks that deploy a configuration-test job and run `teleport configure --test` to validate that the `teleport.yaml` configuration is sane. --- .../templates/auth/predeploy_config.yaml | 31 ++++++ .../templates/auth/predeploy_job.yaml | 95 +++++++++++++++++++ .../templates/proxy/predeploy_config.yaml | 16 ++++ .../templates/proxy/predeploy_job.yaml | 93 ++++++++++++++++++ .../tests/predeploy_test.yaml | 55 +++++++++++ .../chart/teleport-cluster/values.schema.json | 6 ++ examples/chart/teleport-cluster/values.yaml | 4 + 7 files changed, 300 insertions(+) create mode 100644 examples/chart/teleport-cluster/templates/auth/predeploy_config.yaml create mode 100644 examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml create mode 100644 examples/chart/teleport-cluster/templates/proxy/predeploy_config.yaml create mode 100644 examples/chart/teleport-cluster/templates/proxy/predeploy_job.yaml create mode 100644 examples/chart/teleport-cluster/tests/predeploy_test.yaml diff --git a/examples/chart/teleport-cluster/templates/auth/predeploy_config.yaml b/examples/chart/teleport-cluster/templates/auth/predeploy_config.yaml new file mode 100644 index 0000000000000..2019812169012 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/auth/predeploy_config.yaml @@ -0,0 +1,31 @@ +{{- $auth := mustMergeOverwrite .Values .Values.auth -}} +{{- if $auth.validateConfigOnDeploy }} +{{- $configTemplate := printf "teleport-cluster.auth.config.%s" $auth.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-auth-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "4" + "helm.sh/hook-delete-policy": before-hook-creation +data: +{{- if $auth.createProxyToken }} + apply-on-startup.yaml: |2 + kind: token + version: v2 + metadata: + name: {{ .Release.Name }}-proxy + expires: "3000-01-01T00:00:00Z" + spec: + roles: [Proxy] + join_method: kubernetes + kubernetes: + allow: + - service_account: "{{ .Release.Namespace }}:{{ include "teleport-cluster.proxy.serviceAccountName" . }}" +{{- end }} + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate .
| fromYaml) $auth.teleportConfig | toYaml | nindent 4 -}} +{{- end }} diff --git a/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml b/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml new file mode 100644 index 0000000000000..78b9b73abd402 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml @@ -0,0 +1,95 @@ +{{- $auth := mustMergeOverwrite .Values .Values.auth -}} +{{- if $auth.validateConfigOnDeploy }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-auth-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + backoffLimit: 1 + template: + spec: +{{- if $auth.affinity }} + affinity: {{- toYaml $auth.affinity | nindent 8 }} +{{- end }} +{{- if $auth.tolerations }} + tolerations: {{- toYaml $auth.tolerations | nindent 6 }} +{{- end }} + restartPolicy: Never + containers: + - name: "teleport-config-check" + image: '{{ if $auth.enterprise }}{{ $auth.enterpriseImage }}{{ else }}{{ $auth.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + imagePullPolicy: {{ $auth.imagePullPolicy }} +{{- if or $auth.extraEnv $auth.tls.existingCASecretName }} + env: + {{- if (gt (len $auth.extraEnv) 0) }} + {{- toYaml $auth.extraEnv | nindent 8 }} + {{- end }} + {{- if $auth.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} +{{- end }} + command: + - "teleport" + - "configure" + args: + - "--test" + - "/etc/teleport/teleport.yaml" +{{- if .Values.securityContext }} + securityContext: {{- toYaml .Values.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if .Values.enterprise }} + - mountPath: /var/lib/license + name: "license" + readOnly: true +{{- end }} + {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} + - mountPath: /etc/teleport-secrets + name: "gcp-credentials" + readOnly: true +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 8 }} +{{- end }} + volumes: +{{- if .Values.enterprise }} + - name: license + secret: + secretName: "license" +{{- end }} +{{- if (eq .Values.chartMode "gcp") }} + - name: gcp-credentials + secret: + secretName: {{ required "gcp.credentialSecretName is required in chart values" .Values.gcp.credentialSecretName }} +{{- end }} +{{- if .Values.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ .Values.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-auth-test + - name: "data" + emptyDir: {} +{{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 6 }} +{{- end }} +{{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/predeploy_config.yaml b/examples/chart/teleport-cluster/templates/proxy/predeploy_config.yaml new file mode 100644 index 0000000000000..582dc0f72589b --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/predeploy_config.yaml @@ -0,0 +1,16 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- if $proxy.validateConfigOnDeploy }} +{{- $configTemplate := printf 
"teleport-cluster.proxy.config.%s" $proxy.chartMode -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-proxy-test + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "4" + "helm.sh/hook-delete-policy": before-hook-creation +data: + teleport.yaml: |2 + {{- mustMergeOverwrite (include $configTemplate . | fromYaml) $proxy.teleportConfig | toYaml | nindent 4 -}} +{{- end }} diff --git a/examples/chart/teleport-cluster/templates/proxy/predeploy_job.yaml b/examples/chart/teleport-cluster/templates/proxy/predeploy_job.yaml new file mode 100644 index 0000000000000..bdfa28c183040 --- /dev/null +++ b/examples/chart/teleport-cluster/templates/proxy/predeploy_job.yaml @@ -0,0 +1,93 @@ +{{- $proxy := mustMergeOverwrite .Values .Values.proxy -}} +{{- if $proxy.validateConfigOnDeploy }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-proxy-test + namespace: {{ .Release.Namespace }} + labels: {{- include "teleport-cluster.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + backoffLimit: 1 + template: + spec: +{{- if $proxy.affinity }} + affinity: {{- toYaml $proxy.affinity | nindent 8 }} +{{- end }} +{{- if $proxy.tolerations }} + tolerations: {{- toYaml $proxy.tolerations | nindent 6 }} +{{- end }} + restartPolicy: Never + containers: + - name: "teleport" + image: '{{ if $proxy.enterprise }}{{ $proxy.enterpriseImage }}{{ else }}{{ $proxy.image }}{{ end }}:{{ include "teleport-cluster.version" . }}' + imagePullPolicy: {{ $proxy.imagePullPolicy }} +{{- if or $proxy.extraEnv $proxy.tls.existingCASecretName }} + env: + {{- if (gt (len $proxy.extraEnv) 0) }} + {{- toYaml $proxy.extraEnv | nindent 8 }} + {{- end }} + {{- if $proxy.tls.existingCASecretName }} + - name: SSL_CERT_FILE + value: /etc/teleport-tls-ca/ca.pem + {{- end }} +{{- end }} + command: + - "teleport" + - "configure" + args: + - "--test" + - "/etc/teleport/teleport.yaml" +{{- if $proxy.securityContext }} + securityContext: {{- toYaml $proxy.securityContext | nindent 10 }} +{{- end }} + volumeMounts: +{{- if $proxy.highAvailability.certManager.enabled }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- else if $proxy.tls.existingSecretName }} + - mountPath: /etc/teleport-tls + name: "teleport-tls" + readOnly: true +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - mountPath: /etc/teleport-tls-ca + name: "teleport-tls-ca" + readOnly: true +{{- end }} + - mountPath: /etc/teleport + name: "config" + readOnly: true + - mountPath: /var/lib/teleport + name: "data" +{{- if $proxy.extraVolumeMounts }} + {{- toYaml $proxy.extraVolumeMounts | nindent 8 }} +{{- end }} + volumes: +{{- if $proxy.highAvailability.certManager.enabled }} + - name: teleport-tls + secret: + secretName: teleport-tls +{{- else if $proxy.tls.existingSecretName }} + - name: teleport-tls + secret: + secretName: {{ $proxy.tls.existingSecretName }} +{{- end }} +{{- if $proxy.tls.existingCASecretName }} + - name: teleport-tls-ca + secret: + secretName: {{ $proxy.tls.existingCASecretName }} +{{- end }} + - name: "config" + configMap: + name: {{ .Release.Name }}-proxy-test + - name: "data" + emptyDir: {} +{{- if $proxy.extraVolumes }} + {{- toYaml $proxy.extraVolumes | nindent 6 }} +{{- end }} +{{- end }} diff --git a/examples/chart/teleport-cluster/tests/predeploy_test.yaml 
b/examples/chart/teleport-cluster/tests/predeploy_test.yaml new file mode 100644 index 0000000000000..ab307c42d5e09 --- /dev/null +++ b/examples/chart/teleport-cluster/tests/predeploy_test.yaml @@ -0,0 +1,55 @@ +suite: Pre-Deploy Config Test Hooks +templates: + - auth/predeploy_job.yaml + - auth/predeploy_config.yaml + - proxy/predeploy_job.yaml + - proxy/predeploy_config.yaml +tests: + - it: Deploys the auth-test config + template: auth/predeploy_config.yaml + set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: ConfigMap + apiVersion: v1 + name: RELEASE-NAME-auth-test + namespace: NAMESPACE + + - it: Deploys the proxy-test config + template: proxy/predeploy_config.yaml + set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: ConfigMap + apiVersion: v1 + name: RELEASE-NAME-proxy-test + namespace: NAMESPACE + + - it: Deploys the auth-test job + template: auth/predeploy_job.yaml + set: + clusterName: helm-lint + asserts: + - containsDocument: + kind: Job + apiVersion: batch/v1 + name: RELEASE-NAME-auth-test + namespace: NAMESPACE + + - it: Is executed as a pre-install and pre-upgrade hook + set: + clusterName: helm-lint + asserts: + - equal: + path: metadata.annotations.helm\.sh/hook + value: pre-install,pre-upgrade + + - it: Does not render hooks when config validation is disabled + set: + clusterName: helm-lint + validateConfigOnDeploy: false + asserts: + - hasDocuments: + count: 0 diff --git a/examples/chart/teleport-cluster/values.schema.json b/examples/chart/teleport-cluster/values.schema.json index e1bae8e4b0889..f12db18c53064 100644 --- a/examples/chart/teleport-cluster/values.schema.json +++ b/examples/chart/teleport-cluster/values.schema.json @@ -9,6 +9,7 @@ "podSecurityPolicy", "labels", "chartMode", + "validateConfigOnDeploy", "highAvailability", "podMonitor", "tls", "image", "enterpriseImage", @@ -308,6 +309,11 @@ ], "default": "standalone" }, + "validateConfigOnDeploy": { + "$id": "#/properties/validateConfigOnDeploy", + "type": "boolean", + "default": true + }, "standalone": { "$id": "#/properties/standalone", "type": "object", diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index ea215e13ea13d..3e2a4c9b0c3a8 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -180,6 +180,10 @@ labels: {} # (3) When set to "scratch", you must write the teleport configuration in auth.teleportConfig and proxy.teleportConfig. chartMode: standalone +# validateConfigOnDeploy enables a Kubernetes job that runs before install and upgrade to verify +# that the teleport.yaml configuration is valid; the deployment is blocked if it is not. +validateConfigOnDeploy: true + # Whether the chart should create a Teleport ProvisionToken for the proxies to join the Teleport cluster. # Disabling this flag will prevent the proxies from joining the auth pods. In this case, the # Helm chart user is responsible for configuring working join_params on the proxy.
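A note on [PATCH 08/14] above: once the PodMonitor CRD is present, enabling the monitor is a values-only change. A minimal sketch of such an override, assuming a prometheus-operator instance that selects on a hypothetical `prometheus: main` label:

```yaml
# Hypothetical values override for the PodMonitor added in PATCH 08/14.
podMonitor:
  enabled: true           # renders templates/podmonitor.yaml
  additionalLabels:
    prometheus: main      # hypothetical label; must match your prometheus podMonitorSelector
  interval: 60s           # scrape every 60s instead of the 30s default
```

The monitor scrapes the existing `diag` port on `/metrics`, so no extra Service or container change is needed.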
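Similarly for [PATCH 09/14]: the validation hooks render by default, so clusters that cannot run pre-install/pre-upgrade Jobs have to opt out explicitly. A minimal sketch of that override:

```yaml
# Hypothetical values override: skips the pre-deploy `teleport configure --test`
# Job and its ConfigMap; configuration errors then only surface at pod startup.
validateConfigOnDeploy: false
```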
From b2962e3b2e6b9bc5c762ccb1bb34dce5d20f0f75 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 6 Jan 2023 11:40:40 -0500 Subject: [PATCH 10/14] helm: use a deployment instead of a statefulset for auth + fix podmonitor --- .../{statefulset.yaml => deployment.yaml} | 20 ++- .../templates/proxy/deployment.yaml | 2 + ...ml.snap => auth_deployment_test.yaml.snap} | 0 ...et_test.yaml => auth_deployment_test.yaml} | 128 ++++++++++++------ .../tests/podmonitor_test.yaml | 2 +- .../tests/proxy_deployment_test.yaml | 12 ++ 6 files changed, 120 insertions(+), 44 deletions(-) rename examples/chart/teleport-cluster/templates/auth/{statefulset.yaml => deployment.yaml} (93%) rename examples/chart/teleport-cluster/tests/__snapshot__/{auth_statefulset_test.yaml.snap => auth_deployment_test.yaml.snap} (100%) rename examples/chart/teleport-cluster/tests/{auth_statefulset_test.yaml => auth_deployment_test.yaml} (87%) diff --git a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml b/examples/chart/teleport-cluster/templates/auth/deployment.yaml similarity index 93% rename from examples/chart/teleport-cluster/templates/auth/statefulset.yaml rename to examples/chart/teleport-cluster/templates/auth/deployment.yaml index 276d7e3606ce4..b31be2461f6f2 100644 --- a/examples/chart/teleport-cluster/templates/auth/statefulset.yaml +++ b/examples/chart/teleport-cluster/templates/auth/deployment.yaml @@ -1,6 +1,7 @@ {{- $auth := mustMergeOverwrite .Values .Values.auth -}} +{{- $replicated := gt (int $auth.highAvailability.replicaCount) 1 -}} apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: name: {{ .Release.Name }}-auth namespace: {{ .Release.Namespace }} @@ -12,7 +13,20 @@ metadata: {{- end }} spec: replicas: {{ $auth.highAvailability.replicaCount }} - serviceName: {{ .Release.Name }}-auth +{{- if and $replicated $auth.highAvailability.minReadySeconds }} + minReadySeconds: {{ $auth.highAvailability.minReadySeconds }} +{{- end }} + strategy: +{{- if $replicated }} + # some backends support a maximum amount of auth pods (e.g. DynamoDB), + # we don't want to exceed this during a rollout. + maxSurge: 0 + maxUnavailable: 1 +{{- else }} + # using a single replica can be because of a non-replicable storage or when applying upgrade migrations. + # In those cases, we don't want a rolling update. + type: Recreate +{{- end }} selector: matchLabels: {{- include "teleport-cluster.auth.selectorLabels" . 
| nindent 6 }} template: @@ -48,7 +62,7 @@ spec: values: - proxy topologyKey: "kubernetes.io/hostname" - {{- else if gt (int $auth.highAvailability.replicaCount) 1 }} + {{- else if $replicated }} preferredDuringSchedulingIgnoredDuringExecution: - weight: 50 podAffinityTerm: diff --git a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml index 6388a344c3648..fa31085bf7a0b 100644 --- a/examples/chart/teleport-cluster/templates/proxy/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/proxy/deployment.yaml @@ -32,7 +32,9 @@ spec: {{- else }} replicas: {{ max .Values.highAvailability.replicaCount 2 }} {{- end }} + {{- if $proxy.highAvailability.minReadySeconds }} minReadySeconds: {{ $proxy.highAvailability.minReadySeconds }} + {{- end }} {{- else }} replicas: 1 {{- end }} diff --git a/examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap b/examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap similarity index 100% rename from examples/chart/teleport-cluster/tests/__snapshot__/auth_statefulset_test.yaml.snap rename to examples/chart/teleport-cluster/tests/__snapshot__/auth_deployment_test.yaml.snap diff --git a/examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml similarity index 87% rename from examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml rename to examples/chart/teleport-cluster/tests/auth_deployment_test.yaml index 3c49b61d2c83c..37693af3439e8 100644 --- a/examples/chart/teleport-cluster/tests/auth_statefulset_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml @@ -1,10 +1,10 @@ suite: Auth StatefulSet templates: - - auth/statefulset.yaml + - auth/deployment.yaml - auth/config.yaml tests: - it: sets Statefulset annotations when specified - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/annotations.yaml asserts: @@ -16,7 +16,7 @@ tests: value: 3 - it: sets Pod annotations when specified - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/annotations.yaml asserts: @@ -28,7 +28,7 @@ tests: value: 4 - it: should not have more than one replica in standalone mode - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -38,7 +38,7 @@ tests: value: 1 - it: should have multiple replicas when replicaCount is set - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: scratch clusterName: helm-lint.example.com @@ -50,7 +50,7 @@ tests: value: 3 - it: should set affinity when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com affinity: @@ -69,7 +69,7 @@ tests: path: spec.template.spec.affinity - it: should set required affinity when highAvailability.requireAntiAffinity is set - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/aws-ha-antiaffinity.yaml asserts: @@ -83,7 +83,7 @@ tests: path: spec.template.spec.affinity - it: should set tolerations when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/tolerations.yaml asserts: @@ -93,7 +93,7 @@ tests: path: spec.template.spec.tolerations - it: should set resources when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - 
../.lint/resources.yaml asserts: @@ -113,7 +113,7 @@ tests: path: spec.template.spec - it: should set securityContext when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/security-context.yaml asserts: @@ -139,7 +139,7 @@ tests: path: spec.template.spec - it: should not set securityContext when is empty object (default value) - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/security-context-empty.yaml asserts: @@ -150,7 +150,7 @@ tests: # so we override it manually and check that gets set instead # this saves us having to update the test every time we cut a new release - it: should use enterprise image and mount license when enterprise is set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com enterprise: true @@ -173,7 +173,7 @@ tests: secretName: license - it: should use OSS image and not mount license when enterprise is not set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint teleportVersionOverride: 8.3.4 @@ -197,7 +197,7 @@ tests: path: spec.template.spec - it: should mount GCP credentials in GCP mode - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/gcp-ha.yaml asserts: @@ -215,7 +215,7 @@ tests: secretName: teleport-gcp-credentials - it: should not mount secret when credentialSecretName is blank in values - template: deployment.yaml + template: auth/deployment.yaml values: - ../.lint/gcp-ha-workload.yaml asserts: @@ -233,7 +233,7 @@ tests: secretName: teleport-gcp-credentials - it: should mount GCP credentials for initContainer in GCP mode - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/gcp-ha.yaml - ../.lint/initcontainers.yaml @@ -246,7 +246,7 @@ tests: readOnly: true - it: should mount ConfigMap containing Teleport config - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com asserts: @@ -264,7 +264,7 @@ tests: name: RELEASE-NAME-auth - it: should mount extraVolumes and extraVolumeMounts on container and initContainers - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/volumes.yaml - ../.lint/initcontainers.yaml @@ -291,7 +291,7 @@ tests: secret: secretName: mySecret - it: should set imagePullPolicy when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com imagePullPolicy: Always @@ -301,7 +301,7 @@ tests: value: Always - it: should set environment when extraEnv set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/extra-env.yaml asserts: @@ -312,7 +312,7 @@ tests: value: "some-value" - it: should provision initContainer correctly when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/initcontainers.yaml - ../.lint/resources.yaml @@ -364,7 +364,7 @@ tests: path: spec.template.spec.initContainers - it: should add insecureSkipProxyTLSVerify to args when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com insecureSkipProxyTLSVerify: true @@ -374,7 +374,7 @@ tests: content: "--insecure" - it: should expose diag port - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com asserts: @@ -386,7 +386,7 @@ tests: protocol: TCP - it: should set 
postStart command if set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml set: clusterName: helm-lint.example.com postStart: @@ -397,7 +397,7 @@ tests: value: ["/bin/echo", "test"] - it: should add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is true - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -412,7 +412,7 @@ tests: claimName: RELEASE-NAME - it: should not add PersistentVolumeClaim as volume when in standalone mode and persistence.enabled is false - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: standalone clusterName: helm-lint.example.com @@ -427,7 +427,7 @@ tests: claimName: RELEASE-NAME - it: should add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is true - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: scratch clusterName: helm-lint.example.com @@ -442,7 +442,7 @@ tests: claimName: RELEASE-NAME - it: should not add PersistentVolumeClaim as volume when in scratch mode and persistence.enabled is false - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: scratch clusterName: helm-lint.example.com @@ -457,7 +457,7 @@ tests: claimName: RELEASE-NAME - it: should add an operator side-car when operator is enabled - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/operator.yaml asserts: @@ -468,7 +468,7 @@ tests: path: spec.template.spec.containers[1] - it: should add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set and persistence.enabled is true - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/standalone-existingpvc.yaml asserts: @@ -480,7 +480,7 @@ tests: claimName: teleport-storage - it: should not add named PersistentVolumeClaim as volume when in standalone mode, persistence.existingClaimName is set but persistence.enabled is false - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/standalone-existingpvc.yaml set: @@ -495,7 +495,7 @@ tests: claimName: teleport-storage - it: should add named PersistentVolumeClaim as volume when in scratch mode and persistence.existingClaimName is set - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/standalone-existingpvc.yaml asserts: @@ -507,7 +507,7 @@ tests: claimName: teleport-storage - it: should not add named PersistentVolumeClaim as volume when in scratch mode, persistence.existingClaimName is set and persistence.enabled is false - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/standalone-existingpvc.yaml set: @@ -524,7 +524,7 @@ tests: path: spec.template.spec - it: should add emptyDir for data in AWS mode - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/aws-ha.yaml asserts: @@ -535,7 +535,7 @@ tests: emptyDir: {} - it: should add emptyDir for data in GCP mode - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/gcp-ha.yaml asserts: @@ -546,7 +546,7 @@ tests: emptyDir: {} - it: should set priorityClassName when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/priority-class-name.yaml asserts: @@ -555,7 +555,7 @@ tests: value: system-cluster-critical - it: should set probeTimeoutSeconds when set in values - template: 
auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/probe-timeout-seconds.yaml asserts: @@ -567,7 +567,7 @@ tests: value: 5 - it: should mount tls.existingCASecretName and set environment when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/existing-tls-secret-with-ca.yaml asserts: @@ -590,7 +590,7 @@ tests: value: /etc/teleport-tls-ca/ca.pem - it: should mount tls.existingCASecretName and set extra environment when set in values - template: auth/statefulset.yaml + template: auth/deployment.yaml values: - ../.lint/existing-tls-secret-with-ca.yaml - ../.lint/extra-env.yaml @@ -619,7 +619,7 @@ tests: value: some-value - it: should set minReadySeconds when replicaCount > 1 - template: auth/statefulset.yaml + template: auth/deployment.yaml set: chartMode: scratch highAvailability: @@ -629,3 +629,51 @@ tests: - equal: path: spec.minReadySeconds value: 60 + + - it: should not set minReadySeconds when replicaCount = 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + minReadySeconds: 60 + replicaCount: 1 + asserts: + - equal: + path: spec.minReadySeconds + value: null + + - it: should use Recreate strategy when replicaCount = 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 1 + asserts: + - equal: + path: spec.strategy.type + value: Recreate + + - it: should not set strategy when replicaCount > 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 2 + asserts: + - equal: + path: spec.strategy.type + value: null + + - it: should not perform surge rolling updates when replicaCount > 1 + template: auth/deployment.yaml + set: + chartMode: scratch + highAvailability: + replicaCount: 2 + asserts: + - equal: + path: spec.strategy.maxSurge + value: 0 + - equal: + path: spec.strategy.maxUnavailable + value: 1 diff --git a/examples/chart/teleport-cluster/tests/podmonitor_test.yaml b/examples/chart/teleport-cluster/tests/podmonitor_test.yaml index 44b0ecb700426..d07ebc9a8bf2d 100644 --- a/examples/chart/teleport-cluster/tests/podmonitor_test.yaml +++ b/examples/chart/teleport-cluster/tests/podmonitor_test.yaml @@ -24,7 +24,7 @@ tests: interval: 2m asserts: - equal: - path: spec.endpoints[0].interval + path: spec.podMetricsEndpoints[0].interval value: 2m - it: wears additional labels if provided diff --git a/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml index e0392f1f5e12b..062f69f5082b3 100644 --- a/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml +++ b/examples/chart/teleport-cluster/tests/proxy_deployment_test.yaml @@ -549,3 +549,15 @@ tests: - equal: path: spec.minReadySeconds value: 60 + + - it: should not set minReadySeconds when replicaCount = 1 + template: proxy/deployment.yaml + set: + chartMode: scratch + highAvailability: + minReadySeconds: 60 + replicaCount: 1 + asserts: + - equal: + path: spec.minReadySeconds + value: null From 4edfcdd004c97fcf12f5e602d85f389b93c6c8d3 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 6 Jan 2023 14:10:47 -0500 Subject: [PATCH 11/14] helm: warn that multiplexing requires TLS termination --- examples/chart/teleport-cluster/values.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index 3e2a4c9b0c3a8..5420a57aa877d 100644 --- 
a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -89,6 +89,10 @@ authentication: # Teleport supports TLS routing. In this mode, all client connections are wrapped in TLS and multiplexed on one Teleport proxy port. # Default mode will not utilize TLS routing and operate in backwards-compatibility mode. +# +# WARNING: setting this value to 'multiplex' requires Teleport to terminate TLS itself. +# TLS multiplexing is not supported when using ACM+NLB for TLS termination. +# # Possible values are 'separate' and 'multiplex' proxyListenerMode: "separate" From dfe4a53e76a1f2c7eeca321311cb65a1de7f92fe Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Fri, 6 Jan 2023 14:16:49 -0500 Subject: [PATCH 12/14] helm: fix regression with predeploy job and gcp credentials --- .../chart/teleport-cluster/templates/auth/deployment.yaml | 2 +- .../teleport-cluster/templates/auth/predeploy_job.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/auth/deployment.yaml b/examples/chart/teleport-cluster/templates/auth/deployment.yaml index b31be2461f6f2..d1349857f0328 100644 --- a/examples/chart/teleport-cluster/templates/auth/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/auth/deployment.yaml @@ -175,7 +175,7 @@ spec: name: "license" readOnly: true {{- end }} - {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - mountPath: /etc/teleport-secrets name: "gcp-credentials" readOnly: true diff --git a/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml b/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml index 78b9b73abd402..e968dd4d864b6 100644 --- a/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml +++ b/examples/chart/teleport-cluster/templates/auth/predeploy_job.yaml @@ -50,7 +50,7 @@ spec: name: "license" readOnly: true {{- end }} - {{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - mountPath: /etc/teleport-secrets name: "gcp-credentials" readOnly: true @@ -74,10 +74,10 @@ spec: secret: secretName: "license" {{- end }} -{{- if (eq .Values.chartMode "gcp") }} +{{- if and (.Values.gcp.credentialSecretName) (eq .Values.chartMode "gcp") }} - name: gcp-credentials secret: - secretName: {{ required "gcp.credentialSecretName is required in chart values" .Values.gcp.credentialSecretName }} + secretName: {{ .Values.gcp.credentialSecretName | quote }} {{- end }} {{- if .Values.tls.existingCASecretName }} - name: teleport-tls-ca From 49157b0c97220d1bd562155cf3fbccc4ac4b87d2 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Mon, 9 Jan 2023 09:47:33 -0500 Subject: [PATCH 13/14] fixup! 
helm: use a deployment instead of a statefulset for auth + fix podmonitor --- .../chart/teleport-cluster/templates/auth/deployment.yaml | 5 +++-- .../chart/teleport-cluster/tests/auth_deployment_test.yaml | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/auth/deployment.yaml b/examples/chart/teleport-cluster/templates/auth/deployment.yaml index d1349857f0328..762d99fdacc3e 100644 --- a/examples/chart/teleport-cluster/templates/auth/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/auth/deployment.yaml @@ -20,8 +20,9 @@ spec: {{- if $replicated }} # some backends support a maximum amount of auth pods (e.g. DynamoDB), # we don't want to exceed this during a rollout. - maxSurge: 0 - maxUnavailable: 1 + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 {{- else }} # using a single replica can be because of a non-replicable storage or when applying upgrade migrations. # In those cases, we don't want a rolling update. diff --git a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml index 37693af3439e8..69ebf1365ede0 100644 --- a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml @@ -672,8 +672,8 @@ tests: replicaCount: 2 asserts: - equal: - path: spec.strategy.maxSurge + path: spec.strategy.rollingUpdate.maxSurge value: 0 - equal: - path: spec.strategy.maxUnavailable + path: spec.strategy.rollingUpdate.maxUnavailable value: 1 From 1f3ef01ec03b178ac9aaedd775d435edf800d4bf Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Wed, 11 Jan 2023 11:28:30 -0500 Subject: [PATCH 14/14] helm: explicit update strategy + add documentation links --- .../chart/teleport-cluster/templates/auth/_config.scratch.tpl | 2 +- examples/chart/teleport-cluster/templates/auth/deployment.yaml | 1 + examples/chart/teleport-cluster/templates/auth/pvc.yaml | 2 +- .../chart/teleport-cluster/templates/proxy/_config.scratch.tpl | 2 +- examples/chart/teleport-cluster/tests/auth_deployment_test.yaml | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) diff --git a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl index dfbb48bfe9a99..e238dc1afdf1b 100644 --- a/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/auth/_config.scratch.tpl @@ -9,5 +9,5 @@ auth_service: {{- end -}} {{- define "teleport-cluster.auth.config.custom" -}} -{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see http://link" }} +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }} {{- end -}} diff --git a/examples/chart/teleport-cluster/templates/auth/deployment.yaml b/examples/chart/teleport-cluster/templates/auth/deployment.yaml index 762d99fdacc3e..803df194c4185 100644 --- a/examples/chart/teleport-cluster/templates/auth/deployment.yaml +++ b/examples/chart/teleport-cluster/templates/auth/deployment.yaml @@ -20,6 +20,7 @@ spec: {{- if $replicated }} # some backends support a maximum amount of auth pods (e.g. DynamoDB), # we don't want to exceed this during a rollout. 
+ type: RollingUpdate rollingUpdate: maxSurge: 0 maxUnavailable: 1 diff --git a/examples/chart/teleport-cluster/templates/auth/pvc.yaml b/examples/chart/teleport-cluster/templates/auth/pvc.yaml index 751e120ad90fd..b9f7e90a60e02 100644 --- a/examples/chart/teleport-cluster/templates/auth/pvc.yaml +++ b/examples/chart/teleport-cluster/templates/auth/pvc.yaml @@ -1,6 +1,6 @@ {{- $auth := mustMergeOverwrite .Values .Values.auth -}} {{- if $auth.persistence.enabled }} - {{/* Disable persistence for aws and gpc modes */}} + {{/* Disable persistence for aws and gcp modes */}} {{- if and (not (eq $auth.chartMode "aws")) (not (eq $auth.chartMode "gcp")) }} {{/* No need to create a PVC if we reuse an existing claim */}} {{- if not $auth.persistence.existingClaimName }} diff --git a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl index 4fbc03e225a27..7fa1ea30b6713 100644 --- a/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl +++ b/examples/chart/teleport-cluster/templates/proxy/_config.scratch.tpl @@ -9,5 +9,5 @@ proxy_service: {{- end -}} {{- define "teleport-cluster.proxy.config.custom" -}} -{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see http://link" }} +{{ fail "'custom' mode has been removed with chart v12 because of the proxy/auth split breaking change, see https://goteleport.com/docs/deploy-a-cluster/helm-deployments/migration-v12/" }} {{- end -}} diff --git a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml index 69ebf1365ede0..1030abc67b9a1 100644 --- a/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml +++ b/examples/chart/teleport-cluster/tests/auth_deployment_test.yaml @@ -662,7 +662,7 @@ tests: asserts: - equal: path: spec.strategy.type - value: null + value: RollingUpdate - it: should not perform surge rolling updates when replicaCount > 1 template: auth/deployment.yaml
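For reference, the auth Deployment update strategy that [PATCH 10/14], its fixup, and [PATCH 14/14] converge on renders roughly as follows; this is a sketch derived from the final template, not a file in the series:

```yaml
# highAvailability.replicaCount > 1: roll one pod at a time and never surge,
# since some backends (e.g. DynamoDB) cap the number of concurrent auth pods
strategy:
  type: RollingUpdate
  rollingUpdate:
    maxSurge: 0
    maxUnavailable: 1
---
# replicaCount == 1: recreate the pod instead of rolling, so non-replicable
# storage or an upgrade migration never sees two auth pods at once
strategy:
  type: Recreate
```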
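The warning added in [PATCH 11/14] matters whenever a load balancer sits in front of the proxy: TLS routing only works if the TLS session terminates at Teleport itself. A hedged values sketch of the setup it cautions about:

```yaml
# Hypothetical values override enabling TLS routing. The load balancer must pass
# TLS through (e.g. an NLB in plain TCP mode); re-terminating TLS upstream, as
# with ACM+NLB, breaks the multiplexed protocol detection.
proxyListenerMode: multiplex
```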
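Finally, [PATCH 12/14] also makes `gcp.credentialSecretName` optional rather than required, so GCP deployments can rely on ambient credentials. A sketch, assuming GKE workload identity is already configured for the chart's service accounts (that setup is outside the scope of this series):

```yaml
# Hypothetical values fragment: with credentialSecretName left blank, neither the
# auth pods nor the predeploy job mount /etc/teleport-secrets, and Teleport picks
# up GCP credentials from the environment instead.
chartMode: gcp
gcp:
  # ...project, Firestore collection and session bucket settings omitted...
  credentialSecretName: ""
```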