From 6577f97e1feb29d856cba69eec02d639abea1df2 Mon Sep 17 00:00:00 2001 From: Hugo Shaka Date: Tue, 20 Dec 2022 10:13:03 -0500 Subject: [PATCH] docs: Document `teleport-cluster` v12 Helm chart --- docs/config.json | 5 + .../deploy-a-cluster/helm-deployments.mdx | 4 +- .../deploy-a-cluster/helm-deployments/aws.mdx | 41 +- .../helm-deployments/custom.mdx | 319 ++++---- .../helm-deployments/digitalocean.mdx | 23 +- .../deploy-a-cluster/helm-deployments/gcp.mdx | 40 +- .../helm-deployments/kubernetes-cluster.mdx | 120 +-- .../helm-deployments/migration-v12.mdx | 308 ++++++++ .../helm-deployments/migration.mdx | 56 +- .../helm-reference/teleport-cluster.mdx | 729 ++++++++++-------- .../try-out-teleport/local-kubernetes.mdx | 20 +- examples/chart/teleport-cluster/README.md | 55 +- examples/chart/teleport-cluster/values.yaml | 86 ++- 13 files changed, 1156 insertions(+), 650 deletions(-) create mode 100644 docs/pages/deploy-a-cluster/helm-deployments/migration-v12.mdx diff --git a/docs/config.json b/docs/config.json index 992ba09bb393c..8a698a8d49401 100644 --- a/docs/config.json +++ b/docs/config.json @@ -171,6 +171,11 @@ "title": "Migrating From Older Charts", "slug": "/deploy-a-cluster/helm-deployments/migration/", "forScopes": ["oss", "enterprise"] + }, + { + "title": "Migrating from v11 to v12", + "slug": "/deploy-a-cluster/helm-deployments/migration-v12/", + "forScopes": ["oss", "enterprise"] } ] }, diff --git a/docs/pages/deploy-a-cluster/helm-deployments.mdx b/docs/pages/deploy-a-cluster/helm-deployments.mdx index 2d1e932113e2a..cc0489d7301c2 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments.mdx @@ -15,5 +15,5 @@ our `teleport-cluster` Helm chart. 
## Migration Guides -- [Migrating from the legacy Teleport chart](./helm-deployments/migration.mdx): - +- [Migrating from the legacy Teleport chart](./helm-deployments/migration.mdx) +- [Migrating from v11 to v12](./helm-deployments/migration-v12.mdx) diff --git a/docs/pages/deploy-a-cluster/helm-deployments/aws.mdx b/docs/pages/deploy-a-cluster/helm-deployments/aws.mdx index ff12aa7369557..e270882a1be13 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments/aws.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments/aws.mdx @@ -411,18 +411,25 @@ Once the chart is installed, you can use `kubectl` commands to view the deployme ```code $ kubectl --namespace teleport get all -# NAME READY STATUS RESTARTS AGE -# pod/teleport-5cf46ddf5f-dzh65 1/1 Running 0 4m21s -# pod/teleport-5cf46ddf5f-mpghq 1/1 Running 0 4m21s - -# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# service/teleport LoadBalancer 10.100.37.171 a232d92df01f940339adea0e645d88bb-1576732600.us-east-1.elb.amazonaws.com 443:30821/TCP,3023:30801/TCP,3026:32612/TCP,3024:31253/TCP 4m21s - -# NAME READY UP-TO-DATE AVAILABLE AGE -# deployment.apps/teleport 2/2 2 2 4m21s - -# NAME DESIRED CURRENT READY AGE -# replicaset.apps/teleport-5cf46ddf5f 2 2 2 4m21s +NAME READY STATUS RESTARTS AGE +pod/teleport-auth-57989d4cbd-4q2ds 1/1 Running 0 22h +pod/teleport-auth-57989d4cbd-rtrzn 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-w96d2 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-z256w 1/1 Running 0 22h + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/teleport LoadBalancer 10.40.11.180 xxxxx.elb.us-east-1.amazonaws.com 443:30258/TCP,3023:31802/TCP,3026:32182/TCP,3024:30101/TCP,3036:30302/TCP 22h +service/teleport-auth ClusterIP 10.40.8.251 3025/TCP,3026/TCP 22h +service/teleport-auth-v11 ClusterIP None 22h +service/teleport-auth-v12 ClusterIP None 22h + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/teleport-auth 2/2 2 2 22h +deployment.apps/teleport-proxy 2/2 2 2 22h + +NAME DESIRED CURRENT READY AGE 
+replicaset.apps/teleport-auth-57989d4cbd 2 2 2 22h +replicaset.apps/teleport-proxy-c6bf55cfc 2 2 2 22h ``` ## Step 6/7. Set up DNS @@ -499,11 +506,12 @@ Create a user to be able to log into Teleport. This needs to be done on the Tele so we can run the command using `kubectl`: ```code -$ kubectl --namespace teleport exec deploy/teleport -- tctl users add test --roles=access,editor -# User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: -# https://teleport.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 +$ kubectl --namespace teleport exec deploy/teleport-auth -- tctl users add test --roles=access,editor -# NOTE: Make sure teleport.example.com:443 points at a Teleport proxy that users can access. +User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: +https://teleport.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 + +NOTE: Make sure teleport.example.com:443 points at a Teleport proxy that users can access. ``` Load the user creation link to create a password and set up 2-factor authentication for the Teleport user via the web UI. @@ -615,4 +623,3 @@ Teleport cluster. See the [high availability section of our Helm chart reference](../../reference/helm-reference/teleport-cluster.mdx#highavailability) for more details on high availability. Read the [`cert-manager` documentation](https://cert-manager.io/docs/). 
- diff --git a/docs/pages/deploy-a-cluster/helm-deployments/custom.mdx b/docs/pages/deploy-a-cluster/helm-deployments/custom.mdx index 644dbc948879f..1e8628808eceb 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments/custom.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments/custom.mdx @@ -3,11 +3,19 @@ title: Running Teleport with a Custom Configuration using Helm description: Install and configure a Teleport cluster with a custom configuration using Helm --- -In this guide, we'll explain how to set up a Teleport cluster in Kubernetes using a custom [`teleport.yaml`](../../reference/config.mdx) -config file using Teleport Helm charts. +In this guide, we'll explain how to set up a Teleport cluster in Kubernetes +with a custom [`teleport.yaml`](../../reference/config.mdx) config file +using Teleport Helm charts. -This setup can be useful when you already have an existing Teleport cluster and would like to start running it in Kubernetes, or when -migrating your setup from a legacy version of the Helm charts. +This setup can be useful when you already have an existing Teleport cluster and would +like to start running it in Kubernetes, or when migrating your setup from a legacy +version of the Helm charts. + + +Those instructions are both for v12 Teleport and the v12 `teleport-cluster` chart. +If you are running an older Teleport version, use the version selector at the top +of this page to choose the correct version. + ## Prerequisites @@ -23,67 +31,84 @@ migrating your setup from a legacy version of the Helm charts. ## Step 3/4. Setting up a Teleport cluster with Helm using a custom config -In `custom` mode, the `teleport-cluster` Helm chart does not create a `ConfigMap` containing a `teleport.yaml` file for you, but -expects that you will provide this yourself. +In `scratch` mode, the `teleport-cluster` Helm chart generates a minimal +configuration and lets you pass your custom configuration through the chart's values. 
-For this example, we'll be using this `teleport.yaml` configuration file with a static join token (for more information on join tokens, see [Adding Nodes to the Cluster](../../management/admin/adding-nodes.mdx)): +`teleport-cluster` deploys two sets of pods: proxy and auth. +You must provide two configurations, one for each pod type. -```code -$ cat << EOF > teleport.yaml -teleport: - log: - output: stderr - severity: INFO - -auth_service: - enabled: true - cluster_name: custom.example.com - tokens: - # These commands will generate random 32-character alphanumeric strings to use as join tokens - - "proxy,node:$(tr -dc A-Za-z0-9 +When using `scratch` or `standalone` mode, you **must** use highly-available +storage (e.g. etcd, DynamoDB, or Firestore) for multiple replicas to be supported. - - You can skip this step if you already have a `teleport.yaml` file locally that you'd like to use. - +[Information on supported Teleport storage backends](../../reference/backends.mdx) -Create the namespace for the config and add the `teleport.yaml` from your local -disk: +Manually configuring NFS-based storage or `ReadWriteMany` volume claims is **NOT** +supported for an HA deployment and will result in errors. + -```code -$ kubectl create namespace teleport -$ kubectl --namespace teleport create configmap teleport --from-file=teleport.yaml +Write the following `my-values.yaml` file, and adapt the teleport configuration as needed. +You can find all possible configuration fields in the [Teleport Config Reference](../../reference/config.mdx). 
+ +```yaml +chartMode: scratch + +auth: + teleportConfig: + # put your teleport.yaml auth configuration here + teleport: + log: + output: stderr + severity: INFO + + auth_service: + enabled: true + listen_addr: 0.0.0.0:3025 + +proxy: + teleportConfig: + # put your teleport.yaml proxy configuration here + teleport: + # The join_params section must be provided for the proxies to join the auth servers + # By default, the chart creates a Kubernetes join token which you can use. + join_params: + method: kubernetes + # The token name pattern is "-proxy" + # Change this if you change the Helm release name. + token_name: "teleport-proxy" + # The auth server domain pattern is "-auth..svc.cluster.local:3025" + # If you change the Helm release name or namespace you must adapt the `auth_server` value. + auth_server: "teleport-auth.teleport.svc.cluster.local:3025" + log: + output: stderr + severity: INFO + proxy_service: + enabled: true + listen_addr: 0.0.0.0:3080 + public_addr: custom.example.com:443 + +# OPTIONAL - when using an highly-available storage for both backend AND session recordings +# you can disable disk persistence and replicate auth pods. +# +# persistence: +# enabled: false +# highAvailability: +# replicaCount: 2 ``` - - The name of the `ConfigMap` used must match the name of the Helm release that you install below (the name just after `helm install`). - In this example, it's `teleport`. - - The name (key) of the configuration file uploaded to your `ConfigMap` must be `teleport.yaml`. If your configuration file is named differently - on disk, you can specify the key that should be used in the `kubectl` command: - - ```code - $ kubectl --namespace teleport create configmap teleport --from-file=teleport.yaml=my-teleport-config-file.yaml - ``` - +You can control the externally-facing name of your cluster using the `public_addr` +sections of `teleport.yaml`. In this example, our `public_addr`s are set to +`custom.example.com`. 
@@ -103,159 +128,110 @@ $ kubectl -n teleport create secret generic license --from-file=license.pem -After the `ConfigMap` has been created and you -have deployed the secret containing your license file, you can -deploy the Helm chart into a Kubernetes cluster with a command like this: + +Note that although the `proxy_service` listens on port 3080 inside the pod, +the default `LoadBalancer` service configured by the chart will always listen +externally on port 443 (which is redirected internally to port 3080). - +Due to this, your `proxy_service.public_addr` should always end in `:443`: -```code -$ helm install teleport teleport/teleport-cluster \ - --namespace teleport \ - --set chartMode=custom +```yaml +proxy_service: + listen_addr: 0.0.0.0:3080 + public_addr: custom.example.com:443 ``` - - + + +You can now deploy Teleport in your cluster with the command: + + + ```code $ helm install teleport teleport/teleport-cluster \ --namespace teleport \ - --set chartMode=custom \ - --set enterprise=true + --values my-values.yaml ``` - - - - Most settings from `values.yaml` will not be applied in `custom` mode. - - It's important to specify any settings under the `acme`, `aws`, `gcp`, and `logLevel` sections of the chart in your own `teleport.yaml` file that you upload yourself. - - -You can control the externally-facing name of your cluster using the `public_addr` sections of `teleport.yaml`. In this example, -our `public_addr`s are set to `custom.example.com`. - - - Note that although the `proxy_service` listens on port 3080 inside the pod, the default `LoadBalancer` service configured by the chart - will always listen externally on port 443 (which is redirected internally to port 3080). 
- - Due to this, your `proxy_service.public_addr` should always end in `:443`: + - ```yaml - proxy_service: - listen_addr: 0.0.0.0:3080 - public_addr: custom.example.com:443 - ``` - + - - It will help if you have access to the DNS provider which hosts `example.com` so you can add a `custom.example.com` record - and point it to the external IP or hostname of the Kubernetes load balancer. +```code +$ helm install teleport teleport/teleport-cluster \ + --namespace teleport \ + --set enterprise=true \ + --values my-values.yaml +``` - Don't worry if you can't - you'll just have to remember to replace `custom.example.com` with the external IP or hostname of the Kubernetes load balancer to be able to access Teleport from your local machine. - + + Once the chart is installed, you can use `kubectl` commands to view the deployment: ```code $ kubectl --namespace teleport get all -# NAME READY STATUS RESTARTS AGE -# pod/teleport-5c56b4d869-znmqk 1/1 Running 0 5h8m +NAME READY STATUS RESTARTS AGE +pod/teleport-auth-57989d4cbd-rtrzn 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-w96d2 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-z256w 1/1 Running 0 22h -# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# service/teleport LoadBalancer 10.100.162.158 a5f22a02798f541e58c6641c1b158ea3-1989279894.us-east-1.elb.amazonaws.com 443:30945/TCP,3023:32342/TCP,3026:30851/TCP,3024:31521/TCP 5h29m +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/teleport LoadBalancer 10.40.11.180 34.138.177.11 443:30258/TCP,3023:31802/TCP,3026:32182/TCP,3024:30101/TCP,3036:30302/TCP 22h +service/teleport-auth ClusterIP 10.40.8.251 3025/TCP,3026/TCP 22h +service/teleport-auth-v11 ClusterIP None 22h +service/teleport-auth-v12 ClusterIP None 22h -# NAME READY UP-TO-DATE AVAILABLE AGE -# deployment.apps/teleport 1/1 1 1 5h29m +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/teleport-auth 1/1 1 1 22h +deployment.apps/teleport-proxy 2/2 2 2 22h -# NAME DESIRED CURRENT READY AGE -# 
replicaset.apps/teleport-5c56b4d869 1 1 1 5h8m +NAME DESIRED CURRENT READY AGE +replicaset.apps/teleport-auth-57989d4cbd 1 1 1 22h +replicaset.apps/teleport-proxy-c6bf55cfc 2 2 2 22h ``` ## Step 4/4. Create a Teleport user (optional) -If you're not migrating an existing Teleport cluster, you'll need to create a user to be able to log into Teleport. This needs to be done on the -Teleport auth server, so we can run the command using `kubectl`: +If you're not migrating an existing Teleport cluster, you'll need to create a +user to be able to log into Teleport. This needs to be done on the Teleport +auth server, so we can run the command using `kubectl`: ```code -$ kubectl --namespace teleport exec deploy/teleport -- tctl users add test --roles=access,editor -# User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: -# https://custom.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 - -# NOTE: Make sure custom.example.com:443 points at a Teleport proxy that users can access. -``` - - - If you didn't set up DNS for your hostname earlier, remember to replace `custom.example.com` with the external IP or hostname of the - Kubernetes load balancer. - - (!docs/pages/kubernetes-access/helm/includes/kubernetes-externaladdress.mdx!) - - You should modify your command accordingly and replace `custom.example.com` with either the IP or hostname depending on which you have available. You may need to accept insecure warnings in your browser to view the page successfully. - - - - Using a Kubernetes-issued load balancer IP or hostname is OK for testing but is not viable when running a production Teleport cluster - as the Subject Alternative Name on any public-facing certificate will be expected to match the cluster's configured public address (specified - using `public_addr` when using `custom` mode) - - You must configure DNS properly using the methods described above for production workloads. 
- +$ kubectl --namespace teleport exec deployment/teleport-auth -- tctl users add test --roles=access,editor -Load the user creation link to create a password and set up 2-factor authentication for the Teleport user via the web UI. +User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: +https://custom.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 -## Upgrading the cluster after deployment - -### Making changes to `teleport.yaml` - -If you make changes to your Teleport `ConfigMap`, you can apply these changes by deleting the old `ConfigMap` and applying a new one: - -```code -$ kubectl --namespace teleport delete configmap teleport && \ -# kubectl --namespace teleport create configmap teleport --from-file=teleport.yaml +NOTE: Make sure custom.example.com:443 points at a Teleport proxy that users can access. ``` - Make sure that the name of the `ConfigMap` (e.g. `teleport`) matches the Helm release name used as described above. - - You can list all available `ConfigMap`s in your namespace using this command: +If you didn't set up DNS for your hostname earlier, remember to replace +`custom.example.com` with the external IP or hostname of the Kubernetes load +balancer. - ```code - $ kubectl --namespace teleport get configmap +(!docs/pages/kubernetes-access/helm/includes/kubernetes-externaladdress.mdx!) - # NAME DATA AGE - # teleport 1 2d21h - ``` +You should modify your command accordingly and replace `custom.example.com` with +either the IP or hostname depending on which you have available. You may need +to accept insecure warnings in your browser to view the page successfully. 
-After editing the `ConfigMap`, you must initiate a rolling restart of your Teleport deployment to pick up the changed `ConfigMap`: - -```code -$ kubectl --namespace teleport rollout restart deploy/teleport -``` - -### Making changes to other Helm values - -To make changes to your Teleport cluster after deployment which are not covered by the functionality in `teleport.yaml`, you can -use `helm upgrade`. - -Run this command, editing your command line parameters as appropriate: - -```code -$ helm upgrade teleport teleport/teleport-cluster \ - --set highAvailability.replicaCount=3 -``` - - When using `custom` mode, you **must** use highly-available storage (e.g. etcd, DynamoDB, or Firestore) for multiple replicas to be supported. +Using a Kubernetes-issued load balancer IP or hostname is OK for testing but is +not viable when running a production Teleport cluster as the Subject Alternative +Name on any public-facing certificate will be expected to match the cluster's +configured public address (specified using `public_addr` in your configuration) - [Information on supported Teleport storage backends](../../reference/backends.mdx) - - Manually configuring NFS-based storage or `ReadWriteMany` volume claims is **NOT** supported for an HA deployment and will result in errors. +You must configure DNS properly using the methods described above for production workloads. +Load the user creation link to create a password and set up 2-factor +authentication for the Teleport user via the web UI. + ## Uninstalling the Helm chart To uninstall the `teleport-cluster` chart, use `helm uninstall `. For example: @@ -265,7 +241,8 @@ $ helm --namespace teleport uninstall teleport ``` - To change `chartMode`, you must first uninstall the existing chart and then install a new version with the appropriate values. +To change `chartMode`, you must first uninstall the existing chart and then +install a new version with the appropriate values. 
## Next steps @@ -274,5 +251,5 @@ To see all of the options you can set in the values file for the `teleport-cluster` Helm chart, consult our [reference guide](../../reference/helm-reference/teleport-cluster.mdx). -You can follow our [Getting Started with Teleport guide](../../management/guides/docker.mdx#step-34-creating-a-teleport-user) to finish setting up your -Teleport cluster. +You can follow our [Getting Started with Teleport guide](../../management/guides/docker.mdx#step-34-creating-a-teleport-user) +to finish setting up your Teleport cluster. diff --git a/docs/pages/deploy-a-cluster/helm-deployments/digitalocean.mdx b/docs/pages/deploy-a-cluster/helm-deployments/digitalocean.mdx index 15a9bc074e914..c2d2fdc821f04 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments/digitalocean.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments/digitalocean.mdx @@ -117,10 +117,12 @@ First, get the external IP (from the `EXTERNAL-IP` field) for the Kubernetes clu ```code $ kubectl --namespace=teleport-cluster get services -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# teleport-cluster LoadBalancer 10.245.163.12 192.168.200.200 443:31959/TCP,3023:30525/TCP,3026:30079/TCP,3024:32437/TCP 19m +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +teleport-cluster LoadBalancer 10.245.163.12 192.168.200.200 443:31959/TCP,3023:30525/TCP,3026:30079/TCP,3024:32437/TCP 19m +teleport-cluster-auth ClusterIP 10.245.164.28 3025/TCP,3026/TCP 19m ``` + Once you get the value for the external IP (it may take a few minutes for this field to be populated), update your DNS record such that the clusterName's A record points to this IP address. For example `192.168.200.200` is the external IP in the above case.
![Configure DNS](../../../img/helm/digitalocean/fqdn.png) @@ -130,14 +132,15 @@ Once you get the value for the external IP (it may take a few minutes for this f Now we create a Teleport user by executing the `tctl` command with `kubectl`. ```code -$ kubectl --namespace teleport-cluster exec deploy/teleport-cluster -- tctl users add tadmin --roles=access,editor --logins=ubuntu -# User "tadmin" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: -# https://tele.example.com:443/web/invite/ +$ kubectl --namespace teleport-cluster exec deployment/teleport-cluster-auth -- tctl users add tadmin --roles=access,editor --logins=ubuntu + +User "tadmin" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: +https://tele.example.com:443/web/invite/ -# NOTE: Make sure tele.teleporters.dev:443 points at a Teleport proxy which users can access. +NOTE: Make sure tele.example.com:443 points at a Teleport proxy which users can access. ``` -Copy the link shown after executing the above command and open the link in a web browser to complete the user registration process (the link is `https://tele.teleporters.dev:443/web/invite/` in the above case). +Copy the link shown after executing the above command and open the link in a web browser to complete the user registration process (the link is `https://tele.example.com:443/web/invite/` in the above case).
![Set up user](../../../img/helm/digitalocean/setup-user.png)
@@ -169,9 +172,7 @@ spec: Next, create this role in Kubernetes with the command: ```code - -$ POD=$(kubectl --namespace=teleport-cluster get pod -l app=teleport-cluster -o jsonpath='{.items[0].metadata.name}') -$ kubectl --namespace=teleport-cluster exec -i ${POD?} -- tctl create -f < member.yaml +$ kubectl --namespace=teleport-cluster exec -i deployment/teleport-cluster-auth -- tctl create -f < member.yaml ``` ### Assign the "member" role to user "tadmin" @@ -210,7 +211,7 @@ $ export KUBECONFIG=${HOME?}/teleport-kubeconfig.yaml ```code -$ tsh login --proxy=tele.teleporters.dev:443 --auth=local --user=tadmin +$ tsh login --proxy=tele.example.com:443 --auth=local --user=tadmin Enter password for Teleport user tadmin: Enter your OTP token: 540255 diff --git a/docs/pages/deploy-a-cluster/helm-deployments/gcp.mdx b/docs/pages/deploy-a-cluster/helm-deployments/gcp.mdx index 8ad34c74ab5e3..06d5c752c4da2 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments/gcp.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments/gcp.mdx @@ -361,18 +361,26 @@ Once the chart is installed, you can use `kubectl` commands to view the deployme ```code $ kubectl --namespace teleport get all -# NAME READY STATUS RESTARTS AGE -# pod/teleport-b64dd8849-fklvk 1/1 Running 0 7m4s -# pod/teleport-b64dd8849-jqvns 1/1 Running 0 7m15s -# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# service/teleport LoadBalancer 10.40.14.191 35.203.56.38 443:31758/TCP,3023:30409/TCP,3026:30939/TCP,3024:31403/TCP 7m16s - -# NAME READY UP-TO-DATE AVAILABLE AGE -# deployment.apps/teleport 2/2 2 2 7m16s - -# NAME DESIRED CURRENT READY AGE -# replicaset.apps/teleport-b64dd8849 2 2 2 7m16s +NAME READY STATUS RESTARTS AGE +pod/teleport-auth-57989d4cbd-4q2ds 1/1 Running 0 22h +pod/teleport-auth-57989d4cbd-rtrzn 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-w96d2 1/1 Running 0 22h +pod/teleport-proxy-c6bf55cfc-z256w 1/1 Running 0 22h + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/teleport LoadBalancer 
10.40.11.180 34.138.177.11 443:30258/TCP,3023:31802/TCP,3026:32182/TCP,3024:30101/TCP,3036:30302/TCP 22h +service/teleport-auth ClusterIP 10.40.8.251 3025/TCP,3026/TCP 22h +service/teleport-auth-v11 ClusterIP None 22h +service/teleport-auth-v12 ClusterIP None 22h + +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/teleport-auth 2/2 2 2 22h +deployment.apps/teleport-proxy 2/2 2 2 22h + +NAME DESIRED CURRENT READY AGE +replicaset.apps/teleport-auth-57989d4cbd 2 2 2 22h +replicaset.apps/teleport-proxy-c6bf55cfc 2 2 2 22h ``` ## Step 6/7. Set up DNS @@ -407,11 +415,12 @@ Create a user to be able to log into Teleport. This needs to be done on the Tele so we can run the command using `kubectl`: ```code -$ kubectl --namespace teleport exec deploy/teleport -- tctl users add test --roles=access,editor -# User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: -# https://teleport.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 +$ kubectl --namespace teleport exec deployment/teleport-auth -- tctl users add test --roles=access,editor -# NOTE: Make sure teleport.example.com:443 points at a Teleport proxy which users can access. +User "test" has been created but requires a password. Share this URL with the user to complete user setup, link is valid for 1h: +https://teleport.example.com:443/web/invite/91cfbd08bc89122275006e48b516cc68 + +NOTE: Make sure teleport.example.com:443 points at a Teleport proxy which users can access. ``` Load the user creation link to create a password and set up 2-factor authentication for the Teleport user via the web UI. @@ -483,4 +492,3 @@ You can follow our [Getting Started with Teleport guide](../../management/guides Teleport cluster. See the [high availability section of our Helm chart reference](../../reference/helm-reference/teleport-cluster.mdx#highavailability) for more details on high availability. 
- diff --git a/docs/pages/deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx b/docs/pages/deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx index 35f431cc3e1b7..85eaaf6b15563 100644 --- a/docs/pages/deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx +++ b/docs/pages/deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx @@ -109,19 +109,20 @@ to create a public IP for Teleport. # Service is up, load balancer is created $ kubectl get services - # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - # teleport-cluster LoadBalancer 10.4.4.73 104.199.126.88 443:31204/TCP,3026:32690/TCP 89s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + teleport-cluster LoadBalancer 10.4.4.73 104.199.126.88 443:31204/TCP,3026:32690/TCP 89s + teleport-cluster-auth ClusterIP 10.4.2.51 3025/TCP,3026/TCP 89s # Save the pod IP or hostname. - $ MYIP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - $ echo $MYIP - # 192.168.2.1 + $ SERVICE_IP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + $ echo $SERVICE_IP + 104.199.126.88 ``` - If `$MYIP` is blank, your cloud provider may have assigned a hostname to the load balancer rather than an IP address. Run the following command to retrieve the hostname, which you will use in place of `$MYIP` for subsequent commands. + If `$SERVICE_IP` is blank, your cloud provider may have assigned a hostname to the load balancer rather than an IP address. Run the following command to retrieve the hostname, which you will use in place of `$SERVICE_IP` for subsequent commands. ```code - $ MYIP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + $ SERVICE_IP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') ``` @@ -132,19 +133,20 @@ to create a public IP for Teleport. 
# Service is up, load balancer is created $ kubectl get services - # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - # teleport-cluster-ent LoadBalancer 10.4.4.73 104.199.126.88 443:31204/TCP,3026:32690/TCP 89s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + teleport-cluster-ent LoadBalancer 10.4.4.73 104.199.126.88 443:31204/TCP,3026:32690/TCP 89s + teleport-cluster-ent-auth ClusterIP 10.4.2.51 3025/TCP,3026/TCP 89s # Save the pod IP or hostname. - $ MYIP=$(kubectl get services teleport-cluster-ent -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - $ echo $MYIP - # 192.168.2.1 + $ SERVICE_IP=$(kubectl get services teleport-cluster-ent -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + $ echo $SERVICE_IP + 104.199.126.88 ``` - If `$MYIP` is blank, your cloud provider may have assigned a hostname to the load balancer rather than an IP address. Run the following command to retrieve the hostname, which you will use in place of `$MYIP` for subsequent commands. + If `$SERVICE_IP` is blank, your cloud provider may have assigned a hostname to the load balancer rather than an IP address. Run the following command to retrieve the hostname, which you will use in place of `$SERVICE_IP` for subsequent commands. ```code - $ MYIP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + $ SERVICE_IP=$(kubectl get services teleport-cluster -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') ``` @@ -180,23 +182,40 @@ spec: Create the role and add a user: -```code -# To create a local user, we are going to run Teleport's admin tool tctl from the pod. -$ POD=$(kubectl get pod -l app=teleport-cluster -o jsonpath='{.items[0].metadata.name}') + + + ```code + # Create a role + $ kubectl exec -i deployment/teleport-cluster-auth -- tctl create -f < member.yaml + + # Generate an invite link for the user. 
+ $ kubectl exec -ti deployment/teleport-cluster-auth -- tctl users add alice --roles=member -# Create a role -$ kubectl exec -i ${POD?} -- tctl create -f < member.yaml + # User "alice" has been created but requires a password. Share this URL with the user to + # complete user setup, link is valid for 1h: -# Generate an invite link for the user. -$ kubectl exec -ti ${POD?} -- tctl users add alice --roles=member + # https://tele.example.com:443/web/invite/random-token-id-goes-here -# User "alice" has been created but requires a password. Share this URL with the user to -# complete user setup, link is valid for 1h: + # NOTE: Make sure tele.example.com:443 points at a Teleport proxy which users can access. + ``` + + + ```code + # Create a role + $ kubectl exec -i deployment/teleport-cluster-ent-auth -- tctl create -f < member.yaml -# https://tele.example.com:443/web/invite/random-token-id-goes-here + # Generate an invite link for the user. + $ kubectl exec -ti deployment/teleport-cluster-ent-auth -- tctl users add alice --roles=member -# NOTE: Make sure tele.example.com:443 points at a Teleport proxy which users can access. -``` + # User "alice" has been created but requires a password. Share this URL with the user to + # complete user setup, link is valid for 1h: + + # https://tele.example.com:443/web/invite/ + + # NOTE: Make sure tele.example.com:443 points at a Teleport proxy which users can access. + ``` + + Let's install `tsh` and `tctl` on Linux. For other install options, check out the [installation guide](../../installation.mdx) @@ -221,29 +240,36 @@ For other install options, check out the [installation guide](../../installation -Try `tsh login` with a local user. Use a custom `KUBECONFIG` to prevent overwriting -the default one in case there is a problem. +Try `tsh login` with a local user. 
```code -$ KUBECONFIG=${HOME?}/teleport.yaml tsh login --proxy=tele.example.com:443 --user=alice +$ tsh login --proxy=tele.example.com:443 --user=alice ``` -Teleport updates `KUBECONFIG` with a short-lived 12-hour certificate. +Once you're connected to the Teleport cluster, list the available Kubernetes clusters for your user: ```code # List connected Kubernetes clusters $ tsh kube ls -# Kube Cluster Name Selected -# ----------------- -------- -# tele.example.com -# Login to Kubernetes by name -$ tsh kube login tele.example.com +Kube Cluster Name Selected +----------------- -------- +tele.example.com +``` + +Login to the Kubernetes cluster and create a new separate kubeconfig to connect to the Kubernetes cluster. +Using a separate kubeconfig file allows you to easily switch between the kubeconfig you used to install +Teleport and the one issued by Teleport. This is useful during the install process if something goes wrong. -# Once working, remove the KUBECONFIG= override to switch to teleport -$ KUBECONFIG=${HOME?}/teleport.yaml kubectl get -n teleport-cluster pods -# NAME READY STATUS RESTARTS AGE -# teleport-cluster-6c9b88fd8f-glmhf 1/1 Running 0 127m +``` +$ KUBECONFIG=$HOME/teleport-kubeconfig.yaml tsh kube login tele.example.com + +$ KUBECONFIG=$HOME/teleport-kubeconfig.yaml kubectl get -n teleport-cluster pods +NAME READY STATUS RESTARTS AGE +pod/teleport-cluster-auth-57989d4-4q2ds 1/1 Running 0 22h +pod/teleport-cluster-auth-57989d4-rtrzn 1/1 Running 0 22h +pod/teleport-cluster-proxy-c6bf55-w96d2 1/1 Running 0 22h +pod/teleport-cluster-proxy-c6bf55-z256w 1/1 Running 0 22h ``` ## Step 3/3. 
SSO for Kubernetes @@ -308,20 +334,16 @@ To create a connector, we are going to run Teleport's admin tool `tctl` from the ```code $ kubectl config set-context --current --namespace=teleport-cluster - $ POD=$(kubectl get po -l app=teleport-cluster -o jsonpath='{.items[0].metadata.name}') - $ kubectl exec -i ${POD?} -- tctl create -f < github.yaml - # authentication connector "github" has been created + $ kubectl exec -i deployment/teleport-cluster-auth -- tctl create -f < github.yaml + authentication connector "github" has been created ``` ```code - # To create an Okta connector, we are going to run Teleport's admin tool tctl from the pod. - $ POD=$(kubectl get po -l app=teleport-cluster-ent -o jsonpath='{.items[0].metadata.name}') - - $ kubectl exec -i ${POD?} -- tctl create -f < okta.yaml - # authentication connector 'okta' has been created + $ kubectl exec -i deployment/teleport-cluster-ent-auth -- tctl create -f < okta.yaml + authentication connector 'okta' has been created ``` @@ -350,9 +372,9 @@ the default one in case there is a problem. 
If you are getting a login error, take a look at the audit log for details: ```code - $ kubectl exec -ti "${POD?}" -- tail -n 100 /var/lib/teleport/log/events.log + $ kubectl exec -ti deployment/teleport-cluster-auth -- tail -n 100 /var/lib/teleport/log/events.log - # {"error":"user \"alice\" does not belong to any teams configured in \"github\" connector","method":"github","attributes":{"octocats":["devs"]}} + {"error":"user \"alice\" does not belong to any teams configured in \"github\" connector","method":"github","attributes":{"octocats":["devs"]}} ``` diff --git a/docs/pages/deploy-a-cluster/helm-deployments/migration-v12.mdx b/docs/pages/deploy-a-cluster/helm-deployments/migration-v12.mdx new file mode 100644 index 0000000000000..ea5ad64601e13 --- /dev/null +++ b/docs/pages/deploy-a-cluster/helm-deployments/migration-v12.mdx @@ -0,0 +1,308 @@ +--- +title: Migrating to teleport-cluster v12 +description: How to upgrade to teleport-cluster Helm chart version 12 +--- + +This guide covers the major changes of the `teleport-cluster` v12 chart +and how to upgrade existing releases from version 11 to version 12. + +## Changes summary + +The main changes in version 12 of the `teleport-cluster` chart are: + +- Teleport now deploys its auth and proxy services as separate pods. + Running Teleport with this new topology allows it to be more resilient to + disruptions and scale better. +- Proxies are now deployed as stateless workloads. The `proxy` session recording + mode uploads recordings asynchronously. Non-uploaded records might be lost + during rollouts (config changes or version upgrades for example). + `proxy-sync` ensures consistency and does not have this limitation. +- `custom` mode has been removed as it was broken by the topology change. + It is replaced by a new configuration override mechanism allowing you to pass + arbitrary Teleport configuration values. 
+- The values `standalone.*` that were previously deprecated in favor of `persistence` + have been removed. +- The chart can now be scaled up in `standalone` mode. Proxy replication requires + a TLS certificate; Auth replication requires using [HA storage backends](../../reference/backends.mdx). + + +The chart has always been versioned with Teleport but was often compatible with +the previous Teleport major version. This is not the case for v12. Using the chart +v12 requires at least Teleport v12. + + +## How to upgrade + +The upgrade path mainly depends on the `chartMode` used. If you used a "managed" +mode like `aws`, `gcp` or `standalone` it should be relatively straightforward. +If you relied on the `custom` chart mode, you will have to perform configuration changes. + +Before upgrading, always: + +- [backup the cluster content](../../management/operations/backup-restore.mdx), +- test the upgrade in a non-production environment. + + +During the upgrade, Kubernetes will delete existing deployments and create new ones. +**This is not seamless and will cause some downtime** until the new pods are up and all health checks are passing. +This usually takes around 5 minutes. + + +### If you use `gcp`, `aws` or `standalone` mode + +The upgrade should not require configuration changes. Make sure you don't rely +on `standalone.*` for storage configuration (if you do, switch to using +`persistence` values instead). + +Upgrading to v12 will increase the amount of pods deployed as it will deploy auth +and proxies separately. The chart will try to deploy multiple proxy replicas when +possible (proxies can be replicated if certs are provided through a secret or +`cert-manager`). 
Make sure you have enough room in your Kubernetes cluster to run +the additional Teleport pods: + +- `aws` and `gcp` will deploy twice the amount of pods +- `standalone` will deploy 1 or 2 additional pods (depending if the proxy can be replicated) + +The additional pods might take more time than before to deploy and become ready. +If you are running helm with `--wait` or `--atomic` make sure to increase your +timeouts to at least 10 minutes. + +### If you use `custom` mode + +The `custom` mode worked by passing the Teleport configuration through a ConfigMap. +Due to the version 12 topology change, existing `custom` configuration won't work +as-is and will need to be split in two separate configurations: one for the proxies +and one for the auths. + +To avoid a surprise breaking upgrade, the `teleport-cluster` v12 chart will refuse +to deploy in `custom` mode and point you to this migration guide. + +Version 12 has introduced a new way to pass arbitrary configuration to Teleport +without having to write a full configuration file. If you were using `custom` mode +because of a missing chart feature (like etcd backend support for example) this +might be a better fit for you than managing a fully-custom config. + +#### If you deploy a Teleport cluster and only need a couple of custom configuration overrides + +You can now use the existing modes `aws`, `gcp` and `standalone` and pass your custom +configuration overrides through the `auth.teleportConfig` and `proxy.teleportConfig` +values. For most use-cases this is the recommended setup as you will automatically +benefit from future configuration upgrades. 
+ +For example - a v11 custom configuration that looked like this: + +```yaml +teleport: + log: + output: stderr + severity: INFO +auth_service: + enabled: true + cluster_name: custom.example.com + tokens: # This is custom configuration + - "proxy,node:(=presets.tokens.first=)" + - "trusted_cluster:(=presets.tokens.second=)" + listen_addr: 0.0.0.0:3025 + public_addr: custom.example.com:3025 +proxy_service: + enabled: true + listen_addr: 0.0.0.0:3080 + public_addr: custom.example.com:443 + ssh_public_addr: ssh-custom.example.com:3023 # This is custom configuration +``` + +Can be converted into these values: + +```yaml +chartMode: standalone +clusterName: custom.example.com + +auth: + teleportConfig: + auth_service: + tokens: + - "proxy,node:(=presets.tokens.first=)" + - "trusted_cluster:(=presets.tokens.second=)" + +proxy: + teleportConfig: + proxy_service: + ssh_public_addr: ssh-custom.example.com:3023 +``` + + +`teleport.cluster_name` and `teleport.auth_service.authentication.webauthn.rp_id` MUST NOT change. + + +#### If you deploy a Teleport cluster and need to manage its full configuration + +If you need to manage the full configuration you must use the `scratch` mode. +This mode will generate an empty configuration file and you will pass all your +custom configuration through the `auth.teleportConfig` and `proxy.teleportConfig` +values. + +You must split the configuration in two configurations, one for each node type: + +- The `proxy` configuration must contain at least the `proxy_service` section + and the `teleport` section without the `storage` part. +- The `auth` configuration must contain at least the `auth_service` and `teleport` sections. + +The chart automatically creates a Kubernetes join token named after the Helm +release, which will enable the proxy pods to seamlessly connect to the auth pods. +If you do not want to use this automatic token, you must provide a valid Teleport +join token in the proxy pods' configuration. 
+
+For example - a v11 custom configuration that looked like this:
+
+```yaml
+version: v1
+teleport:
+  log:
+    output: stderr
+    severity: INFO
+auth_service:
+  enabled: true
+  cluster_name: custom.example.com
+  tokens:
+    - "proxy,node:(=presets.tokens.first=)"
+    - "trusted_cluster:(=presets.tokens.second=)"
+  listen_addr: 0.0.0.0:3025
+  public_addr: custom.example.com:3025
+proxy_service:
+  enabled: true
+  listen_addr: 0.0.0.0:3080
+  public_addr: custom.example.com:443
+  ssh_public_addr: ssh-custom.example.com:3023
+```
+
+Can be split into two configurations and be deployed using these values:
+
+```yaml
+chartMode: scratch
+
+proxy:
+  teleportConfig:
+    version: v1
+    teleport:
+      log:
+        output: stderr
+        severity: INFO
+
+      # You MUST insert the following block, this tells the proxies
+      # how to connect to the auth. The helm chart will automatically create a
+      # Kubernetes join token named after the Helm release name so the proxies
+      # can join the cluster.
+      join_params:
+        method: kubernetes
+        # The token name pattern is "<release-name>-proxy"
+        # Change this if you change the Helm release name.
+        token_name: "teleport-proxy"
+        # The auth server domain pattern is "<release-name>-auth.<namespace>.svc.cluster.local:3025"
+        # If you change the Helm release name or namespace you must adapt the `auth_server` value.
+ auth_server: "teleport-auth.teleport.svc.cluster.local:3025" + + proxy_service: + enabled: true + listen_addr: 0.0.0.0:3080 + public_addr: custom.example.com:443 + ssh_public_addr: ssh-custom.example.com:3023 + +auth: + teleportConfig: + version: v1 + teleport: + log: + output: stderr + severity: INFO + auth_service: + enabled: true + cluster_name: custom.example.com + tokens: + - "proxy,node:(=presets.tokens.first=)" + - "trusted_cluster:(=presets.tokens.second=)" + listen_addr: 0.0.0.0:3025 + public_addr: custom.example.com:3025 +``` + +#### If you deploy Teleport nodes + +If you used the `teleport-cluster` chart in `custom` mode to deploy only services +like `app_service`, `db_service`, `kube_service`, `windows_service` or `discovery_service`, +you should use the `teleport-kube-agent` chart for this purpose. + +The chart offers values to configure `app_service`, `kube_service` and `db_service`, +but other services can be configured through the `teleportConfig` value. + +To migrate to the `teleport-kube-agent` chart from `teleport-cluster`, +use the following values: + +```yaml +proxyAddr: teleport.example.com +# pass the token through joinParams instead of `teleportConfig` so it lives +# in a Kubernetes Secret instead of a ConfigMap +joinParams: + method: token + tokenName: (=presets.tokens.first=) + +# Roles can be empty if you pass all the configuration through `teleportConfig` +roles: "" + +# Put all your previous `teleport.yaml` values except the `teleport` section below +teleportConfig: + # kubernetes_service: + # enabled: true + # [...] + # discovery_service: + # enabled: true + # [...] +``` + +## Going further + +The new topology allows you to replicate the proxies to increase availability. +You might also want to tune settings like Kubernetes resources or affinities. 
+
+By default, each value applies to both `proxy` and `auth` pods, e.g.:
+
+```yaml
+resources:
+  requests:
+    cpu: "1"
+    memory: "2GiB"
+  limits:
+    cpu: "1"
+    memory: "2GiB"
+
+highAvailability:
+  requireAntiAffinity: true
+```
+
+But you can scope the value to a specific pod set by nesting it under the `proxy`
+or `auth` values. If both the value at the root and a set-specific value are set,
+the specific value takes precedence:
+
+```yaml
+# By default, all pods use those resources
+resources:
+  requests:
+    cpu: "1"
+    memory: "2GiB"
+  limits:
+    cpu: "1"
+    memory: "2GiB"
+
+proxy:
+  # But the proxy pods have different resource requests and no cpu limits
+  resources:
+    requests:
+      cpu: "0.5"
+      memory: "1GiB"
+    limits:
+      cpu: ~ # Generic and specific config are merged: if you want to unset a value, you must do it explicitly
+      memory: "1GiB"
+
+auth:
+  # Only auth pods will require an anti-affinity
+  highAvailability:
+    requireAntiAffinity: true
+```
diff --git a/docs/pages/deploy-a-cluster/helm-deployments/migration.mdx b/docs/pages/deploy-a-cluster/helm-deployments/migration.mdx
index d2064c71fd1eb..58aefcf6caa57 100644
--- a/docs/pages/deploy-a-cluster/helm-deployments/migration.mdx
+++ b/docs/pages/deploy-a-cluster/helm-deployments/migration.mdx
@@ -58,15 +58,15 @@ Firstly, check that the `ConfigMap` is present:
 
 ```code
 $ kubectl --namespace teleport get configmap/teleport -o yaml
-# apiVersion: v1
-# data:
-#   teleport.yaml: |
-#     teleport:
-#       log:
-#         severity: INFO
-#         output: stderr
-#       storage:
-#         type: dir
+apiVersion: v1
+data:
+  teleport.yaml: |
+    teleport:
+      log:
+        severity: INFO
+        output: stderr
+      storage:
+        type: dir
 # ...
``` @@ -80,13 +80,13 @@ If you see a Teleport config under the `teleport.yaml` key, you can extract it t ```code $ kubectl --namespace teleport get configmap/teleport -o=jsonpath="{.data['teleport\.yaml']}" > teleport.yaml -cat teleport.yaml -# teleport: -# log: -# severity: INFO -# output: stderr -# storage: -# type: dir +$ cat teleport.yaml +teleport: + log: + severity: INFO + output: stderr + storage: + type: dir # ... ``` @@ -95,9 +95,10 @@ namespace (where you intend to run the `teleport-cluster` chart). ```code $ kubectl create namespace teleport-cluster -# namespace/teleport-cluster created +namespace/teleport-cluster created + $ kubectl --namespace teleport-cluster create configmap teleport --from-file=teleport.yaml -# configmap/teleport created +configmap/teleport created ``` ## Step 4/6. Extracting the contents of Teleport's database @@ -200,18 +201,18 @@ Once the chart is installed, you can use `kubectl` commands to view the deployme ```code $ kubectl --namespace teleport-cluster get all -# NAME READY STATUS RESTARTS AGE -# pod/teleport-5cf46ddf5f-dzh65 1/1 Running 0 4m21s -# pod/teleport-5cf46ddf5f-mpghq 1/1 Running 0 4m21s +NAME READY STATUS RESTARTS AGE +pod/teleport-5cf46ddf5f-dzh65 1/1 Running 0 4m21s +pod/teleport-5cf46ddf5f-mpghq 1/1 Running 0 4m21s -# NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -# service/teleport LoadBalancer 10.100.37.171 a232d92df01f940339adea0e645d88bb-1576732600.us-east-1.elb.amazonaws.com 443:30821/TCP,3023:30801/TCP,3026:32612/TCP,3024:31253/TCP 4m21s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service/teleport LoadBalancer 10.100.37.171 a232d92df01f940339adea0e645d88bb-1576732600.us-east-1.elb.amazonaws.com 443:30821/TCP,3023:30801/TCP,3026:32612/TCP,3024:31253/TCP 4m21s -# NAME READY UP-TO-DATE AVAILABLE AGE -# deployment.apps/teleport 2/2 2 2 4m21s +NAME READY UP-TO-DATE AVAILABLE AGE +deployment.apps/teleport 2/2 2 2 4m21s -# NAME DESIRED CURRENT READY AGE -# replicaset.apps/teleport-5cf46ddf5f 2 2 2 4m21s +NAME 
DESIRED CURRENT READY AGE +replicaset.apps/teleport-5cf46ddf5f 2 2 2 4m21s ``` @@ -280,4 +281,3 @@ $ helm --namespace teleport-cluster uninstall teleport To see all of the options you can set in the values file for the `teleport-cluster` Helm chart, consult our [reference guide](../../reference/helm-reference/teleport-cluster.mdx). - diff --git a/docs/pages/reference/helm-reference/teleport-cluster.mdx b/docs/pages/reference/helm-reference/teleport-cluster.mdx index ae9083fb6e44f..c8a5c8f979e38 100644 --- a/docs/pages/reference/helm-reference/teleport-cluster.mdx +++ b/docs/pages/reference/helm-reference/teleport-cluster.mdx @@ -3,85 +3,217 @@ title: teleport-cluster Chart Reference description: Values that can be set using the teleport-cluster Helm chart --- -The `teleport-cluster` Helm chart deploys the `teleport` daemon on Kubernetes. -You can use our preset configurations to deploy the Auth Service and Proxy -Service, or a custom configuration to deploy resource services such as the -Teleport Kubernetes Service or Database Service. +The `teleport-cluster` Helm chart deploys a Teleport cluster on Kubernetes. +This includes deploying proxies, auth servers, and [kubernetes-access](../../kubernetes-access/introduction.mdx). +See the [Teleport HA Architecture page](../../deploy-a-cluster/high-availability.mdx) +for more details. You can [browse the source on GitHub](https://github.com/gravitational/teleport/tree/branch/v(=teleport.major_version=)/examples/chart/teleport-cluster). 
-The `teleport-cluster` chart runs two Teleport services: +The `teleport-cluster` chart runs three Teleport services, split into two sets of pods: -| Teleport service | Purpose | Documentation | -| - | - | - | -| `auth_service` | Authenticates users and hosts, and issues certificates | [Auth documentation](../../architecture/authentication.mdx) -| `proxy_service`| Runs the externally-facing parts of a Teleport cluster, such as the web UI, SSH proxy and reverse tunnel service | [Proxy documentation](../../architecture/proxy.mdx) | +| Teleport service | Running in | Purpose | Documentation | +| - | - | - | - | +| `auth_service` | auth `Deployment` | Authenticates users and hosts, and issues certificates. | [Auth documentation](../../architecture/authentication.mdx) +| `kubernetes_service` | auth `Deployment` | Provides secure access to the Kubernetes
cluster where the Teleport cluster is hosted. | [Kubernetes Access documentation](../../kubernetes-access/introduction.mdx) | +| `proxy_service` | proxy `Deployment` | Runs the externally-facing parts of a Teleport
cluster, such as the web UI, SSH proxy and reverse tunnel service. | [Proxy documentation](../../architecture/proxy.mdx) | -The `teleport-cluster` chart can be deployed in four different modes. Get started with a guide for each mode: + +If you want to provide access to resources like Databases, Applications or other +Kubernetes clusters than the one hosting the Teleport cluster, you should use the +[`teleport-kube-agent` Helm chart](teleport-kube-agent.mdx). -| `chartMode` | Guide | -| - | - | -| `standalone` | [Getting Started - Kubernetes with SSO](../../deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx) | -| `aws` | [Running an HA Teleport cluster using an AWS EKS Cluster](../../deploy-a-cluster/helm-deployments/aws.mdx) | -| `gcp` | [Running an HA Teleport cluster using a Google Cloud GKE cluster](../../deploy-a-cluster/helm-deployments/gcp.mdx) | -| `custom` | [Running a Teleport cluster with a custom config](../../deploy-a-cluster/helm-deployments/custom.mdx) | +- `teleport-cluster` hosts a Teleport cluster, you should only need one. +- `teleport-kube-agent` connects to an existing Teleport cluster and exposes configured resources. This reference details available values for the `teleport-cluster` chart. + + +The `teleport-cluster` chart can be deployed in four different modes. +Get started with a guide for each mode: + +| `chartMode` | Purpose | Guide | +| - | - | - | +| `standalone` | Runs by relying only on Kubernetes resources. | [Getting Started - Kubernetes with SSO](../../deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx) | +| `aws` | Leverages AWS managed services to store data. | [Running an HA Teleport cluster using an AWS EKS Cluster](../../deploy-a-cluster/helm-deployments/aws.mdx) | +| `gcp` | Leverages GCP managed services to store data. | [Running an HA Teleport cluster using a Google Cloud GKE cluster](../../deploy-a-cluster/helm-deployments/gcp.mdx) | +| `scratch` (v12 and above) | Generates empty Teleport configuration. 
User must pass their own config. | [Running a Teleport cluster with a custom config](../../deploy-a-cluster/helm-deployments/custom.mdx) | + + +`custom` mode has been removed in Teleport version 12. See the [version 12 +migration guide](../../deploy-a-cluster/helm-deployments/migration-v12.mdx) for +more information. + + + +The chart is versioned with Teleport. No compatibility guarantees are ensured +between new charts and previous major Teleport versions. It is strongly recommended +to always deploy a Teleport version with the same major version as the Helm chart. + (!docs/pages/includes/backup-warning.mdx!) ## `clusterName` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `nil` | When `chartMode` is `aws`, `gcp` or `standalone` | `auth_service.cluster_name`, `proxy_service.public_addr` | ✅ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|----------------------------------------------------------| +| `string` | `nil` | Yes | `auth_service.cluster_name`, `proxy_service.public_addr` | -`clusterName` controls the name used to refer to the Teleport cluster, along with the externally-facing public address to use to access it. +`clusterName` controls the name used to refer to the Teleport cluster, along with +the externally-facing public address used to access it. In most setups this must +be a fully-qualified domain name (e.g. `teleport.example.com`) as this value is +used as the cluster's public address by default. - If using a fully qualified domain name as your `clusterName`, you will also need to configure the DNS provider for this domain to point - to the external load balancer address of your Teleport cluster. +When using a fully qualified domain name as your `clusterName`, you will also +need to configure the DNS provider for this domain to point to the external +load balancer address of your Teleport cluster. 
- (!docs/pages/kubernetes-access/helm/includes/kubernetes-externaladdress.mdx!) +(!docs/pages/kubernetes-access/helm/includes/kubernetes-externaladdress.mdx!) - You will need to manually add a DNS A record pointing `teleport.example.com` to either the IP or hostname of the Kubernetes load balancer. +You will need to manually add a DNS A record pointing `teleport.example.com` to +the IP, or a CNAME record pointing to the hostname of the Kubernetes load balancer.
(!docs/pages/includes/dns-app-access.mdx!)
- - If you are not using ACME certificates, you may also need to accept insecure warnings in your browser to view the page successfully.
+ +The `clusterName` cannot be changed during a Teleport cluster's lifespan. +If you need to change it, you must redeploy a completely new cluster. + ## `kubeClusterName` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `clusterName` value | no | `kubernetes_service.kube_cluster_name` | ✅ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +| - | - | - | - | +| `string` | `clusterName` value | no | `kubernetes_service.kube_cluster_name` | + +`kubeClusterName` sets the name used for Kubernetes access. +This name will be shown to Teleport users connecting to the Kubernetes cluster. + +## `auth` +| Type | Default value | Required? | +|----------|---------------|-----------| +| `object` | {} | no | + +The `teleport-cluster` chart deploys two sets of pods: auth and proxy. +`auth` contains values specific for the auth pods. You can use it to +set specific values for auth pods, taking precedence over chart-scoped values. + +For example, to override the [`postStart`](#postStart) value only for auth pods: +```yaml +# By default all pods postStart command should be "echo starting" +postStart: ["echo", "starting"] + +auth: + # But we override the `postStart` value specifically for auth pods + postStart: ["curl", "http://hook"] + imagePullPolicy: Always +``` + +### `auth.teleportConfig` + +| Type | Default value | Required? | +|----------|---------------|-----------| +| `object` | {} | no | + +`auth.teleportConfig` contains YAML teleport configuration for auth pods. +The configuration will be merged with the chart-generated configuration +and will take precedence in case of conflict. + +The merge logic is as follows: +- object fields are merged recursively +- lists are replaced +- values (string, integer, boolean, ...) 
are replaced +- fields can be unset by setting them to `null` or `~` + +See the [Teleport Configuration Reference](../config.mdx) for the list of supported fields. + +```yaml +auth: + teleportConfig: + teleport: + cache: + enabled: false + auth_service: + client_idle_timeout: 2h + client_idle_timeout_message: "Connection closed after 2 hours without activity" +``` + +## `proxy` +| Type | Default value | Required? | +|----------|---------------|-----------| +| `object` | {} | no | + +The `teleport-cluster` charts deploys two sets of pods: auth and proxy. +`proxy` contains values specific to the proxy pods. You can use it to +set specific values for proxy pods, taking precedence over chart-scoped values. + +For example, to override the [`postStart`](#postStart) value only for proxy pods: +```yaml +# By default all pods postStart command should be "echo starting" +postStart: ["echo", "starting"] + +proxy: + # But we override the `postStart` value specifically for proxy pods + postStart: ["curl", "http://hook"] + imagePullPolicy: Always +``` -`kubeClusterName` sets the name used for the Kubernetes cluster. This name will be shown to Teleport users connecting to the cluster. +### `proxy.teleportConfig` + +| Type | Default value | Required? | +|----------|---------------|-----------| +| `object` | {} | no | + +`proxy.teleportConfig` contains YAML teleport configuration for proxy pods +The configuration will be merged with the chart-generated configuration +and will take precedence in case of conflict. + +The merge logic is as follows: +- object fields are merged recursively +- lists are replaced +- values (string, integer, boolean, ...) are replaced +- fields can be unset by setting them to `null` or `~` + +See the [Teleport Configuration Reference](../config.mdx) for the list of supported fields. 
+ +```yaml +proxy: + teleportConfig: + teleport: + cache: + enabled: false + proxy_service: + https_keypairs: + - key_file: /my-custom-mount/key.pem + cert_file: /my-custom-mount/cert.pem +``` ## `authentication` ### `authentication.type` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `local` | Yes | `auth_service.authentication.type` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|------------------------------------| +| `string` | `local` | Yes | `auth_service.authentication.type` | -`authentication.type` controls the authentication scheme used by Teleport. Possible values are `local` and `github` for OSS, plus `oidc` and `saml` for Enterprise. +`authentication.type` controls the authentication scheme used by Teleport. +Possible values are `local` and `github` for OSS, plus `oidc` and `saml` for Enterprise. ### `authentication.connectorName` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `""` | No | `auth_service.authentication.connector_name` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|----------------------------------------------| +| `string` | `""` | No | `auth_service.authentication.connector_name` | `authentication.connectorName` sets the default authentication connector. -[The SSO documentation](../../access-controls/sso.mdx) explains how to create authentication connectors for common identity -providers. In addition to SSO connector names, the following built-in connectors are supported: +[The SSO documentation](../../access-controls/sso.mdx) explains how to create +authentication connectors for common identity providers. 
In addition to SSO +connector names, the following built-in connectors are supported: - [`local`](../../management/admin/users.mdx) for local users - [`passwordless`](../../access-controls/guides/passwordless.mdx#optional-enable-passwordless-by-default) to enable by @@ -91,9 +223,9 @@ Defaults to `local`. ### `authentication.localAuth` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `bool` | `true` | No | `auth_service.authentication.local_auth` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|--------|---------------|-----------|------------------------------------------| +| `bool` | `true` | No | `auth_service.authentication.local_auth` | `authentication.localAuth` controls whether local authentication is enabled. When disabled, users can only log in through authentication connectors like `saml`, `oidc` or `github`. @@ -102,9 +234,9 @@ When disabled, users can only log in through authentication connectors like `sam ### `authentication.lockingMode` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `""` | No | `auth_service.authentication.locking_mode` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|--------------------------------------------| +| `string` | `""` | No | `auth_service.authentication.locking_mode` | `authentication.lockingMode` controls the locking mode cluster-wide. Possible values are `best_effort` and `strict`. See [the locking modes documentation](../../access-controls/guides/locking.mdx#next-steps-locking-modes) for more @@ -114,9 +246,9 @@ Defaults to Teleport's binary default when empty: `best_effort`. ### `authentication.secondFactor` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? 
| -| - | - | - | - | - | -| `string` | `on` | Yes | `auth_service.authentication.second_factor` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|---------------------------------------------| +| `string` | `otp` | Yes | `auth_service.authentication.second_factor` | `authentication.secondFactor` controls the second factor used for local user authentication. Possible values supported by this chart are `off` (not recommended), `on`, `otp`, `optional` and `webauthn`. @@ -148,9 +280,9 @@ See [Second Factor - WebAuthn](../../access-controls/guides/webauthn.mdx) for mo #### `authentication.webauthn.attestationAllowedCas` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `array` | `[]` | No | `auth_service.authentication.webauthn.attestation_allowed_cas` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|---------|---------------|-----------|----------------------------------------------------------------| +| `array` | `[]` | No | `auth_service.authentication.webauthn.attestation_allowed_cas` | `authentication.webauthn.attestationAllowedCas` is an optional allow list of certificate authorities (as local file paths or in-line PEM certificate string) for [device verification](https://developers.yubico.com/WebAuthn/WebAuthn_Developer_Guide/Attestation.html). @@ -160,9 +292,9 @@ By default all devices are allowed. #### `authentication.webauthn.attestationDeniedCas` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `array` | `[]` | No | `auth_service.authentication.webauthn.attestation_denied_cas` | ❌ | +| Type | Default value | Required? 
| `teleport.yaml` equivalent | +|---------|---------------|-----------|---------------------------------------------------------------| +| `array` | `[]` | No | `auth_service.authentication.webauthn.attestation_denied_cas` | `authentication.webauthn.attestationDeniedCas` is optional deny list of certificate authorities (as local file paths or in-line PEM certificate string) for [device verification](https://developers.yubico.com/WebAuthn/WebAuthn_Developer_Guide/Attestation.html). @@ -172,9 +304,9 @@ By default no devices are forbidden. ## `proxyListenerMode` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `nil` | no | `auth_service.proxy_listener_mode` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|------------------------------------| +| `string` | `nil` | no | `auth_service.proxy_listener_mode` | `proxyListenerMode` controls proxy TLS routing used by Teleport. Possible values are `multiplex`. @@ -193,9 +325,9 @@ By default no devices are forbidden. ## `sessionRecording` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `string` | `""` | no | `auth_service.session_recording` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------|---------------|-----------|----------------------------------| +| `string` | `""` | no | `auth_service.session_recording` | `sessionRecording` controls the `session_recording` field in the `teleport.yaml` configuration. It is passed as-is in the configuration. @@ -217,9 +349,9 @@ For possible values, [see the Teleport Configuration Reference](../../reference/ ## `separatePostgresListener` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? 
| -| - | - | - | - | -| `bool` | `false` | no | `proxy_service.postgres_listen_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|--------|---------------|-----------|--------------------------------------| +| `bool` | `false` | no | `proxy_service.postgres_listen_addr` | `separatePostgresListener` controls whether Teleport will multiplex PostgreSQL traffic for Teleport Database Access over a separate TLS listener to Teleport's web UI. @@ -249,9 +381,9 @@ These settings will not apply if [`proxyListenerMode`](#proxylistenermode) is se ## `separateMongoListener` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `bool` | `false` | no | `proxy_service.mongo_listen_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|--------|---------------|-----------|-----------------------------------| +| `bool` | `false` | no | `proxy_service.mongo_listen_addr` | `separateMongoListener` controls whether Teleport will multiplex MongoDB traffic for Teleport Database Access over a separate TLS listener to Teleport's web UI. @@ -281,9 +413,9 @@ These settings will not apply if [`proxyListenerMode`](#proxylistenermode) is se ## `publicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|-----------------------------| +| `list[string]` | `[]` | no | `proxy_service.public_addr` | `publicAddr` controls the advertised addresses for TLS connections. @@ -322,9 +454,9 @@ Changing the RP ID will invalidate all already registered webauthn second factor ## `kubePublicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode?
| -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.kube_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|----------------------------------| +| `list[string]` | `[]` | no | `proxy_service.kube_public_addr` | `kubePublicAddr` controls the advertised addresses for the Kubernetes proxy. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex`. @@ -347,9 +479,9 @@ else [`clusterName`](#clusterName) is used. Default port is 3026. ## `mongoPublicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.mongo_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|-----------------------------------| +| `list[string]` | `[]` | no | `proxy_service.mongo_public_addr` | `mongoPublicAddr` controls the advertised addresses to MongoDB clients. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex` and @@ -373,9 +505,9 @@ Default port is 27017. ## `mysqlPublicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.mysql_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|-----------------------------------| +| `list[string]` | `[]` | no | `proxy_service.mysql_public_addr` | `mysqlPublicAddr` controls the advertised addresses for the MySQL proxy. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex`. @@ -399,9 +531,9 @@ else [`clusterName`](#clusterName) is used. Default port is 3036. ## `postgresPublicAddr` -| Type | Default value | Required? 
| `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.postgres_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|--------------------------------------| +| `list[string]` | `[]` | no | `proxy_service.postgres_public_addr` | `postgresPublicAddr` controls the advertised addresses to postgres clients. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex` and @@ -425,9 +557,9 @@ else [`clusterName`](#clusterName) is used. Default port is 5432. ## `sshPublicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.ssh_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|---------------------------------| +| `list[string]` | `[]` | no | `proxy_service.ssh_public_addr` | `sshPublicAddr` controls the advertised addresses for SSH clients. This is also used by the `tsh` client. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex`. @@ -450,9 +582,9 @@ else [`clusterName`](#clusterName) is used. Default port is 3023. ## `tunnelPublicAddr` -| Type | Default value | Required? | `teleport.yaml` equivalent | Can be used in `custom` mode? | -| - | - | - | - | - | -| `list[string]` | `[]` | no | `proxy_service.tunnel_public_addr` | ❌ | +| Type | Default value | Required? | `teleport.yaml` equivalent | +|----------------|---------------|-----------|------------------------------------| +| `list[string]` | `[]` | no | `proxy_service.tunnel_public_addr` | `tunnelPublicAddr` controls the advertised addresses to trusted clusters or nodes joining via node-tunneling. This setting will not apply if [`proxyListenerMode`](#proxylistenermode) is set to `multiplex`.
@@ -475,9 +607,9 @@ else [`clusterName`](#clusterName) is used. Default port is 3024. ## `enterprise` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `false`| ✅ | +| Type | Default value | +|--------|---------------| +| `bool` | `false` | `enterprise` controls whether to use Teleport Community Edition or Teleport Enterprise. @@ -516,9 +648,9 @@ $ kubectl --namespace teleport create secret generic license --from-file=/path/t ## `installCRDs` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `false`| ✅ | +| Type | Default value | +|--------|---------------| +| `bool` | `false` | CRDs are not namespace-scoped resources - they can be installed only once in a cluster. @@ -546,9 +678,9 @@ the same Kubernetes cluster or installing the CRDs on your own you should not ha ### `operator.enabled` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `false`| ✅ | +| Type | Default value | +|--------|---------------| +| `bool` | `false` | `operator.enabled` controls whether to deploy the Teleport Kubernetes Operator as a side-car. @@ -572,9 +704,9 @@ If you are deploying multiple releases of the Helm chart in the same cluster you ### `operator.image` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `public.ecr.aws/gravitational/teleport-operator`| ✅ | +| Type | Default value | +|----------|--------------------------------------------------| +| `string` | `public.ecr.aws/gravitational/teleport-operator` | `operator.image` sets the Teleport Kubernetes Operator container image used for Teleport pods in the cluster. You can override this to use your own Teleport Operator image rather than a Teleport-published image. @@ -597,9 +729,9 @@ This setting requires [`operator.enabled`](#operatorenabled). ### `operator.resources` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | See the [Kubernetes resource](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) documentation. @@ -626,9 +758,9 @@ It is recommended to set resource requests/limits for each container based on th ## `teleportVersionOverride` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `nil` | ✅ | +| Type | Default value | +|----------|---------------| +| `string` | `nil` | Normally the version of Teleport being used will match the version of the chart being installed. If you install chart version 10.0.0, you'll be using Teleport 10.0.0. Upgrading the Helm chart will use the latest version from the repo. @@ -653,9 +785,9 @@ Docker image versions. ## `acme` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `bool` | `false` | ❌ | `proxy_service.acme.enabled` | +| Type | Default value | `teleport.yaml` equivalent | +|--------|---------------|------------------------------| +| `bool` | `false` | `proxy_service.acme.enabled` | ACME is a protocol for getting Web X.509 certificates. @@ -675,17 +807,17 @@ Setting acme to `false` (the default) will cause Teleport to generate and use se ## `acmeEmail` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `nil` | ❌ | `proxy_service.acme.email` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `string` | `nil` | `proxy_service.acme.email` | `acmeEmail` is the email address to provide during certificate registration (this is a Let's Encrypt requirement). ## `acmeURI` -| Type | Default value | Can be used in `custom` mode? 
| `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | Let's Encrypt production server | ❌ | `proxy_service.acme.uri` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------------------------|----------------------------| +| `string` | Let's Encrypt production server | `proxy_service.acme.uri` | `acmeURI` is the ACME server to use for getting certificates. @@ -714,9 +846,9 @@ You can also use any other ACME-compatible server. ### `podSecurityPolicy.enabled` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `true` | ✅ | +| Type | Default value | +|--------|---------------| +| `bool` | `true` | By default, Teleport charts also install a [`podSecurityPolicy`](https://github.com/gravitational/teleport/blob/master/examples/chart/teleport-cluster/templates/psp.yaml). @@ -740,9 +872,9 @@ To disable this, you can set `enabled` to `false`. ## `labels` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `object` | `{}` | ❌ | `kubernetes_service.labels` | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | `labels` can be used to add a map of key-value pairs relating to the Teleport cluster being deployed. These labels can then be used with Teleport's RBAC policies to define access rules for the cluster. @@ -769,18 +901,18 @@ Teleport's RBAC policies to define access rules for the cluster. ## `chartMode` -| Type | Default value | -| - | - | -| `string` | `standalone` | +| Type | Default value | +|----------|---------------| +| `string` | `standalone` | `chartMode` is used to configure the chart's operation mode. 
You can find more information about each mode on its specific guide page: -| `chartMode` | Guide | -| - | - | -| `standalone` | [Getting Started - Kubernetes with SSO](../../deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx) | -| `aws` | [Running an HA Teleport cluster using an AWS EKS Cluster](../../deploy-a-cluster/helm-deployments/aws.mdx) | -| `gcp` | [Running an HA Teleport cluster using a Google Cloud GKE cluster](../../deploy-a-cluster/helm-deployments/gcp.mdx) | -| `custom` | [Running a Teleport cluster with a custom config](../../deploy-a-cluster/helm-deployments/custom.mdx) | +| `chartMode` | Guide | +|--------------|--------------------------------------------------------------------------------------------------------------------| +| `standalone` | [Getting Started - Kubernetes with SSO](../../deploy-a-cluster/helm-deployments/kubernetes-cluster.mdx) | +| `aws` | [Running an HA Teleport cluster using an AWS EKS Cluster](../../deploy-a-cluster/helm-deployments/aws.mdx) | +| `gcp` | [Running an HA Teleport cluster using a Google Cloud GKE cluster](../../deploy-a-cluster/helm-deployments/gcp.mdx) | +| `scratch` | [Running a Teleport cluster with a custom config](../../deploy-a-cluster/helm-deployments/custom.mdx) | ## `persistence` @@ -796,9 +928,9 @@ This driver addon must be configured to use persistent volumes in EKS clusters a ### `persistence.enabled` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `true` | ✅ | +| Type | Default value | +|--------|---------------| +| `bool` | `true` | `persistence.enabled` can be used to enable data persistence using either a new or pre-existing `PersistentVolumeClaim`. @@ -818,13 +950,13 @@ This driver addon must be configured to use persistent volumes in EKS clusters a ### `persistence.existingClaimName` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `string` | `nil` | ✅ | +| Type | Default value | +|----------|---------------| +| `string` | `nil` | `persistence.existingClaimName` can be used to provide the name of a pre-existing `PersistentVolumeClaim` to use if desired. -The default is left blank, which will automatically create a `PersistentVolumeClaim` to use for Teleport storage in `standalone` or `custom` mode. +The default is left blank, which will automatically create a `PersistentVolumeClaim` to use for Teleport storage in `standalone` or `scratch` mode. @@ -842,11 +974,11 @@ The default is left blank, which will automatically create a `PersistentVolumeCl ### `persistence.volumeSize` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `10Gi` | ✅ | +| Type | Default value | +|----------|---------------| +| `string` | `10Gi` | -You can set `volumeSize` to request a different size of persistent volume when installing the Teleport chart in `standalone` or `custom` mode. +You can set `volumeSize` to request a different size of persistent volume when installing the Teleport chart in `standalone` or `scratch` mode. `volumeSize` will be ignored if `existingClaimName` is set. @@ -868,70 +1000,54 @@ You can set `volumeSize` to request a different size of persistent volume when i ## `aws` -| Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | -| ❌ | See [Using DynamoDB](../../reference/backends.mdx#dynamodb) and [Using Amazon S3](../../reference/backends.mdx#s3) for details | - `aws` settings are described in the AWS guide: [Running an HA Teleport cluster using an AWS EKS Cluster](../../deploy-a-cluster/helm-deployments/aws.mdx) ## `gcp` -| Can be used in `custom` mode? 
| `teleport.yaml` equivalent | -| - | - | -| ❌ | See [Using Firestore](../../reference/backends.mdx#dynamodb) and [Using GCS](../../reference/backends.mdx#gcs) for details | - `gcp` settings are described in the GCP guide: [Running an HA Teleport cluster using a Google Cloud GKE cluster](../../deploy-a-cluster/helm-deployments/gcp.mdx) -### `highAvailability` +## `highAvailability` -## `highAvailability.replicaCount` +`highAvailability` contains settings controlling how Teleport pods are +replicated and scheduled. This allows Teleport to run in a highly-available +fashion: Teleport should sustain the crash/loss of a machine without interrupting +the service. -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `int` | `1` | ✅ (when using HA storage) | +### For auth pods -`highAvailability.replicaCount` can be used to set the number of replicas used in the deployment. +When using "standalone" or "scratch" mode, you must use highly-available storage +(etcd, DynamoDB or Firestore) for multiple replicas to be supported. +Manually configuring NFS-based storage or ReadWriteMany volume claims +is NOT supported and will result in errors. Using Teleport's built-in +ACME client (as opposed to using cert-manager or passing certs through a secret) +is not supported with multiple replicas. -Set to a number higher than `1` for a high availability mode where multiple Teleport pods will be deployed and connections will be load balanced between them. +### For proxy pods - - Setting `highAvailability.replicaCount` to a value higher than `1` will disable the use of ACME certs. - - - - As a rough guide, we recommend configuring one replica per distinct availability zone where your cluster has worker nodes. +Proxy pods need to be provided a certificate to be replicated (via either +`tls.existingSecretName` or `highAvailability.certManager`). +If proxy pods are replicable, they will default to 2 replicas, +even if `highAvailability.replicaCount` is 1. 
To force a single proxy replica, +set `proxy.highAvailability.replicaCount: 1`. - 2 replicas/availability zones will be fine for smaller workloads. 3-5 replicas/availability zones will be more appropriate for bigger - clusters with more traffic. - +### `highAvailability.replicaCount` - - When using `custom` mode, you **must** use highly-available storage (e.g. etcd, DynamoDB or Firestore) for multiple replicas to be supported. +| Type | Default value | +|-------|---------------| +| `int` | `1` | - [Information on supported Teleport storage backends](../../reference/backends.mdx) +Controls the number of pod replicas. The [`highAvailability`](#highAvailability) section describes +the replication requirements. - Manually configuring NFS-based storage or `ReadWriteMany` volume claims is **NOT** supported for an HA deployment and will result in errors. + + If you set a value greater than 1, you **must** meet the replication criteria described above. Failure to do so will result in errors and inconsistent data. - - - ```yaml - highAvailability: - replicaCount: 3 - ``` - - - ```code - $ --set highAvailability.replicaCount=3 - ``` - - - ## `highAvailability.requireAntiAffinity` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `bool` | `false` | ✅ (when using HA storage) | +| Type | Default value | +|--------|---------------| +| `bool` | `false` | [Kubernetes reference](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) @@ -967,9 +1083,9 @@ Teleport pods must not be scheduled on the same physical host. ### `highAvailability.podDisruptionBudget.enabled` -| Type | Default value | Can be used in `custom` mode?
| -| - | - | - | -| `bool` | `false` | ✅ (when using HA storage) | +| Type | Default value | +|--------|---------------| +| `bool` | `false` | [Kubernetes reference](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) @@ -992,9 +1108,9 @@ Enable a Pod Disruption Budget for the Teleport Pod to ensure HA during voluntar ### `highAvailability.podDisruptionBudget.minAvailable` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `int` | `1` | ✅ (when using HA storage) | +| Type | Default value | +|-------|---------------| +| `int` | `1` | [Kubernetes reference](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) @@ -1021,9 +1137,9 @@ See the [cert-manager](https://cert-manager.io/docs/) docs for more information. ### `highAvailability.certManager.enabled` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `bool` | `false` | ❌ | `proxy_service.https_keypairs` (to provide your own certificates) | +| Type | Default value | `teleport.yaml` equivalent | +|--------|---------------|-------------------------------------------------------------------| +| `bool` | `false` | `proxy_service.https_keypairs` (to provide your own certificates) | Setting `highAvailability.certManager.enabled` to `true` will use `cert-manager` to provision a TLS certificate for a Teleport cluster deployed in HA mode. @@ -1037,9 +1153,9 @@ cluster deployed in HA mode. ### `highAvailability.certManager.addCommonName` -| Type | Default value | Can be used in `custom` mode? 
| `teleport.yaml` equivalent | -| - | - | - | - | -| `bool` | `false` | ❌ | `proxy_service.https_keypairs` (to provide your own certificates) | +| Type | Default value | `teleport.yaml` equivalent | +|--------|---------------|-------------------------------------------------------------------| +| `bool` | `false` | `proxy_service.https_keypairs` (to provide your own certificates) | Setting `highAvailability.certManager.addCommonName` to `true` will instruct `cert-manager` to set the commonName field in its certificate signing request to the issuing CA. @@ -1071,9 +1187,9 @@ Setting `highAvailability.certManager.addCommonName` to `true` will instruct `ce ### `highAvailability.certManager.issuerName` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `nil` | ❌ | None | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `string` | `nil` | None | Sets the name of the `cert-manager` `Issuer` or `ClusterIssuer` to use for issuing certificates. @@ -1103,9 +1219,9 @@ Sets the name of the `cert-manager` `Issuer` or `ClusterIssuer` to use for issui ### `highAvailability.certManager.issuerKind` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `Issuer` | ❌ | None | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `string` | `Issuer` | None | Sets the `Kind` of `Issuer` to be used when issuing certificates with `cert-manager`. Defaults to `Issuer` to keep permissions scoped to a single namespace. @@ -1127,9 +1243,9 @@ scoped to a single namespace. ### `highAvailability.certManager.issuerGroup` -| Type | Default value | Can be used in `custom` mode? 
| `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `cert-manager.io` | ❌ | None | +| Type | Default value | +|----------|-------------------| +| `string` | `cert-manager.io` | Sets the `Group` of `Issuer` to be used when issuing certificates with `cert-manager`. Defaults to `cert-manager.io` to use built-in issuers. @@ -1150,9 +1266,9 @@ Sets the `Group` of `Issuer` to be used when issuing certificates with `cert-man ## `highAvailability.minReadySeconds` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | - | -| `integer` | `15` | ✅ | +| Type | Default value | +|-----------|---------------| +| `integer` | `15` | Amount of time to wait during a pod rollout before moving to the next pod. [See Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#min-ready-seconds). @@ -1175,9 +1291,9 @@ This is used to give time for the agents to connect back to newly created pods b ## `tls.existingSecretName` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `""` | ✅ | `proxy_service.https_keypairs` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|--------------------------------| +| `string` | `""` | `proxy_service.https_keypairs` | `tls.existingSecretName` tells Teleport to use an existing Kubernetes TLS secret to secure its web UI using HTTPS. This can be set to use a TLS certificate issued by a trusted internal CA rather than a public-facing CA like Let's Encrypt. @@ -1206,9 +1322,9 @@ See https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets for mo ## `tls.existingCASecretName` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `string` | `""` | ✅ | +| Type | Default value | +|----------|---------------| +| `string` | `""` | `tls.existingCASecretName` sets the `SSL_CERT_FILE` environment variable to load a trusted CA or bundle in PEM format into Teleport pods. This can be set to inject a root and/or intermediate CA so that Teleport can build a full trust chain on startup. @@ -1245,9 +1361,9 @@ kubectl create secret generic my-root-ca --from-file=ca.pem=/path/to/root-ca.pem ## `image` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `public.ecr.aws/gravitational/teleport` | ✅ | +| Type | Default value | +|----------|-----------------------------------------| +| `string` | `public.ecr.aws/gravitational/teleport` | `image` sets the Teleport container image used for Teleport Community pods in the cluster. @@ -1268,9 +1384,9 @@ You can override this to use your own Teleport Community image rather than a Tel ## `enterpriseImage` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `public.ecr.aws/gravitational/teleport-ent` | ✅ | +| Type | Default value | +|----------|---------------------------------------------| +| `string` | `public.ecr.aws/gravitational/teleport-ent` | `enterpriseImage` sets the container image used for Teleport Enterprise pods in the cluster. @@ -1297,9 +1413,9 @@ You can override this to use your own Teleport Enterprise image rather than a Te This field used to be called `logLevel`. For backwards compatibility this name can still be used, but we recommend changing your values file to use `log.level`. -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `INFO` | ❌ | `teleport.log.severity` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `string` | `INFO` | `teleport.log.severity` | `log.level` sets the log level used for the Teleport process. 
@@ -1325,9 +1441,9 @@ The default is `INFO`, which is recommended in production. ### `log.output` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `stderr` | ❌ | `teleport.log.output` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `string` | `stderr` | `teleport.log.output` | `log.output` sets the output destination for the Teleport process. @@ -1351,9 +1467,9 @@ The value can also be set to a file path (such as `/var/log/teleport.log`) to wr ### `log.format` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `string` | `text` | ❌ | `teleport.log.format.output` | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|------------------------------| +| `string` | `text` | `teleport.log.format.output` | `log.format` sets the output type for the Teleport process. @@ -1375,9 +1491,9 @@ Possible values are `text` (default) or `json`. ### `log.extraFields` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `list` | `["timestamp", "level", "component", "caller"]` | ❌ | `teleport.log.format.extra_fields` | +| Type | Default value | `teleport.yaml` equivalent | +|--------|-------------------------------------------------|------------------------------------| +| `list` | `["timestamp", "level", "component", "caller"]` | `teleport.log.format.extra_fields` | `log.extraFields` sets the fields used in logging for the Teleport process. 
@@ -1400,9 +1516,9 @@ See the [Teleport config file reference](../../reference/config.mdx) for more de ## `nodeSelector` -| Type | Default value | -| - | - | -| `object` | `{}` | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | `nodeSelector` can be used to add a map of key-value pairs to constrain the nodes that Teleport pods will run on. @@ -1427,9 +1543,9 @@ nodes that Teleport pods will run on. ## `affinity` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) @@ -1464,19 +1580,14 @@ Kubernetes affinity to set for pod assignments. ## `annotations.config` -| Type | Default value | Can be used in `custom` mode? | `teleport.yaml` equivalent | -| - | - | - | - | -| `object` | `{}` | ❌ | None | +| Type | Default value | `teleport.yaml` equivalent | +|----------|---------------|----------------------------| +| `object` | `{}` | None | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) Kubernetes annotations which should be applied to the `ConfigMap` created by the chart. - - These annotations will not be applied in `custom` mode, as the `ConfigMap` is not managed by the chart. - In this instance, you should apply annotations manually to your created `ConfigMap`. - - ```yaml @@ -1498,9 +1609,9 @@ Kubernetes annotations which should be applied to the `ConfigMap` created by the ## `annotations.deployment` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -1527,9 +1638,9 @@ Kubernetes annotations which should be applied to the `Deployment` created by th ## `annotations.pod` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -1556,9 +1667,9 @@ Kubernetes annotations which should be applied to each `Pod` created by the char ## `annotations.service` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -1585,9 +1696,9 @@ Kubernetes annotations which should be applied to the `Service` created by the c ## `annotations.serviceAccount` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -1614,9 +1725,9 @@ Kubernetes annotations which should be applied to the `serviceAccount` created b ## `annotations.certSecret` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) @@ -1646,9 +1757,9 @@ Kubernetes annotations which should be applied to the `secret` generated by ## `serviceAccount.create` -| Type | Default value | Required? | Can be used in `custom` mode? | -| - | - | - | - | -| `boolean` | `true` | No | ✅ | +| Type | Default value | Required? | +|-----------|---------------|-----------| +| `boolean` | `true` | No | [Kubernetes reference](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) @@ -1656,18 +1767,18 @@ Boolean value that specifies whether service account should be created or not. ## `serviceAccount.name` -| Type | Default value | Required? | Can be used in `custom` mode? | -| - | - | - | - | -| `string` | `""` | No | ✅ | +| Type | Default value | Required? | +|----------|---------------|-----------| +| `string` | `""` | No | Name to use for teleport service account. If `serviceAccount.create` is false, service account with this name should be created in current namespace before installing helm chart. ## `service.type` -| Type | Default value | Required? | Can be used in `custom` mode? | -| - | - | - | - | -| `string` | `LoadBalancer` | Yes | ✅ | +| Type | Default value | Required? | +|----------|----------------|-----------| +| `string` | `LoadBalancer` | Yes | [Kubernetes reference](https://kubernetes.io/docs/concepts/services-networking/service/) @@ -1689,9 +1800,9 @@ Allows to specify the service type. ## `service.spec.loadBalancerIP` -| Type | Default value | Required? | Can be used in `custom` mode? | -| - | - | - | - | -| `string` | `nil` | No | ✅ | +| Type | Default value | Required? 
| +|----------|---------------|-----------| +| `string` | `nil` | No | [Kubernetes reference](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) @@ -1714,9 +1825,9 @@ Allows to specify the `loadBalancerIP`. ## `extraArgs` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | A list of extra arguments to pass to the `teleport start` command when running a Teleport Pod. @@ -1736,9 +1847,9 @@ A list of extra arguments to pass to the `teleport start` command when running a ## `extraEnv` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | [Kubernetes reference](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) @@ -1763,9 +1874,9 @@ A list of extra environment variables to be set on the main Teleport container. ## `extraVolumes` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | [Kubernetes reference](https://kubernetes.io/docs/concepts/storage/volumes/) @@ -1791,9 +1902,9 @@ will also be available to any `initContainers` configured by the chart. ## `extraVolumeMounts` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | [Kubernetes reference](https://kubernetes.io/docs/concepts/storage/volumes/) @@ -1818,9 +1929,9 @@ mounts will also be mounted into any `initContainers` configured by the chart. ## `imagePullPolicy` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `string` | `IfNotPresent` | ✅ | +| Type | Default value | +|----------|----------------| +| `string` | `IfNotPresent` | [Kubernetes reference](https://kubernetes.io/docs/concepts/containers/images/#updating-images) @@ -1841,9 +1952,9 @@ Allows the `imagePullPolicy` for any pods created by the chart to be overridden. ## `initContainers` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | [Kubernetes reference](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) @@ -1869,6 +1980,10 @@ A list of `initContainers` which will be run before the main Teleport container ## `postStart` +| Type | Default value | +|----------|---------------| +| `object` | `{}` | + [Kubernetes reference](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) A `postStart` lifecycle handler to be configured on the main Teleport container. @@ -1892,9 +2007,9 @@ A `postStart` lifecycle handler to be configured on the main Teleport container. ## `resources` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) @@ -1920,9 +2035,9 @@ applied to `initContainers`. ## `securityContext` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `object` | `{}` | ✅ | +| Type | Default value | +|----------|---------------| +| `object` | `{}` | [Kubernetes reference](https://kubernetes.io/docs/concepts/security/pod-security-standards/) @@ -1944,9 +2059,9 @@ The `securityContext` applies to any pods created by the chart, including `initC ## `tolerations` -| Type | Default value | Can be used in `custom` mode? 
| -| - | - | - | -| `list` | `[]` | ✅ | +| Type | Default value | +|--------|---------------| +| `list` | `[]` | [Kubernetes reference](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) @@ -1974,9 +2089,9 @@ Kubernetes Tolerations to set for pod assignment. ## `priorityClassName` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `string` | `""` | ✅ | +| Type | Default value | +|----------|---------------| +| `string` | `""` | [Kubernetes reference](https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/) @@ -1997,9 +2112,9 @@ Kubernetes PriorityClass to set for pod. ## `probeTimeoutSeconds` -| Type | Default value | Can be used in `custom` mode? | -| - | - | - | -| `integer` | `1` | ✅ | +| Type | Default value | +|-----------|---------------| +| `integer` | `1` | [Kubernetes reference](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) diff --git a/docs/pages/try-out-teleport/local-kubernetes.mdx b/docs/pages/try-out-teleport/local-kubernetes.mdx index 8a89b0868b826..6682d62dad79b 100644 --- a/docs/pages/try-out-teleport/local-kubernetes.mdx +++ b/docs/pages/try-out-teleport/local-kubernetes.mdx @@ -83,8 +83,9 @@ Verify that Teleport is running. 
```code $ kubectl get pods -NAME READY STATUS RESTARTS AGE -teleport-cluster-b9dc6c68b-d75fb 1/1 Running 0 46s +NAME READY STATUS RESTARTS AGE +teleport-cluster-auth-57989d4cbd-4q2ds 1/1 Running 0 46s +teleport-cluster-proxy-69c9c4c986-j9v2j 1/1 Running 0 46s ``` ### Expose the Proxy Service to your local machine @@ -115,8 +116,9 @@ The `teleport-cluster` service should now have an external IP: ```code $ kubectl get services -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -teleport-cluster LoadBalancer 10.107.218.212 127.0.0.1 443:32143/TCP,3023:30618/TCP,3026:32750/TCP,3024:32406/TCP,3036:30687/TCP 6m18s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +teleport-cluster LoadBalancer 10.107.218.212 127.0.0.1 443:32143/TCP,3023:30618/TCP,3026:32750/TCP,3024:32406/TCP,3036:30687/TCP 6m18s +teleport-cluster-auth ClusterIP 10.107.218.220 3025/TCP,3026/TCP 6m18s ``` The Proxy Service enables you to manage your cluster via an HTTP API. Assign the @@ -272,9 +274,7 @@ Run the following commands to get the name of the relevant pod and execute a `tctl` command to create a user: ```code -# We will use this variable for subsequent commands -$ PROXY_POD=$(kubectl get po -l app=teleport-cluster -o jsonpath='{.items[0].metadata.name}') -$ kubectl exec $PROXY_POD -- tctl users add --roles=access appuser +$ kubectl exec -it deployment/teleport-cluster-auth -- tctl users add --roles=access,appuser User "appuser" has been created but requires a password. 
Share this URL with the user to complete user setup, link is valid for 1h: https://teleport-cluster.teleport-cluster.svc.cluster.local:443/web/invite/ ``` @@ -296,7 +296,7 @@ convenience while setting up your local demo, you can run the following command to create a dynamic configuration resource that disables MFA for your demo user: ```code -$ kubectl exec -i $PROXY_POD -- bash -c "cat<>/home/cp.yaml +$ kubectl exec -i deployment/teleport-cluster-auth -- bash -c "cat<>/home/cp.yaml kind: cluster_auth_preference version: v2 metadata: @@ -327,7 +327,7 @@ Kubernetes Dashboard: ```code # The cluster IP of Kubernetes Dashboard $ DASH_ADDR=$(kubectl -n kubernetes-dashboard get service kubernetes-dashboard -o jsonpath="{.spec.clusterIP}") -$ kubectl exec -i $PROXY_POD -- tctl tokens add \ +$ kubectl exec -it deployment/teleport-cluster-auth -- tctl tokens add \ --type=app \ --app-name=kube-dash \ --app-uri=https://$DASH_ADDR @@ -370,7 +370,7 @@ Production environments must not skip TLS certificate verification. Run the following command: ```code -$ kubectl exec $PROXY_POD -- tctl tokens ls +$ kubectl exec -it deployment/teleport-cluster-auth -- tctl tokens ls ``` diff --git a/examples/chart/teleport-cluster/README.md b/examples/chart/teleport-cluster/README.md index d0b00a5a39a2c..27e6235344c20 100644 --- a/examples/chart/teleport-cluster/README.md +++ b/examples/chart/teleport-cluster/README.md @@ -4,10 +4,16 @@ This chart sets up a single node Teleport cluster. It uses a persistent volume claim for storage. Great for getting started with Teleport. +## Important Notices + +- The chart version follows the Teleport version. e.g. chart v10.x can run Teleport v10.x and v11.x, but is not compatible with Teleport 9.x +- Teleport does mutual TLS to authenticate clients. It currently does not support running behind a L7 LoadBalancer, like a Kubernetes `Ingress`. It requires being exposed through a L4 LoadBalancer (Kubernetes `Service`). 
+ ## Getting Started -Install Teleport in a separate namespace and provision a web certificate using -Let's Encrypt: +### Single-node example + +To install Teleport in a separate namespace and provision a web certificate using Let's Encrypt, run: ```bash $ helm install teleport/teleport-cluster \ @@ -19,34 +25,39 @@ $ helm install teleport/teleport-cluster \ ./teleport-cluster/ ``` -## Uninstalling +Finally, configure the DNS for `teleport.example.com` to point to the newly created LoadBalancer. -```bash -helm uninstall teleport-cluster +Note: this guide uses the built-in ACME client to get certificates. +In this setup, Teleport nodes cannot be replicated. If you want to run multiple +Teleport replicas, you must provide a certificate through `tls.existingSecretName` +or by installing [cert-manager](https://cert-manager.io/docs/) and setting the `highAvailability.certManager.*` values. + +### Replicated setup guides + +- [Running an HA Teleport cluster in Kubernetes using an AWS EKS Cluster](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/aws/) +- [Running an HA Teleport cluster in Kubernetes using a Google Cloud GKE cluster](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/gcp/) +- [Running a Teleport cluster in Kubernetes with a custom Teleport config](https://goteleport.com/docs/deploy-a-cluster/helm-deployments/custom/) + +### Creating first user + +The first user can be created by executing a command in one of the auth pods. + +```shell +kubectl exec it -n teleport-cluster statefulset/teleport-cluster-auth -- tctl users add my-username --roles=editor,auditor,access ``` -## Arguments Reference +The command should output a registration link to finalize the user creation. -To use the enterprise version, set `--set=enterprise=true` value and create a -secret `license` in the chart namespace. 
+## Uninstalling -| Name | Description | Default | Required | -|----------------------------|-----------------------------------------------------------------------------|--------------------------------------------------|----------| -| `clusterName` | Teleport cluster name (must be an FQDN) | | yes | -| `authentication.type` | Type of authentication to use (`local`, `github`, ...) | `local` | no | -| `teleportVersionOverride` | Teleport version | Current stable version | no | -| `image` | OSS Docker image | `public.ecr.aws/gravitational/teleport` | no | -| `enterpriseImage` | Enterprise Docker image | `public.ecr.aws/gravitational/teleport-ent` | no | -| `acme` | Enable ACME support in Teleport (Letsencrypt.org) | `false` | no | -| `acmeEmail` | Email to use for ACME certificates | | no | -| `acmeURI` | ACME server to use for certificates | `https://acme-v02.api.letsencrypt.org/directory` | no | -| `labels.[name]` | Key-value pairs, for example `--labels.env=local --labels.region=us-west-1` | | no | -| `enterprise` | Use Teleport Enterprise | `false` | no | +```bash +helm uninstall --namespace teleport-cluster teleport-cluster +``` -## Guides +## Documentation See https://goteleport.com/docs/kubernetes-access/helm/guides/ for guides on setting up HA Teleport clusters -in EKS or GKE, plus a more comprehensive chart reference. +in EKS or GKE, plus a comprehensive chart reference. ## Contributing to the chart diff --git a/examples/chart/teleport-cluster/values.yaml b/examples/chart/teleport-cluster/values.yaml index d97922a329be3..7c392167cdde0 100644 --- a/examples/chart/teleport-cluster/values.yaml +++ b/examples/chart/teleport-cluster/values.yaml @@ -2,10 +2,17 @@ # Values that must always be provided by the user. ################################################## -# clusterName is a unique cluster name. -# This value cannot be changed after your cluster starts without rebuilding it from scratch. 
-# We recommend using the fully qualified domain name that you use to access your cluster, -# for example: teleport.example.com. +# `clusterName` controls the name used to refer to the Teleport cluster, along with +# the externally-facing public address to use to access it. In most setups this must +# be a fully-qualified domain name (e.g. `teleport.example.com`) as this value is +# used as the cluster's public address by default. +# +# Note: When using a fully qualified domain name as your `clusterName`, you will also +# need to configure the DNS provider for this domain to point to the external +# load balancer address of your Teleport cluster. +# +# Warning: The clusterName cannot be changed during a Teleport cluster's lifespan. +# If you need to change it, you must redeploy a completely new cluster. clusterName: "" # Name for this kubernetes cluster to be used by teleport users. @@ -18,26 +25,52 @@ kubeClusterName: "" # Version of teleport image, if different from chart version in Chart.yaml. teleportVersionOverride: "" -# auth contains values specific for the auth pods -# You can override chart-scoped values, for example +# The `teleport-cluster` charts deploys two sets of pods: auth and proxy. +# `auth` contains values specific for the auth pods. You can use it to +# set specific values for auth pods, taking precedence over chart-scoped values. +# For example, to override the [`postStart`](#postStart) value only for auth pods: +# # auth: -# postStart: ["curl", "http://hook"] -# imagePullPolicy: Always +# postStart: ["curl", "http://hook"] +# imagePullPolicy: Always auth: # auth.teleportConfig contains YAML teleport configuration for auth pods # The configuration will be merged with the chart-generated configuration - # and will take precedence in case of conflict + # and will take precedence in case of conflict. 
+ # + # See the Teleport Configuration Reference for the list of supported fields: + # https://goteleport.com/docs/reference/config/ + # + # teleportConfig: + # teleport: + # cache: + # enabled: false + # auth_service: + # client_idle_timeout: 2h + # client_idle_timeout_message: "Connection closed after 2hours without activity" teleportConfig: {} # proxy contains values specific for the proxy pods # You can override chart-scoped values, for example -# auth: +# proxy: # postStart: ["curl", "http://hook"] # imagePullPolicy: Always proxy: # proxy.teleportConfig contains YAML teleport configuration for proxy pods # The configuration will be merged with the chart-generated configuration # and will take precedence in case of conflict + # + # See the Teleport Configuration Reference for the list of supported fields: + # https://goteleport.com/docs/reference/config/ + # + # teleportConfig: + # teleport: + # cache: + # enabled: false + # proxy_service: + # https_keypairs: + # - key_file: /my-custom-mount/key.pem + # cert_file: /my-custom-mount/cert.pem teleportConfig: {} authentication: @@ -243,7 +276,7 @@ podMonitor: interval: 30s ###################################################################### -# Persistence settings (only used in "standalone" and "custom" modes) +# Persistence settings (only used in "standalone" and "scratch" modes) # NOTE: Changes in Kubernetes 1.23+ mean that persistent volumes will not automatically be provisioned in AWS EKS clusters # without additional configuration. See https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html for more details. # This driver addon must be configured to use persistent volumes in EKS clusters after Kubernetes 1.23. @@ -320,12 +353,31 @@ gcp: # You can override this to a blank value if the worker node running Teleport already has a service account which grants access. credentialSecretName: teleport-gcp-credentials -# Settings for high availability. These are not used in "standalone" mode. 
-# When using "custom" mode, you must use highly-available storage (etcd, DynamoDB or Firestore) for multiple replicas to be supported. -# Manually configuring NFS-based storage or ReadWriteMany volume claims is NOT supported and will result in errors. + +# `highAvailability` contains settings controlling how Teleport pods are +# replicated and scheduled. This allows Teleport to run in a highly-available +# fashion: Teleport should sustain the crash/loss of a machine without interrupting +# the service. +# +# For auth pods: +# When using "standalone" or "scratch" mode, you must use highly-available storage +# (etcd, DynamoDB or Firestore) for multiple replicas to be supported. +# Manually configuring NFS-based storage or ReadWriteMany volume claims +# is NOT supported and will result in errors. Using Teleport's built-in +# ACME client (as opposed to using cert-manager or passing certs through a secret) +# is not supported with multiple replicas. +# For proxy pods: +# Proxy pods need to be provided a certificate to be replicated (either via +# `tls.existingSecretName` or via `highAvailability.certManager`). +# If proxy pods are replicable, they will default to 2 replicas, +# even if `highAvailability.replicaCount` is 1. To force a single proxy replica, +# set `proxy.highAvailability.replicaCount: 1`. highAvailability: - # Set to >1 for a high availability mode where multiple Teleport pods will be deployed and connections will be load balanced between them. - # Note: this will disable the use of ACME certs. + # Controls the amount of pod replicas. The `highAvailability` comment describes + # the replication requirements. + # + # WARNING: You **must** meet the replication criteria, + # else the deployment will result in errors and inconsistent data. replicaCount: 1 # Setting 'requireAntiAffinity' to true will use 'requiredDuringSchedulingIgnoredDuringExecution' to require that multiple Teleport pods must not be scheduled on the # same physical host. 
This will result in Teleport pods failing to be scheduled in very small clusters or during node downtime, so should be used with caution. @@ -413,7 +465,7 @@ affinity: {} # Kubernetes annotations to apply # https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ annotations: - # Annotations for the ConfigMap (note: these will not be applied in 'custom' mode) + # Annotations for the ConfigMap config: {} # Annotations for the Deployment deployment: {}