diff --git a/awsconfigs/common/dex/kustomization.yaml b/awsconfigs/common/dex/kustomization.yaml new file mode 100644 index 0000000000..e3d5dc069e --- /dev/null +++ b/awsconfigs/common/dex/kustomization.yaml @@ -0,0 +1,9 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: auth +bases: +- ../../../upstream/common/dex/overlays/istio + +patchesStrategicMerge: +- patches/service.yaml +- patches/disable-nodeport.yaml \ No newline at end of file diff --git a/awsconfigs/common/dex/patches/disable-nodeport.yaml b/awsconfigs/common/dex/patches/disable-nodeport.yaml new file mode 100644 index 0000000000..5a17f91eb6 --- /dev/null +++ b/awsconfigs/common/dex/patches/disable-nodeport.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: dex +spec: + ports: + - name: dex + port: 5556 + protocol: TCP + targetPort: 5556 + nodePort: null \ No newline at end of file diff --git a/awsconfigs/common/dex/patches/service.yaml b/awsconfigs/common/dex/patches/service.yaml new file mode 100644 index 0000000000..e0bdb47f0f --- /dev/null +++ b/awsconfigs/common/dex/patches/service.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Service +metadata: + name: dex +spec: + type: ClusterIP diff --git a/charts/apps/jupyter-web-app/Chart.yaml b/charts/apps/jupyter-web-app/Chart.yaml index ecedebf17d..97cbc10a24 100644 --- a/charts/apps/jupyter-web-app/Chart.yaml +++ b/charts/apps/jupyter-web-app/Chart.yaml @@ -3,4 +3,4 @@ appVersion: v1.7.0 description: A Helm chart for Kubernetes name: jupyter-web-app type: application -version: 0.2.0 +version: 0.2.2 diff --git a/charts/apps/jupyter-web-app/templates/ConfigMap/jupyter-web-app-config-mgf762gt24-kubeflow-ConfigMap.yaml b/charts/apps/jupyter-web-app/templates/ConfigMap/jupyter-web-app-config-mgf762gt24-kubeflow-ConfigMap.yaml index 0d6d6cc779..14b0683958 100644 --- a/charts/apps/jupyter-web-app/templates/ConfigMap/jupyter-web-app-config-mgf762gt24-kubeflow-ConfigMap.yaml +++ 
b/charts/apps/jupyter-web-app/templates/ConfigMap/jupyter-web-app-config-mgf762gt24-kubeflow-ConfigMap.yaml @@ -1,95 +1,11 @@ apiVersion: v1 data: - spawner_ui_config.yaml: "# Configuration file for the Jupyter UI.\n#\n# Each Jupyter\ - \ UI option is configured by two keys: 'value' and 'readOnly'\n# - The 'value'\ - \ key contains the default value\n# - The 'readOnly' key determines if the option\ - \ will be available to users\n#\n# If the 'readOnly' key is present and set to\ - \ 'true', the respective option\n# will be disabled for users and only set by\ - \ the admin. Also when a\n# Notebook is POSTED to the API if a necessary field\ - \ is not present then\n# the value from the config will be used.\n#\n# If the\ - \ 'readOnly' key is missing (defaults to 'false'), the respective option\n# will\ - \ be available for users to edit.\n#\n# Note that some values can be templated.\ - \ Such values are the names of the\n# Volumes as well as their StorageClass\n\ - spawnerFormDefaults:\n image:\n # The container Image for the user's Jupyter\ - \ Notebook\n value: public.ecr.aws/kubeflow-on-aws/notebook-servers/jupyter-tensorflow:2.12.0-cpu-py310-ubuntu20.04-ec2-v1.0\n\ - \ # The list of available standard container Images\n options:\n - kubeflownotebookswg/jupyter-scipy:v1.7.0\n\ - \ - public.ecr.aws/kubeflow-on-aws/notebook-servers/jupyter-tensorflow:2.12.0-gpu-py310-cu118-ubuntu20.04-ec2-v1.0\n\ - \ - public.ecr.aws/kubeflow-on-aws/notebook-servers/jupyter-tensorflow:2.12.0-cpu-py310-ubuntu20.04-ec2-v1.0\n\ - \ - public.ecr.aws/kubeflow-on-aws/notebook-servers/jupyter-pytorch:2.0.0-gpu-py310-cu118-ubuntu20.04-ec2-v1.0\n\ - \ - public.ecr.aws/kubeflow-on-aws/notebook-servers/jupyter-pytorch:2.0.0-cpu-py310-ubuntu20.04-ec2-v1.0\n\ - \ imageGroupOne:\n # The container Image for the user's Group One Server\n\ - \ # The annotation `notebooks.kubeflow.org/http-rewrite-uri: /`\n # is applied\ - \ to notebook in this group, configuring\n # the Istio rewrite for containers\ 
- \ that host their web UI at `/`\n value: kubeflownotebookswg/codeserver-python:v1.7.0\n\ - \ # The list of available standard container Images\n options:\n - kubeflownotebookswg/codeserver-python:v1.7.0\n\ - \ imageGroupTwo:\n # The container Image for the user's Group Two Server\n\ - \ # The annotation `notebooks.kubeflow.org/http-rewrite-uri: /`\n # is applied\ - \ to notebook in this group, configuring\n # the Istio rewrite for containers\ - \ that host their web UI at `/`\n # The annotation `notebooks.kubeflow.org/http-headers-request-set`\n\ - \ # is applied to notebook in this group, configuring Istio\n # to add the\ - \ `X-RStudio-Root-Path` header to requests\n value: kubeflownotebookswg/rstudio-tidyverse:v1.7.0\n\ - \ # The list of available standard container Images\n options:\n - kubeflownotebookswg/rstudio-tidyverse:v1.7.0\n\ - \ # If true, hide registry and/or tag name in the image selection dropdown\n\ - \ hideRegistry: true\n hideTag: false\n allowCustomImage: true\n # If true,\ - \ users can input custom images\n # If false, users can only select from the\ - \ images in this config\n imagePullPolicy:\n # Supported values: Always, IfNotPresent,\ - \ Never\n value: IfNotPresent\n readOnly: false\n cpu:\n # CPU for user's\ - \ Notebook\n value: '0.5'\n # Factor by with to multiply request to calculate\ - \ limit\n # if no limit is set, to disable set \"none\"\n limitFactor: \"\ - 1.2\"\n readOnly: false\n memory:\n # Memory for user's Notebook\n value:\ - \ 1.0Gi\n # Factor by with to multiply request to calculate limit\n # if\ - \ no limit is set, to disable set \"none\"\n limitFactor: \"1.2\"\n readOnly:\ - \ false\n environment:\n value: {}\n readOnly: false\n workspaceVolume:\n\ - \ # Workspace Volume to be attached to user's Notebook\n # If you don't\ - \ want a workspace volume then delete the 'value' key\n value:\n mount:\ - \ /home/jovyan\n newPvc:\n metadata:\n name: '{notebook-name}-workspace'\n\ - \ spec:\n resources:\n requests:\n 
storage:\ - \ 10Gi\n accessModes:\n - ReadWriteOnce\n readOnly: false\n\ - \ dataVolumes:\n # List of additional Data Volumes to be attached to the user's\ - \ Notebook\n value: []\n # For example, a list with 2 Data Volumes:\n \ - \ # value:\n # - mount: /home/jovyan/datavol-1\n # newPvc:\n #\ - \ metadata:\n # name: '{notebook-name}-datavol-1'\n # \ - \ spec:\n # resources:\n # requests:\n # \ - \ storage: 5Gi\n # accessModes:\n # - ReadWriteOnce\n\ - \ # - mount: /home/jovyan/datavol-1\n # existingSource:\n # \ - \ persistentVolumeClaim:\n # claimName: test-pvc\n readOnly:\ - \ false\n gpus:\n # Number of GPUs to be assigned to the Notebook Container\n\ - \ value:\n # values: \"none\", \"1\", \"2\", \"4\", \"8\"\n num:\ - \ \"none\"\n # Determines what the UI will show and send to the backend\n\ - \ vendors:\n - limitsKey: \"nvidia.com/gpu\"\n uiName: \"NVIDIA\"\ - \n - limitsKey: \"amd.com/gpu\"\n uiName: \"AMD\"\n # Values:\ - \ \"\" or a `limits-key` from the vendors list\n vendor: \"\"\n readOnly:\ - \ false\n affinityConfig:\n # If readonly, the default value will be the only\ - \ option\n # value is a list of `configKey`s that we want to be selected by\ - \ default\n value: \"\"\n # The list of available affinity configs\n \ - \ options: []\n #options:\n # - configKey: \"exclusive__n1-standard-2\"\ - \n # displayName: \"Exclusive: n1-standard-2\"\n # affinity:\n \ - \ # # (Require) Node having label: `node_pool=notebook-n1-standard-2`\n \ - \ # nodeAffinity:\n # requiredDuringSchedulingIgnoredDuringExecution:\n\ - \ # nodeSelectorTerms:\n # - matchExpressions:\n \ - \ # - key: \"node_pool\"\n # operator: \"In\"\ - \n # values:\n # - \"notebook-n1-standard-2\"\ - \n # # (Require) Node WITHOUT existing Pod having label: `notebook-name`\n\ - \ # podAntiAffinity:\n # requiredDuringSchedulingIgnoredDuringExecution:\n\ - \ # - labelSelector:\n # matchExpressions:\n #\ - \ - key: \"notebook-name\"\n # operator: \"\ - Exists\"\n # namespaces: []\n # topologyKey: 
\"kubernetes.io/hostname\"\ - \n #readOnly: false\n tolerationGroup:\n # The default `groupKey` from\ - \ the options list\n # If readonly, the default value will be the only option\n\ - \ value: \"\"\n # The list of available tolerationGroup configs\n options:\ - \ []\n #options:\n # - groupKey: \"group_1\"\n # displayName: \"\ - Group 1: description\"\n # tolerations:\n # - key: \"key1\"\n \ - \ # operator: \"Equal\"\n # value: \"value1\"\n # \ - \ effect: \"NoSchedule\"\n # - key: \"key2\"\n # operator: \"\ - Equal\"\n # value: \"value2\"\n # effect: \"NoSchedule\"\n\ - \ readOnly: false\n shm:\n value: true\n readOnly: false\n configurations:\n\ - \ # List of labels to be selected, these are the labels from PodDefaults\n\ - \ # value:\n # - add-aws-secret\n # - default-editor\n value:\ - \ []\n readOnly: false\n" + spawner_ui_config.yaml: {{- .Values.spawner_ui_config | toYaml | indent 2 }} kind: ConfigMap metadata: + annotations: {} labels: app: jupyter-web-app kustomize.component: jupyter-web-app name: jupyter-web-app-config-mgf762gt24 - namespace: kubeflow + namespace: kubeflow \ No newline at end of file diff --git a/charts/apps/jupyter-web-app/values.yaml b/charts/apps/jupyter-web-app/values.yaml index 6d800eed5f..e5776c9f61 100644 --- a/charts/apps/jupyter-web-app/values.yaml +++ b/charts/apps/jupyter-web-app/values.yaml @@ -1,2 +1,154 @@ -null -... 
+spawner_ui_config: | + spawnerFormDefaults: + image: + # The container Image for the user's Jupyter Notebook + value: 369500102003.dkr.ecr.us-east-1.amazonaws.com/dl-research:jupyter-f932cdf + # The list of available standard container Images + options: + - 369500102003.dkr.ecr.us-east-1.amazonaws.com/dl-research:jupyter-f932cdf + - kubeflownotebookswg/jupyter-pytorch-full:v1.6.1 + - kubeflownotebookswg/jupyter-pytorch-cuda-full:v1.6.1 + - kubeflownotebookswg/jupyter-tensorflow-full:v1.6.1 + - kubeflownotebookswg/jupyter-tensorflow-cuda-full:v1.6.1 + imageGroupOne: + # The container Image for the user's Group One Server + # The annotation `notebooks.kubeflow.org/http-rewrite-uri: /` + # is applied to notebook in this group, configuring + # the Istio rewrite for containers that host their web UI at `/` + value: 369500102003.dkr.ecr.us-east-1.amazonaws.com/dl-research:vscode-f932cdf + # The list of available standard container Images + options: + - 369500102003.dkr.ecr.us-east-1.amazonaws.com/dl-research:vscode-f932cdf + imageGroupTwo: + # The container Image for the user's Group Two Server + # The annotation `notebooks.kubeflow.org/http-rewrite-uri: /` + # is applied to notebook in this group, configuring + # the Istio rewrite for containers that host their web UI at `/` + # The annotation `notebooks.kubeflow.org/http-headers-request-set` + # is applied to notebook in this group, configuring Istio + # to add the `X-RStudio-Root-Path` header to requests + value: kubeflownotebookswg/rstudio-tidyverse:v1.6.1 + # The list of available standard container Images + options: + - kubeflownotebookswg/rstudio-tidyverse:v1.6.1 + # If true, hide registry and/or tag name in the image selection dropdown + hideRegistry: false + hideTag: false + allowCustomImage: true + # If true, users can input custom images + # If false, users can only select from the images in this config + imagePullPolicy: + # Supported values: Always, IfNotPresent, Never + value: IfNotPresent + readOnly: 
false + cpu: + # CPU for user's Notebook + value: '0.5' + # Factor by with to multiply request to calculate limit + # if no limit is set, to disable set "none" + limitFactor: "1.2" + readOnly: false + memory: + # Memory for user's Notebook + value: 1.0Gi + # Factor by with to multiply request to calculate limit + # if no limit is set, to disable set "none" + limitFactor: "1.2" + readOnly: false + environment: + value: {} + readOnly: false + workspaceVolume: + # Workspace Volume to be attached to user's Notebook + # If you don't want a workspace volume then delete the 'value' key + value: + # mount: /home/dl-user + # existingSource: + # persistentVolumeClaim: + # claimName: '-home' + readOnly: false + dataVolumes: + value: + - mount: /data + newPvc: + metadata: + name: '{notebook-name}-vol-1' + spec: + resources: + requests: + storage: 30Gi + accessModes: + - ReadWriteOnce + readOnly: false + gpus: + # Number of GPUs to be assigned to the Notebook Container + value: + # values: "none", "1", "2", "4", "8" + num: "none" + # Determines what the UI will show and send to the backend + vendors: + - limitsKey: "nvidia.com/gpu" + uiName: "NVIDIA" + # - limitsKey: "amd.com/gpu" + # uiName: "AMD" + # Values: "" or a `limits-key` from the vendors list + vendor: "" + readOnly: false + affinityConfig: + # If readonly, the default value will be the only option + # value is a list of `configKey`s that we want to be selected by default + value: "" + # The list of available affinity configs + options: [] + #options: + # - configKey: "exclusive__n1-standard-2" + # displayName: "Exclusive: n1-standard-2" + # affinity: + # # (Require) Node having label: `node_pool=notebook-n1-standard-2` + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: "node_pool" + # operator: "In" + # values: + # - "notebook-n1-standard-2" + # # (Require) Node WITHOUT existing Pod having label: `notebook-name` + # podAntiAffinity: + # 
requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: "notebook-name" + # operator: "Exists" + # namespaces: [] + # topologyKey: "kubernetes.io/hostname" + #readOnly: false + tolerationGroup: + # The default `groupKey` from the options list + # If readonly, the default value will be the only option + value: "" + options: + - groupKey: "gpu" + displayName: "gpu toleration" + tolerations: + - key: "nvidia.com/gpu" + operator: "Equal" + value: "true" + effect: "PreferNoSchedule" + - groupKey: "spot" + displayName: "spot toleration" + tolerations: + - key: "spot" + operator: "Equal" + value: "true" + effect: "NoSchedule" + shm: + value: true + readOnly: false + configurations: + value: + - add-secret-volume + - add-env + - prevent-eviction + - access-ml-pipeline + readOnly: false \ No newline at end of file diff --git a/charts/apps/kubeflow-pipelines/rds-s3/templates/Certificate/kfp-cache-cert-kubeflow-Certificate.yaml b/charts/apps/kubeflow-pipelines/rds-s3/templates/Certificate/kfp-cache-cert-kubeflow-Certificate.yaml index 733cb98e8f..01e331f056 100644 --- a/charts/apps/kubeflow-pipelines/rds-s3/templates/Certificate/kfp-cache-cert-kubeflow-Certificate.yaml +++ b/charts/apps/kubeflow-pipelines/rds-s3/templates/Certificate/kfp-cache-cert-kubeflow-Certificate.yaml @@ -1,6 +1,10 @@ apiVersion: cert-manager.io/v1 kind: Certificate metadata: + annotations: + "helm.sh/hook": "pre-install" + "helm.sh/hook-weight": "-10" + # "helm.sh/hook-delete-policy": hook-succeeded labels: app: cache-server-cert-manager application-crd-id: kubeflow-pipelines diff --git a/charts/apps/kubeflow-pipelines/rds-s3/templates/Deployment/cache-server-kubeflow-Deployment.yaml b/charts/apps/kubeflow-pipelines/rds-s3/templates/Deployment/cache-server-kubeflow-Deployment.yaml index e6f8ef7616..b629285bdf 100644 --- a/charts/apps/kubeflow-pipelines/rds-s3/templates/Deployment/cache-server-kubeflow-Deployment.yaml +++ 
b/charts/apps/kubeflow-pipelines/rds-s3/templates/Deployment/cache-server-kubeflow-Deployment.yaml @@ -1,6 +1,10 @@ apiVersion: apps/v1 kind: Deployment metadata: + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "-8" + # "helm.sh/hook-delete-policy": hook-succeeded labels: app: cache-server app.kubernetes.io/component: ml-pipeline diff --git a/charts/apps/kubeflow-pipelines/rds-s3/templates/Secret/mlpipeline-minio-artifact-kubeflow-Secret.yaml b/charts/apps/kubeflow-pipelines/rds-s3/templates/Secret/mlpipeline-minio-artifact-kubeflow-Secret.yaml index fb885c4f32..61c5f7ee6f 100644 --- a/charts/apps/kubeflow-pipelines/rds-s3/templates/Secret/mlpipeline-minio-artifact-kubeflow-Secret.yaml +++ b/charts/apps/kubeflow-pipelines/rds-s3/templates/Secret/mlpipeline-minio-artifact-kubeflow-Secret.yaml @@ -1,3 +1,4 @@ +{{- if .Values.create_secret }} apiVersion: v1 kind: Secret metadata: @@ -8,3 +9,4 @@ metadata: stringData: accesskey: '' secretkey: '' +{{- end }} \ No newline at end of file diff --git a/charts/apps/kubeflow-pipelines/rds-s3/values.yaml b/charts/apps/kubeflow-pipelines/rds-s3/values.yaml index 817a92967d..f8eaf92d86 100644 --- a/charts/apps/kubeflow-pipelines/rds-s3/values.yaml +++ b/charts/apps/kubeflow-pipelines/rds-s3/values.yaml @@ -5,4 +5,5 @@ s3: bucketName: minioServiceHost: s3.amazonaws.com minioServiceRegion: - roleArn: \ No newline at end of file + roleArn: +create_secret: false \ No newline at end of file diff --git a/charts/common/knative-serving/templates/HorizontalPodAutoscaler/webhook-knative-serving-HorizontalPodAutoscaler.yaml b/charts/common/knative-serving/templates/HorizontalPodAutoscaler/webhook-knative-serving-HorizontalPodAutoscaler.yaml index 4181c84c93..caeff38be6 100644 --- a/charts/common/knative-serving/templates/HorizontalPodAutoscaler/webhook-knative-serving-HorizontalPodAutoscaler.yaml +++ b/charts/common/knative-serving/templates/HorizontalPodAutoscaler/webhook-knative-serving-HorizontalPodAutoscaler.yaml 
@@ -1,4 +1,4 @@ -apiVersion: autoscaling/v2beta2 +apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: labels: @@ -20,4 +20,4 @@ spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: webhook + name: webhook \ No newline at end of file diff --git a/charts/common/oidc-authservice/templates/ConfigMap/oidc-authservice-parameters-istio-system-ConfigMap.yaml b/charts/common/oidc-authservice/templates/ConfigMap/oidc-authservice-parameters-istio-system-ConfigMap.yaml index c6a0d4f52f..b7d779674f 100644 --- a/charts/common/oidc-authservice/templates/ConfigMap/oidc-authservice-parameters-istio-system-ConfigMap.yaml +++ b/charts/common/oidc-authservice/templates/ConfigMap/oidc-authservice-parameters-istio-system-ConfigMap.yaml @@ -1,3 +1,4 @@ +{{- if .Values.create_config }} apiVersion: v1 data: AUTHSERVICE_URL_PREFIX: /authservice/ @@ -14,3 +15,4 @@ kind: ConfigMap metadata: name: oidc-authservice-parameters namespace: istio-system +{{- end }} \ No newline at end of file diff --git a/charts/common/oidc-authservice/templates/Secret/oidc-authservice-client-istio-system-Secret.yaml b/charts/common/oidc-authservice/templates/Secret/oidc-authservice-client-istio-system-Secret.yaml deleted file mode 100644 index b1e29ec1b5..0000000000 --- a/charts/common/oidc-authservice/templates/Secret/oidc-authservice-client-istio-system-Secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -data: - CLIENT_ID: a3ViZWZsb3ctb2lkYy1hdXRoc2VydmljZQ== - CLIENT_SECRET: cFVCbkJPWTgwU25YZ2ppYlRZTTlaV056WTJ4cmVOR1Fvaw== -kind: Secret -metadata: - name: oidc-authservice-client - namespace: istio-system -type: Opaque diff --git a/charts/common/oidc-authservice/values.yaml b/charts/common/oidc-authservice/values.yaml index 6d800eed5f..c1155d3712 100644 --- a/charts/common/oidc-authservice/values.yaml +++ b/charts/common/oidc-authservice/values.yaml @@ -1,2 +1 @@ -null -... 
+create_config: false \ No newline at end of file diff --git a/charts/hyperfine/user/.helmignore b/charts/hyperfine/user/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/charts/hyperfine/user/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/hyperfine/user/Chart.yaml b/charts/hyperfine/user/Chart.yaml new file mode 100644 index 0000000000..99d1b78722 --- /dev/null +++ b/charts/hyperfine/user/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: user +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.1.0" diff --git a/charts/hyperfine/user/templates/ConfigMap/env-config-ConfigMap.yaml b/charts/hyperfine/user/templates/ConfigMap/env-config-ConfigMap.yaml new file mode 100644 index 0000000000..207245335e --- /dev/null +++ b/charts/hyperfine/user/templates/ConfigMap/env-config-ConfigMap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: env-config + namespace: {{ .Values.name }} +data: + NAMESPACE: {{ .Values.name }} \ No newline at end of file diff --git a/charts/hyperfine/user/templates/ConfigMap/profile-ConfigMap.yaml b/charts/hyperfine/user/templates/ConfigMap/profile-ConfigMap.yaml new file mode 100644 index 0000000000..f6e1a1368f --- /dev/null +++ b/charts/hyperfine/user/templates/ConfigMap/profile-ConfigMap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +data: + profile-name: {{ .Values.name }} + user: {{ .Values.email }} +kind: ConfigMap +metadata: + name: "default-install-config-{{ .Values.name }}" \ No newline at end of file diff --git a/charts/hyperfine/user/templates/Deployment/secret-pod-Deployment.yaml b/charts/hyperfine/user/templates/Deployment/secret-pod-Deployment.yaml new file mode 100644 index 0000000000..f9c80a0cc4 --- /dev/null +++ b/charts/hyperfine/user/templates/Deployment/secret-pod-Deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kf-secrets-{{ .Values.name }}-deployment + namespace: {{ .Values.name }} + labels: + app: "kf-secrets-{{ .Values.name }}" +spec: + replicas: 1 + selector: + matchLabels: + app: "kf-secrets-{{ .Values.name }}" + template: + metadata: + labels: + app: "kf-secrets-{{ .Values.name }}" + spec: + containers: + - image: k8s.gcr.io/e2e-test-images/busybox:1.29 + command: + - "/bin/sleep" + - "10000" + name: secrets + volumeMounts: + - mountPath: "/mnt/rds-store" + name: "{{ .Values.rdsSecretName }}" + readOnly: true + - mountPath: "/mnt/aws-store" + name: "{{ .Values.s3SecretName }}" + readOnly: true + - mountPath: "/mnt/ssh-store" + name: "{{ 
.Values.sshKeySecretName }}" + readOnly: true + serviceAccountName: {{ .Values.serviceAccountName }} + volumes: + - csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: aws-secrets + name: "{{ .Values.rdsSecretName }}" + - csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: aws-secrets + name: "{{ .Values.s3SecretName }}" + - csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: aws-secrets + name: "{{ .Values.sshKeySecretName }}" \ No newline at end of file diff --git a/charts/hyperfine/user/templates/PersistentVolumeClaim/efs-home-PersistentVolumeClaim.yaml b/charts/hyperfine/user/templates/PersistentVolumeClaim/efs-home-PersistentVolumeClaim.yaml new file mode 100644 index 0000000000..05a91c7972 --- /dev/null +++ b/charts/hyperfine/user/templates/PersistentVolumeClaim/efs-home-PersistentVolumeClaim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Values.name }}-home" + namespace: {{ .Values.name }} +spec: + accessModes: + - ReadWriteMany + storageClassName: {{ .Values.efsStorageClassName }} + resources: + requests: + storage: 30Gi \ No newline at end of file diff --git a/charts/hyperfine/user/templates/PodDefault/env-config-PodDefault.yaml b/charts/hyperfine/user/templates/PodDefault/env-config-PodDefault.yaml new file mode 100644 index 0000000000..d985a7b8aa --- /dev/null +++ b/charts/hyperfine/user/templates/PodDefault/env-config-PodDefault.yaml @@ -0,0 +1,13 @@ +apiVersion: "kubeflow.org/v1alpha1" +kind: PodDefault +metadata: + name: "add-env" + namespace: {{ .Values.name }} +spec: + desc: "add env" + selector: + matchLabels: + add-env: "true" + envFrom: + - configMapRef: + name: env-config \ No newline at end of file diff --git a/charts/hyperfine/user/templates/PodDefault/eviction-PodDefault.yaml b/charts/hyperfine/user/templates/PodDefault/eviction-PodDefault.yaml new file mode 
100644 index 0000000000..4a67ed1063 --- /dev/null +++ b/charts/hyperfine/user/templates/PodDefault/eviction-PodDefault.yaml @@ -0,0 +1,12 @@ +apiVersion: kubeflow.org/v1alpha1 +kind: PodDefault +metadata: + name: prevent-eviction + namespace: {{ .Values.name }} +spec: + desc: prevent eviction + selector: + matchLabels: + prevent-eviction: "true" + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" \ No newline at end of file diff --git a/charts/hyperfine/user/templates/PodDefault/pipeline-PodDefault.yaml b/charts/hyperfine/user/templates/PodDefault/pipeline-PodDefault.yaml new file mode 100644 index 0000000000..c6155491c6 --- /dev/null +++ b/charts/hyperfine/user/templates/PodDefault/pipeline-PodDefault.yaml @@ -0,0 +1,25 @@ +apiVersion: kubeflow.org/v1alpha1 +kind: PodDefault +metadata: + name: access-ml-pipeline + namespace: {{ .Values.name }} +spec: + desc: Allow access to Kubeflow Pipelines + selector: + matchLabels: + access-ml-pipeline: "true" + volumes: + - name: volume-kf-pipeline-token + projected: + sources: + - serviceAccountToken: + path: token + expirationSeconds: 7200 + audience: pipelines.kubeflow.org + volumeMounts: + - mountPath: /var/run/secrets/kubeflow/pipelines + name: volume-kf-pipeline-token + readOnly: true + env: + - name: KF_PIPELINES_SA_TOKEN_PATH + value: /var/run/secrets/kubeflow/pipelines/token \ No newline at end of file diff --git a/charts/hyperfine/user/templates/PodDefault/secret-volume-PodDefault.yaml b/charts/hyperfine/user/templates/PodDefault/secret-volume-PodDefault.yaml new file mode 100644 index 0000000000..031326ae45 --- /dev/null +++ b/charts/hyperfine/user/templates/PodDefault/secret-volume-PodDefault.yaml @@ -0,0 +1,18 @@ +apiVersion: "kubeflow.org/v1alpha1" +kind: PodDefault +metadata: + name: "add-secret-volume" + namespace: {{ .Values.name }} +spec: + desc: "add secret volume" + selector: + matchLabels: + add-secret-volume: "true" + volumeMounts: + - name: secret-volume + mountPath: /etc/ssh-key + 
volumes: + - name: secret-volume + secret: + secretName: ssh-secret-{{ .Values.name }} + defaultMode: 256 \ No newline at end of file diff --git a/charts/hyperfine/user/templates/Profile/profile-Profile.yaml b/charts/hyperfine/user/templates/Profile/profile-Profile.yaml new file mode 100644 index 0000000000..a8067f306e --- /dev/null +++ b/charts/hyperfine/user/templates/Profile/profile-Profile.yaml @@ -0,0 +1,8 @@ +apiVersion: kubeflow.org/v1beta1 +kind: Profile +metadata: + name: {{ .Values.name }} +spec: + owner: + kind: User + name: {{ .Values.email }} diff --git a/charts/hyperfine/user/templates/Role/access-Role.yaml b/charts/hyperfine/user/templates/Role/access-Role.yaml new file mode 100644 index 0000000000..db08f1203f --- /dev/null +++ b/charts/hyperfine/user/templates/Role/access-Role.yaml @@ -0,0 +1,12 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "{{ .Values.name }}-access" + namespace: {{ .Values.name }} +rules: +- apiGroups: [""] + resources: ["*"] + verbs: ["*"] +- apiGroups: ["extensions"] + resources: ["*"] + verbs: ["*"] \ No newline at end of file diff --git a/charts/hyperfine/user/templates/RoleBinding/dashboard-RoleBinding.yaml b/charts/hyperfine/user/templates/RoleBinding/dashboard-RoleBinding.yaml new file mode 100644 index 0000000000..70d3f9f601 --- /dev/null +++ b/charts/hyperfine/user/templates/RoleBinding/dashboard-RoleBinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "{{ .Values.name }}-access-dashboard-role-binding" + namespace: {{ .Values.name }} +subjects: +- kind: Group + name: kubernetes-dashboard + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: Role + name: "{{ .Values.name }}-access" + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/charts/hyperfine/user/templates/RoleBinding/service-account-access-RoleBinding.yaml b/charts/hyperfine/user/templates/RoleBinding/service-account-access-RoleBinding.yaml new file 
mode 100644 index 0000000000..5f8e2acc5e --- /dev/null +++ b/charts/hyperfine/user/templates/RoleBinding/service-account-access-RoleBinding.yaml @@ -0,0 +1,13 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: "{{ .Values.name }}-role-binding" + namespace: {{ .Values.name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ .Values.name }}-access" +subjects: + - kind: ServiceAccount + name: {{ .Values.serviceAccountName }} + namespace: {{ .Values.name }} diff --git a/charts/hyperfine/user/templates/SecretProviderClass/secret-store-SecretProviderClass.yaml b/charts/hyperfine/user/templates/SecretProviderClass/secret-store-SecretProviderClass.yaml new file mode 100644 index 0000000000..bc2b7494da --- /dev/null +++ b/charts/hyperfine/user/templates/SecretProviderClass/secret-store-SecretProviderClass.yaml @@ -0,0 +1,70 @@ +apiVersion: secrets-store.csi.x-k8s.io/v1alpha1 +kind: SecretProviderClass +metadata: + name: aws-secrets + namespace: {{ .Values.name }} +spec: + provider: aws + secretObjects: + - secretName: "ssh-secret-{{ .Values.name }}" + type: Opaque + data: + - objectName: "{{ .Values.name }}" + key: private + - objectName: "{{ .Values.name }}.pub" + key: public + - secretName: mysql-secret + type: Opaque + data: + - objectName: "user" + key: username + - objectName: "pass" + key: password + - objectName: "host" + key: host + - objectName: "database" + key: database + - objectName: "port" + key: port + - secretName: mlpipeline-minio-artifact + type: Opaque + data: + - objectName: "access" + key: accesskey + - objectName: "secret" + key: secretkey + parameters: + objects: | + - objectName: "{{ .Values.sshKeySecretName }}" + objectType: "secretsmanager" + objectAlias: "{{ .Values.name }}-ssh" + objectVersionLabel: "AWSCURRENT" + jmesPath: + - path: "private" + objectAlias: "{{ .Values.name }}" + - path: "public" + objectAlias: "{{ .Values.name }}.pub" + - objectName: "{{ .Values.rdsSecretName }}" + 
objectType: "secretsmanager" + objectAlias: "rds-secret" + objectVersionLabel: "AWSCURRENT" + jmesPath: + - path: "username" + objectAlias: "user" + - path: "password" + objectAlias: "pass" + - path: "host" + objectAlias: "host" + - path: "database" + objectAlias: "database" + - path: "port" + objectAlias: "port" + - objectName: "{{ .Values.s3SecretName }}" + objectType: "secretsmanager" + objectAlias: "s3-secret" + objectVersionLabel: "AWSCURRENT" + jmesPath: + - path: "accesskey" + objectAlias: "access" + - path: "secretkey" + objectAlias: "secret" diff --git a/charts/hyperfine/user/values.yaml b/charts/hyperfine/user/values.yaml new file mode 100644 index 0000000000..a4c90a9f50 --- /dev/null +++ b/charts/hyperfine/user/values.yaml @@ -0,0 +1,7 @@ +name: +email: +s3SecretName: +rdsSecretName: +sshKeySecretName: +serviceAccountName: +efsStorageClassName: \ No newline at end of file diff --git a/deployments/hyperfine/dependencies.tf b/deployments/hyperfine/dependencies.tf new file mode 100644 index 0000000000..e84a439a94 --- /dev/null +++ b/deployments/hyperfine/dependencies.tf @@ -0,0 +1,10 @@ + +data "aws_eks_cluster" "eks_cluster" { + name = var.eks_cluster_name +} + +data "aws_eks_cluster_auth" "kubernetes_token" { + count = var.use_exec_plugin_for_auth ? 0 : 1 + name = var.eks_cluster_name +} + diff --git a/deployments/hyperfine/kubeprovider.tf b/deployments/hyperfine/kubeprovider.tf new file mode 100644 index 0000000000..19103530a9 --- /dev/null +++ b/deployments/hyperfine/kubeprovider.tf @@ -0,0 +1,60 @@ +provider "kubernetes" { + host = data.aws_eks_cluster.eks_cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data) + token = var.use_exec_plugin_for_auth ? null : data.aws_eks_cluster_auth.kubernetes_token[0].token + + # EKS clusters use short-lived authentication tokens that can expire in the middle of an 'apply' or 'destroy'. 
To + # avoid this issue, we use an exec-based plugin here to fetch an up-to-date token. Note that this code requires a + # binary—either kubergrunt or aws—to be installed and on your PATH. + dynamic "exec" { + for_each = var.use_exec_plugin_for_auth ? ["once"] : [] + + content { + api_version = "client.authentication.k8s.io/v1beta1" + command = var.use_kubergrunt_to_fetch_token ? "kubergrunt" : "aws" + args = ( + var.use_kubergrunt_to_fetch_token + ? ["eks", "token", "--cluster-id", var.eks_cluster_name] + : ["eks", "get-token", "--cluster-name", var.eks_cluster_name] + ) + } + } +} + +provider "helm" { + kubernetes { + host = data.aws_eks_cluster.eks_cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data) + token = var.use_exec_plugin_for_auth ? null : data.aws_eks_cluster_auth.kubernetes_token[0].token + + # EKS clusters use short-lived authentication tokens that can expire in the middle of an 'apply' or 'destroy'. To + # avoid this issue, we use an exec-based plugin here to fetch an up-to-date token. Note that this code requires a + # binary—either kubergrunt or aws—to be installed and on your PATH. + dynamic "exec" { + for_each = var.use_exec_plugin_for_auth ? ["once"] : [] + + content { + api_version = "client.authentication.k8s.io/v1beta1" + command = var.use_kubergrunt_to_fetch_token ? "kubergrunt" : "aws" + args = ( + var.use_kubergrunt_to_fetch_token + ? 
["eks", "token", "--cluster-id", var.eks_cluster_name] + : ["eks", "get-token", "--cluster-name", var.eks_cluster_name] + ) + } + } + } +} + +provider "kubectl" { + host = data.aws_eks_cluster.eks_cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks_cluster.certificate_authority.0.data) + load_config_file = false + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ( + ["eks", "get-token", "--cluster-name", var.eks_cluster_name] + ) + } +} \ No newline at end of file diff --git a/deployments/hyperfine/main.tf b/deployments/hyperfine/main.tf new file mode 100644 index 0000000000..21d134e27b --- /dev/null +++ b/deployments/hyperfine/main.tf @@ -0,0 +1,87 @@ +terraform { + required_version = ">= 1.2.7" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.71" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.7.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.13.1" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0" + } + } +} + +# create kubeflow namespace first +resource "kubernetes_namespace_v1" "kubeflow" { + metadata { + labels = { + control-plane = "kubeflow" + istio-injection = "enabled" + } + + name = "kubeflow" + } +} + +module "secrets" { + source = "../../iaac/terraform/hyperfine/secrets" + + eks_cluster_name = var.eks_cluster_name + namespace = kubernetes_namespace_v1.kubeflow.metadata[0].name + + rds_host = var.rds_host + rds_secret_name = var.rds_secret_name + s3_bucket_name = var.s3_bucket_name +} + +module "kubeflow" { + source = "../../iaac/terraform/hyperfine/modules" + + eks_cluster_name = var.eks_cluster_name + + rds_host = module.secrets.rds_host + s3_bucket_name = module.secrets.s3_bucket_name +} + + +module "dex" { + source = "../../iaac/terraform/hyperfine/dex" + + eks_cluster_name = var.eks_cluster_name + + oidc_secret_name = var.oidc_secret_name + okta_secret_name = var.okta_secret_name + 
kms_key_arns = module.secrets.kms_key_arns + + zone_id = var.zone_id + subdomain = var.subdomain +} + + +module "user" { + for_each = var.users + + source = "../../iaac/terraform/hyperfine/user" + + eks_cluster_name = var.eks_cluster_name + + + email = each.key + ssh_key_secret_name = each.value + + + rds_secret_name = module.secrets.rds_secret_name + s3_secret_name = module.secrets.s3_secret_name + kms_key_arns = module.secrets.kms_key_arns +} + diff --git a/deployments/hyperfine/variables.tf b/deployments/hyperfine/variables.tf new file mode 100644 index 0000000000..ecf0919aac --- /dev/null +++ b/deployments/hyperfine/variables.tf @@ -0,0 +1,97 @@ +# access key + +variable "s3_bucket_name" { + description = "bucket to access" + type = string +} + +variable "rds_secret_name" { + description = "secretsmanager for rds config" + type = string +} + +variable "rds_host" { + description = "rds host name to use" + type = string +} + +# dex configurations + +variable subdomain { + description = "subdomain used to access dex" + type = string + default = "platform" +} + +variable dex_version { + description = "helm chart version for dex" + type = string + default = "0.14.1" +} + +variable zone_id { + description = "top level zone to use for domain" + type = string +} + +variable okta_secret_name { + description = "secretmanager name to use for okta" + type = string + # secret format + # { + # "okta_client_id":"asdfasdf", + # "okta_client_secret":"asdfasdf", + # "okta_issuer_url":"https://hyperfine.okta.com", + # "okta_client_id_":"asdfasdf", + # "okta_client_secret_":"asdfasdf-asdfasdfG", + # "okta_issuer_url_":"https://dev-1111111.okta.com" + # } +} + +variable oidc_secret_name { + description = "secretmanager name to use for auth service" + type = string + # secret format + # { + # "auth_client_id":"kf-oidc-authservice", + # "auth_client_secret":"asdfasdf" + # } +} + +variable oidc_sa_name { + description = "service account name to use for oidc" + type = string + default =
"oidc-secrets-manager-sa" +} + +variable auth_namespace { + description = "namespace to deploy auth service to" + type = string + default = "auth" +} + +# user configurations + +variable users { + description = "map of usernames to ssh secret" + type = map(string) +} + + +# PROVIDER CONFIGS +variable "eks_cluster_name" { + description = "cluster to install to" + type = string +} + +variable "use_exec_plugin_for_auth" { + description = "If this variable is set to true, then use an exec-based plugin to authenticate and fetch tokens for EKS. This is useful because EKS clusters use short-lived authentication tokens that can expire in the middle of an 'apply' or 'destroy', and since the native Kubernetes provider in Terraform doesn't have a way to fetch up-to-date tokens, we recommend using an exec-based provider as a workaround. Use the use_kubergrunt_to_fetch_token input variable to control whether kubergrunt or aws is used to fetch tokens." + type = bool + default = true +} + +variable "use_kubergrunt_to_fetch_token" { + description = "EKS clusters use short-lived authentication tokens that can expire in the middle of an 'apply' or 'destroy'. To avoid this issue, we use an exec-based plugin to fetch an up-to-date token. If this variable is set to true, we'll use kubergrunt to fetch the token (in which case, kubergrunt must be installed and on PATH); if this variable is set to false, we'll use the aws CLI to fetch the token (in which case, aws must be installed and on PATH). Note this functionality is only enabled if use_exec_plugin_for_auth is set to true." 
+ type = bool + default = true +} \ No newline at end of file diff --git a/deployments/rds-s3/base/kustomization.yaml b/deployments/rds-s3/base/kustomization.yaml index 0f6779fb30..f8c5c2c91f 100644 --- a/deployments/rds-s3/base/kustomization.yaml +++ b/deployments/rds-s3/base/kustomization.yaml @@ -12,7 +12,7 @@ resources: # OIDC Authservice - ../../../upstream/common/oidc-authservice/base # Dex - - ../../../upstream/common/dex/overlays/istio + - ../../../awsconfigs/common/dex # KNative - ../../../upstream/common/knative/knative-serving/overlays/gateways - ../../../upstream/common/knative/knative-eventing/base diff --git a/deployments/vanilla/kustomization.yaml b/deployments/vanilla/kustomization.yaml index 593b0d59a0..8c755d9879 100644 --- a/deployments/vanilla/kustomization.yaml +++ b/deployments/vanilla/kustomization.yaml @@ -12,7 +12,7 @@ resources: # OIDC Authservice - ../../upstream/common/oidc-authservice/base # Dex - - ../../upstream/common/dex/overlays/istio + - ../../awsconfigs/common/dex # KNative - ../../upstream/common/knative/knative-serving/overlays/gateways - ../../upstream/common/knative/knative-eventing/base diff --git a/iaac/terraform/hyperfine/dex/dependencies.tf b/iaac/terraform/hyperfine/dex/dependencies.tf new file mode 100644 index 0000000000..905ba3e109 --- /dev/null +++ b/iaac/terraform/hyperfine/dex/dependencies.tf @@ -0,0 +1,27 @@ +data "aws_eks_cluster" "eks_cluster" { + name = var.eks_cluster_name +} +data aws_caller_identity "current" {} +data "aws_partition" "current" {} + + +data "aws_eks_cluster_auth" "kubernetes_token" { + count = var.use_exec_plugin_for_auth ? 
0 : 1 + name = var.eks_cluster_name +} + +data "aws_route53_zone" "top_level" { + zone_id = var.zone_id +} + +data "aws_secretsmanager_secret" "oidc_secrets" { + for_each = local.oidc_secret_names + name = each.key +} + +locals { + oidc_id = trimprefix(data.aws_eks_cluster.eks_cluster.identity.0.oidc.0.issuer, "https://") + eks_oidc_provider_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${local.oidc_id}" + oidc_secret_names = toset([var.oidc_secret_name, var.okta_secret_name]) +} + diff --git a/iaac/terraform/hyperfine/dex/dex.tf b/iaac/terraform/hyperfine/dex/dex.tf new file mode 100644 index 0000000000..e2759c9638 --- /dev/null +++ b/iaac/terraform/hyperfine/dex/dex.tf @@ -0,0 +1,251 @@ +terraform { + required_version = ">= 1.2.7" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 3.71" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.13.1" + } + helm = { + source = "hashicorp/helm" + version = "~> 2.0" + } + kubectl = { + source = "gavinbunney/kubectl" + version = ">= 1.7.0" + } + } +} + + + +locals { + url = "https://${var.subdomain}.${data.aws_route53_zone.top_level.name}" +} + + + +resource "kubernetes_namespace_v1" "auth" { + metadata { + name = var.auth_namespace + } +} + +data "aws_iam_policy_document" "ssm" { + version = "2012-10-17" + + statement { + effect = "Allow" + actions = ["kms:Decrypt", "kms:DescribeKey"] + resources = var.kms_key_arns + } + + statement { + effect = "Allow" + actions = [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ] + resources = [for k, v in data.aws_secretsmanager_secret.oidc_secrets : v.arn] + } +} + +resource "aws_iam_policy" "oidc-ssm" { + name = "${var.eks_cluster_name}-${var.auth_sa_name}-ssm-policy" + policy = data.aws_iam_policy_document.ssm.json +} + +module "dex-irsa" { + source = "git::git@github.com:hyperfine/terraform-aws-eks.git//modules/eks-irsa?ref=v0.48.3" + 
kubernetes_namespace = var.auth_namespace + kubernetes_service_account = var.auth_sa_name + irsa_iam_policies = [aws_iam_policy.oidc-ssm.arn] + eks_cluster_id = var.eks_cluster_name + + create_kubernetes_namespace = false + create_service_account_secret_token = true +} + +locals { + dex_module_sa = reverse(split("/", module.dex-irsa.service_account))[0] # implicit dependency +} + +resource "kubectl_manifest" "oidc-secret-class" { + yaml_body = <