diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index efe1358..d2a4817 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -27,7 +27,10 @@
     2222,
     6000,
     7681,
-    8080
+    8080,
+    10443,
+    11443,
+    12443
   ],
   "customizations": {
     "vscode": {
diff --git a/Taskfile.yaml b/Taskfile.yaml
index dc61cd1..09f8250 100644
--- a/Taskfile.yaml
+++ b/Taskfile.yaml
@@ -15,7 +15,7 @@ vars:
   talos_patch: "{{.talos_dir}}/patch/cluster.yaml"
   talos_config_file: "{{.talos_dir}}/manifest/talosconfig"
   cluster_name: "talos-kargo-docker"
-  exposed_ports: "30590:30590/tcp"
+  exposed_ports: "30590:30590/tcp,10443:10443/tcp"
   memory: "8192"
   arch:
     sh: |
@@ -138,6 +138,67 @@ tasks:
      - source .envrc && pulumi config set --path multus.enabled false
      - source .envrc && pulumi config set --path vm.enabled false
 
+  install-gum:
+    desc: Installs the gum utility for collecting user input
+    cmds:
+      - test -e /usr/bin/gum || wget -P /tmp https://github.com/charmbracelet/gum/releases/download/v0.14.5/gum_0.14.5_amd64.deb
+      - test -e /usr/bin/gum || sudo dpkg -i /tmp/gum_0.14.5_amd64.deb
+
+  install-pen:
+    desc: Installs the pen utility for setting up port forwarding
+    cmds:
+      - |-
+        if [[ -z "${GITHUB_USER}" ]]; then
+          echo "Not running in GitHub CodeSpace"
+        else
+          echo "Running in GitHub CodeSpace"
+          test -e /usr/bin/pen || sudo apt-get update
+          test -e /usr/bin/pen || sudo apt-get install -y pen
+        fi
+
+  configure-openunison:
+    desc: "Configure OpenUnison."
+    cmds:
+      - task: install-gum
+      - task: install-pen
+      - pen 11443 127.0.0.1:10443
+      - pen 12443 127.0.0.1:10443
+      - source .envrc && pulumi stack select --create {{.pulumi_stack_identifier}} || true
+      - source .envrc && pulumi config set --path openunison.enabled true
+      - source .envrc && pulumi config set --path kubernetes_dashboard.enabled true
+      - source .envrc && pulumi config set --path openunison.github.client_id $(gum input --placeholder='GitHub OAuth2 Client Id' --header='GitHub OAuth2 Client Id')
+      - source .envrc && pulumi config set --secret --path openunison.github.client_secret $(gum input --placeholder='GitHub OAuth2 Client Secret' --header='GitHub OAuth2 Client Secret')
+      - source .envrc && pulumi config set --path openunison.github.teams $(gum input --placeholder='GitHub OAuth2 Teams' --header='GitHub OAuth2 Teams')
+      - |-
+        if [[ -n "${GITHUB_USER}" ]]; then
+          echo "Set your GitHub OAuth2 Application's 'Authorization callback URL' to https://$CODESPACE_NAME-10443.app.github.dev/auth/github"
+        fi
+
+  enable-kubevirt-manager:
+    desc: Enables the KubeVirt Manager Web UI
+    cmds:
+      - task: install-gum
+      - task: install-pen
+      - pen 13443 127.0.0.1:10443
+      - source .envrc && pulumi stack select --create {{.pulumi_stack_identifier}} || true
+      - source .envrc && pulumi config set --path kubevirt_manager.enabled true
+
+  enable-prometheus:
+    desc: Enables Prometheus
+    cmds:
+      - task: install-gum
+      - task: install-pen
+      - pen 14443 127.0.0.1:10443
+      - pen 15443 127.0.0.1:10443
+      - pen 16443 127.0.0.1:10443
+      - source .envrc && pulumi stack select --create {{.pulumi_stack_identifier}} || true
+      - source .envrc && pulumi config set --path prometheus.enabled true
+
   iac-deploy:
     desc: "Deploy Pulumi infrastructure."
     cmds:
diff --git a/docs/OPENUNISON.md b/docs/OPENUNISON.md
index deeba42..de0ea57 100644
--- a/docs/OPENUNISON.md
+++ b/docs/OPENUNISON.md
@@ -2,31 +2,65 @@
 
 # Dependencies
 
-1. Ingress NGINX (not included for now)
+1. Ingress NGINX
 
-For MVP Ingress NGINX is required - https://kubernetes.github.io/ingress-nginx/deploy/
+OpenUnison requires the NGINX Ingress controller for MVP. While several controllers are supported, we wanted to keep it simple for now. If you're running Kargo in GitHub CodeSpaces, NGINX will be configured to support port forwarding so you can access OpenUnison, and your cluster, from anywhere on the internet. If running Kargo on bare metal, you'll need to configure Cilium's `LoadBalancer` to support NGINX.
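+
+Once Kargo is up, a quick sanity check that the controller deployed (a sketch; `ingress-nginx` is the namespace the Pulumi module in this PR creates):
+
+```sh
+# confirm the NGINX Ingress controller is running and its service exists
+kubectl get pods -n ingress-nginx
+kubectl get svc -n ingress-nginx
+```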
 
-If you don't have a load balancer setup you'll want to deploy as a `DaemonSet` and update the `Deployment` or `DaemonSet` to listen on `hostPort`. First, patch the `ingress-nginx` `Namespace` to allow privileged pods:
+2. DNS suffix (Bare Metal Only)
 
-```sh
-kubectl patch namespace ingress-nginx -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}'
+OpenUnison requires a minimum of three host names. More if deploying additional platform management apps. For this reason, you'll need to create a DNS wildcard for a domain suffix to point to your load balancer. For instance, in the below examples a wildcard of \*.kargo.tremolo.dev was set up with an A record for my lab hosts. For a full explanation, see - https://openunison.github.io/deployauth/#host-names-and-networking
+
+3. GitHub Deployment
+
+Before deploying OpenUnison, you'll need to create an organization on GitHub. This is 100% free. Once you have created an organization, you can set up an OAuth App. See https://openunison.github.io/deployauth/#github for instructions.
+
+For deployments to GitHub CodeSpaces, enter a fake URL for the redirect for now. When you set up SSO in the CodeSpace, you'll be given a URL to use.
+
+For bare metal, your redirect URL will be `https://k8sou.DNS Suffix/auth/github`. You should also create a Team that you'll use for authorizing access to your lab. Keep your `client_id` and `client_secret`.
+
+# Setup
+
+## GitHub CodeSpace
+
+Once you've run `task deploy`, the next step is to run:
+
+```bash
+task configure-openunison
 ```
+You'll be asked for:
+
+1. GitHub OAuth2 Application Client ID
+2. GitHub OAuth2 Application Client Secret
+3. The name of a GitHub team in the form of org/team. For instance, `TremoloSecurity/github-demos`
+
+When the configuration is done, you'll be presented with a redirect URL. Configure your OAuth2 application with this URL as the redirect.
 
-Next, patch the `DaimonSet` / `Deployment` to listen on 80 and 443:
+Next, run the deployment again:
 
 ```sh
-kubectl patch deployments ingress-nginx-controller -n ingress-nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"controller","ports":[{"containerPort":80,"hostPort":80,"protocol":"TCP"},{"containerPort":443,"hostPort":443,"protocol":"TCP"}]}]}}}}'
+task deploy
 ```
 
-2. DNS suffix
+With the deployment completed, you need to configure three ports to enable HTTPS and public access:
 
-OpenUnison requires a minimum of three host names. More if deploying aditional platform management apps. For this reason, you'll need to create a DNS wildcard for a domain suffix to point to your nodes/load balancer. For instance, in the below examples a wildcard of \*.kargo.tremolo.dev was setup with an A record for my lab hosts. For a full explination, see - https://openunison.github.io/deployauth/#host-names-and-networking
+* 10443
+* 11443
+* 12443
 
-3. GitHub Deployment
+For each port, navigate to the ***PORTS*** tab in your VSCode window:
 
-Before deploying OpenUnison, you'll need to create an orgnaization on GitHub. This is 100% free. Once you have created an organization, you can setup an OAuth App. See https://openunison.github.io/deployauth/#github for instructions. Your redirect URL will be `https://k8sou.DNS Suffix/auth/github`. You should also create a Team that you'll use authorizing access to your lab. Keep your `client_id` and `client_secret`.
+![Ports Tab](images/ports.png "Ports Tab")
 
-# Setup
+Right-click on the port, choose ***Change Port Protocol***, and select ***HTTPS***:
+
+![HTTPS](images/https.png "HTTPS")
+
+Finally, mark the port as public by right-clicking on the port again, choosing ***Port Visibility***, and selecting ***Public***:
+
+![Public](images/public.png "Public")
+
+Repeat these steps for all three ports: 10443, 11443, and 12443.
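+
+If you'd rather script the visibility step, the GitHub CLI can set it from the CodeSpace terminal (a sketch; the protocol switch to HTTPS still happens in the ***PORTS*** tab):
+
+```sh
+# make the three OpenUnison ports publicly reachable
+gh codespace ports visibility 10443:public 11443:public 12443:public -c "$CODESPACE_NAME"
+```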
+
+## Bare Metal
 
 Enable Cert-Manager
 
@@ -72,4 +106,20 @@ If you want to allow more users to access your cluster, add them to the team you
 
 # Using OpenUnison
 
+## GitHub CodeSpace
+
+To access your cluster running in a CodeSpace, get the URL for the 10443 port. If everything is configured correctly, you'll be prompted to continue by GitHub and then to trust your GitHub Application. After that, you'll be logged in to OpenUnison with access to your cluster.
+
+### Limitations
+
+When running in CodeSpaces, any action that requires SPDY will not work. This includes:
+
+* kubectl exec
+* kubectl cp
+* kubectl port-forward
+
+You can use the terminal for any `Pod` in the Kubernetes Dashboard though.
+
+## Bare Metal
+
 See our manual - https://openunison.github.io/documentation/login-portal/
diff --git a/docs/images/https.png b/docs/images/https.png
new file mode 100644
index 0000000..9d77812
Binary files /dev/null and b/docs/images/https.png differ
diff --git a/docs/images/ports.png b/docs/images/ports.png
new file mode 100644
index 0000000..6454228
Binary files /dev/null and b/docs/images/ports.png differ
diff --git a/docs/images/public.png b/docs/images/public.png
new file mode 100644
index 0000000..a774c51
Binary files /dev/null and b/docs/images/public.png differ
diff --git a/pulumi/__main__.py b/pulumi/__main__.py
index 1866716..220f394 100644
--- a/pulumi/__main__.py
+++ b/pulumi/__main__.py
@@ -19,6 +19,8 @@ from src.ceph.deploy import deploy_rook_operator
 from src.vm.ubuntu import deploy_ubuntu_vm
 from src.vm.talos import deploy_talos_cluster
+from src.ingress_nginx.deploy import deploy_ingress_nginx
+from src.kv_manager.deploy import deploy_ui_for_kubevirt
 
 ##################################################################################
 # Load the Pulumi Config
@@ -83,6 +85,9 @@ def get_module_config(module_name):
 
 depends = []
 
+# defining a separate depends list for openunison to avoid circular dependencies
+openunison_depends = []
+
 def safe_append(depends, resource):
     if resource:
         depends.append(resource)
@@ -174,7 +179,7 @@ def run_kubevirt():
         versions["kubevirt"] = {"enabled": kubevirt_enabled, "version": kubevirt[0]}
 
         kubevirt_operator = kubevirt[1]
-        safe_append(depends, kubevirt_operator)
+        safe_append(openunison_depends, kubevirt_operator)
 
         return kubevirt, kubevirt_operator
     return None, None
@@ -324,10 +329,10 @@ def run_prometheus():
             openunison_enabled
         )
 
-        versions["prometheus"] = {"enabled": prometheus_enabled, "version": prometheus[0]}
+        versions["prometheus"] = {"enabled": prometheus_enabled, "version": prometheus[0], "release": prometheus[1]}
 
         prometheus_release = prometheus[1]
-        safe_append(depends, prometheus_release)
+        safe_append(openunison_depends, prometheus_release)
 
         return prometheus, prometheus_release
     return None, None
@@ -348,19 +353,42 @@ def run_kubernetes_dashboard():
depends, ns_name, kubernetes_dashboard_version, - k8s_provider + k8s_provider, + openunison_enabled ) - versions["kubernetes_dashboard"] = {"enabled": kubernetes_dashboard_enabled, "version": kubernetes_dashboard[0]} + versions["kubernetes_dashboard"] = {"enabled": kubernetes_dashboard_enabled, "version": kubernetes_dashboard[0], "release":kubernetes_dashboard[1]} kubernetes_dashboard_release = kubernetes_dashboard[1] - safe_append(depends, kubernetes_dashboard_release) + safe_append(openunison_depends, kubernetes_dashboard_release) return kubernetes_dashboard, kubernetes_dashboard_release return None, None kubernetes_dashboard, kubernetes_dashboard_release = run_kubernetes_dashboard() +################################################################################## +# Deploy Kubevirt Manager +def run_kubevirt_manager(): + kubevirt_manager_enabled = config_kubevirt_manager.get("enabled") or False + if kubevirt_manager_enabled: + kubevirt_manager = deploy_ui_for_kubevirt( + "kargo", + k8s_provider, + ) + + versions["kubevirt_manager"] = {"enabled": kubevirt_manager_enabled, "version": kubevirt_manager[0]} + kubevirt_manager_release = kubevirt_manager[1] + + safe_append(openunison_depends, kubevirt_manager_release) + + return kubevirt_manager, kubevirt_manager_release + + + return None, None + +kubevirt_manager, kubevirt_manager_release = run_kubevirt_manager() + ################################################################################## def run_openunison(): if openunison_enabled: @@ -369,34 +397,39 @@ def run_openunison(): domain_suffix = config_openunison.get('dns_suffix') or "kargo.arpa" cluster_issuer = config_openunison.get('cluster_issuer') or "cluster-selfsigned-issuer-ca" - config_openunison_github = config_openunison.get_object('github') or {} + config_openunison_github = config_openunison.get('github') or {} openunison_github_teams = config_openunison_github.get('teams') openunison_github_client_id = config_openunison_github.get('client_id') openunison_github_client_secret = config_openunison_github.get('client_secret') enabled = {} - if kubevirt_enabled: - enabled["kubevirt"] = {"enabled": kubevirt_enabled} + custom_depends = [] + + + # Assume ingress-nginx for OpenUnison + nginx_release, nginx_version = deploy_ingress_nginx(None,"ingress-nginx",k8s_provider) + versions["nginx"] = {"enabled": openunison_enabled, "version": nginx_version} + - if prometheus_enabled: - enabled["prometheus"] = {"enabled": prometheus_enabled} + safe_append(custom_depends,nginx_release) + + custom_depends.extend(depends) + custom_depends.extend(openunison_depends) - pulumi.export("enabled", enabled) openunison = deploy_openunison( - depends, + custom_depends, ns_name, openunison_version, k8s_provider, domain_suffix, cluster_issuer, cert_manager_selfsigned_cert, - kubernetes_dashboard_release, openunison_github_client_id, openunison_github_client_secret, openunison_github_teams, - enabled, + versions ) versions["openunison"] = {"enabled": openunison_enabled, "version": openunison[0]} @@ -405,6 +438,7 @@ def run_openunison(): safe_append(depends, openunison_release) return openunison, openunison_release + return None, None openunison, openunison_release = run_openunison() @@ -426,23 +460,7 @@ def run_rook_ceph(): rook_operator = run_rook_ceph() -################################################################################## -# Deploy Kubevirt Manager -def run_kubevirt_manager(): - kubevirt_manager_enabled = config.get_bool('kubevirt_manager.enabled') or False - if kubevirt_manager_enabled: - 
kubevirt_manager = deploy_ui_for_kubevirt( - "kargo", - k8s_provider, - kubernetes_distribution, - "kargo", - "kubevirt_manager" - ) - pulumi.export('kubevirt_manager', kubevirt_manager) - return kubevirt_manager - return None -kubevirt_manager = run_kubevirt_manager() ################################################################################## # Deploy Ubuntu VM diff --git a/pulumi/src/helm/openunison-kargo/.helmignore b/pulumi/src/helm/openunison-kargo/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/pulumi/src/helm/openunison-kargo/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/pulumi/src/helm/openunison-kargo/Chart.yaml b/pulumi/src/helm/openunison-kargo/Chart.yaml new file mode 100644 index 0000000..30940ef --- /dev/null +++ b/pulumi/src/helm/openunison-kargo/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: openunison-kargo +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.3.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0"
diff --git a/pulumi/src/helm/openunison-kargo/templates/ingresses/localhost.yaml b/pulumi/src/helm/openunison-kargo/templates/ingresses/localhost.yaml
new file mode 100644
index 0000000..a028c82
--- /dev/null
+++ b/pulumi/src/helm/openunison-kargo/templates/ingresses/localhost.yaml
@@ -0,0 +1,40 @@
+{{ if .Values.in_github_codespace }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/affinity: cookie
+    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
+    nginx.ingress.kubernetes.io/secure-backends: "true"
+    nginx.ingress.kubernetes.io/session-cookie-hash: sha1
+    nginx.ingress.kubernetes.io/session-cookie-name: {{ .Values.orchestra_service_name }}
+    nginx.org/ssl-services: {{ .Values.orchestra_service_name }}
+  labels:
+    app.kubernetes.io/component: ingress-nginx
+    app.kubernetes.io/instance: {{ .Values.orchestra_service_name }}
+    app.kubernetes.io/name: openunison
+    app.kubernetes.io/part-of: openunison
+  name: {{ .Values.orchestra_service_name }}-localhost
+  namespace: openunison
+spec:
+  rules:
+  - host: localhost
+    http:
+      paths:
+      - backend:
+          service:
+            name: {{ .Values.orchestra_service_name }}
+            port:
+              number: 443
+        path: /
+        pathType: Prefix
+  tls:
+  - hosts:
+    - localhost
+    secretName: ou-tls-certificate
+status:
+  loadBalancer:
+    ingress:
+    - hostname: localhost
+{{ end }}
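To preview what this template renders without touching a cluster, `helm template` works against the chart directory (a sketch; the release name and `orchestra_service_name` value are illustrative):

```sh
helm template openunison-kargo pulumi/src/helm/openunison-kargo \
  --set in_github_codespace=true \
  --set orchestra_service_name=openunison-orchestra
```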
"pod-security.kubernetes.io/enforce": "privileged" + } + namespace = create_namespace( + None, + ns_name, + ns_retain, + ns_protect, + k8s_provider, + custom_labels=ns_labels, + custom_annotations=ns_annotations + ) + + helm_values = { + + } + + # if we're running in GitHub codespace, run NGINX + # on port 10443 + if os.getenv("GITHUB_USER"): + helm_values["controller"] = { + "service": { + "type": "ClusterIP", + }, + "hostPort": { + "enabled": True, + "ports": { + "https": 10443, + } + }, + "extraArgs": { + "https-port": 10443, + }, + "containerPort": { + "https": 10443, + }, + "config": { + "use-forwarded-headers": "true" + } + } + + chart_name = "ingress-nginx" + chart_index_path = "index.yaml" + chart_url = "https://kubernetes.github.io/ingress-nginx" + chart_index_url = f"{chart_url}/{chart_index_path}" + + # Fetch the latest version from the helm chart index + if version is None: + version = get_latest_helm_chart_version(chart_index_url, chart_name) + version = version.lstrip("v") + pulumi.log.info(f"Setting helm release version to latest: {chart_name}/{version}") + else: + # Log the version override + pulumi.log.info(f"Using helm release version: {chart_name}/{version}") + + # Deploy the nginx chart + release = k8s.helm.v3.Release( + chart_name, + k8s.helm.v3.ReleaseArgs( + chart=chart_name, + version=version, + namespace=ns_name, + skip_await=False, + repository_opts= k8s.helm.v3.RepositoryOptsArgs( + repo=chart_url + ), + values=helm_values, + ), + opts=pulumi.ResourceOptions( + provider = k8s_provider, + parent=namespace, + depends_on=[namespace], + custom_timeouts=pulumi.CustomTimeouts( + create="8m", + update="4m", + delete="4m" + ) + ) + ) + + return release, version diff --git a/pulumi/src/kubernetes_dashboard/deploy.py b/pulumi/src/kubernetes_dashboard/deploy.py index c304b78..0a15a6c 100644 --- a/pulumi/src/kubernetes_dashboard/deploy.py +++ b/pulumi/src/kubernetes_dashboard/deploy.py @@ -2,12 +2,21 @@ import pulumi_kubernetes as k8s from src.lib.namespace import create_namespace from src.lib.helm_chart_versions import get_latest_helm_chart_version +import json + +def sanitize_name(name: str) -> str: + """Ensure the name complies with DNS-1035 and RFC 1123.""" + name = name.strip('-') + if not name: + raise ValueError("Invalid name: resulting sanitized name is empty") + return name def deploy_kubernetes_dashboard( depends: pulumi.Input[list], ns_name: str, version: str, k8s_provider: k8s.Provider, + openunison_enabled: bool ): # Create namespace @@ -41,6 +50,9 @@ def deploy_kubernetes_dashboard( # Log the version override pulumi.log.info(f"Using helm release version: {chart_name}/{version}") + + helm_values = gen_helm_values(openunison_enabled) + release = k8s.helm.v3.Release( "kubernetes-dashboard", k8s.helm.v3.ReleaseArgs( @@ -51,6 +63,7 @@ def deploy_kubernetes_dashboard( repository_opts= k8s.helm.v3.RepositoryOptsArgs( repo=chart_url ), + values=helm_values ), opts=pulumi.ResourceOptions( provider = k8s_provider, @@ -64,4 +77,123 @@ def deploy_kubernetes_dashboard( ) ) + return version, release + +def gen_helm_values(openunison_enabled: bool): + if openunison_enabled: + return { + "nginx": { + "enabled": False + }, + "kong": { + "enabled": False + }, + "api": { + "scaling": { + "replicas": 1 + }, + "containers": { + "ports": [ + { + "name": "api-tls", + "containerPort": 8001, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/tmp", + "name": "tmp-volume" + }, + { + "mountPath": "/certs", + "name": "tls" + } + ] + }, + "volumes": [ + { + "name": 
"tmp-volume", + "emptyDir": { + } + }, + { + "name": "tls", + "secret": { + "secretName": "kubernetes-dashboard-certs", + "optional": True + } + } + ] + }, + "web": { + "scaling": { + "replicas": 1 + }, + "containers": { + "ports": [ + { + "name": "api-tls", + "containerPort": 8001, + "protocol": "TCP" + } + ], + "volumeMounts": [ + { + "mountPath": "/tmp", + "name": "tmp-volume" + }, + { + "mountPath": "/certs", + "name": "tls" + } + ] + }, + "volumes": [ + { + "name": "tmp-volume", + "emptyDir": { + } + }, + { + "name": "tls", + "secret": { + "secretName": "kubernetes-dashboard-certs", + "optional": True + } + } + ] + }, + "auth": { + "scaling": { + "replicas": 0 + }, + "volumeMounts": [ + { + "mountPath": "/tmp", + "name": "tmp-volume" + }, + { + "mountPath": "/certs", + "name": "tls" + } + ], + "volumes": [ + { + "name": "tmp-volume", + "emptyDir": { + } + }, + { + "name": "tls", + "secret": { + "secretName": "kubernetes-dashboard-certs", + "optional": False + } + } + ] + } + } + else: + return {} diff --git a/pulumi/src/kv_manager/deploy.py b/pulumi/src/kv_manager/deploy.py index 9956529..df6d381 100644 --- a/pulumi/src/kv_manager/deploy.py +++ b/pulumi/src/kv_manager/deploy.py @@ -9,10 +9,11 @@ -def deploy_ui_for_kubevirt(name: str, k8s_provider: Provider, kubernetes_distribution: str, project_name: str, namespace: str): +def deploy_ui_for_kubevirt(name: str, k8s_provider: Provider): # Initialize Pulumi configuration pconfig = pulumi.Config() # There's no helm chart for kubevirt-manager so kubevirt_manager_manifest_url = 'https://raw.githubusercontent.com/kubevirt-manager/kubevirt-manager/main/kubernetes/bundled.yaml' k8s_yaml = k8s.yaml.ConfigFile("kubevirt-manager", file=kubevirt_manager_manifest_url) + return "1.4.1", k8s_yaml diff --git a/pulumi/src/openunison/deploy.py b/pulumi/src/openunison/deploy.py index 6aed38a..6ea5447 100644 --- a/pulumi/src/openunison/deploy.py +++ b/pulumi/src/openunison/deploy.py @@ -1,4 +1,5 @@ import json +import os import base64 import secrets import pulumi @@ -22,13 +23,12 @@ def deploy_openunison( domain_suffix: str, cluster_issuer: str, cert_manager_selfsigned_cert: str, - kubernetes_dashboard_release: str, ou_github_client_id: str, ou_github_client_secret: str, ou_github_teams: str, enabled ): - + kubernetes_dashboard_release = enabled["kubernetes_dashboard"]["release"] ns_retain = True ns_protect = False ns_annotations = {} @@ -36,7 +36,7 @@ def deploy_openunison( "kubernetes.io/metadata.name": ns_name } namespace = create_namespace( - depends, + None, ns_name, ns_retain, ns_protect, @@ -89,12 +89,42 @@ def deploy_openunison( ) depends.append(ou_certificate) + ou_host = "" + k8sdb_host = "" + api_server_host = "" + kubevirt_manager_host = "" + prometheus_host = "" + alertmanager_host = "" + grafana_host = "" + + running_in_gh_spaces = os.getenv("GITHUB_USER") or None + + # if running inside of Github Spaces, we'll set the hosts based on the github space name + # if it's standalone, we'll configure based on the suffix + if running_in_gh_spaces: + ou_host = os.getenv("CODESPACE_NAME") + '-10443.app.github.dev' + k8sdb_host = os.getenv("CODESPACE_NAME") + '-11443.app.github.dev' + api_server_host = os.getenv("CODESPACE_NAME") + '-12443.app.github.dev' + kubevirt_manager_host = os.getenv("CODESPACE_NAME") + '-13443.app.github.dev' + prometheus_host = os.getenv("CODESPACE_NAME") + '-14443.app.github.dev' + alertmanager_host = os.getenv("CODESPACE_NAME") + '-15443.app.github.dev' + grafana_host = os.getenv("CODESPACE_NAME") + '-16443.app.github.dev' 
+ else: + ou_host = f"k8sou.{domain_suffix}" + k8sdb_host = f"k8sdb.{domain_suffix}" + api_server_host = f"k8sapi.{domain_suffix}" + kubevirt_manager_host = f"kubevirt-manager.{domain_suffix}" + prometheus_host = f"prometheus.{domain_suffix}" + alertmanager_host = f"alertmanager.{domain_suffix}" + grafana_host = f"grafana.{domain_suffix}" + + ou_helm_values = { "enable_wait_for_job": True, "network": { - "ou_host": f"k8sou.{domain_suffix}", - "dashboard_host": f"k8sdb.{domain_suffix}", - "api_server_host": f"k8sapi.{domain_suffix}", + "openunison_host": ou_host, + "dashboard_host": k8sdb_host, + "api_server_host": api_server_host, "session_inactivity_timeout_seconds": 900, "k8s_url": "https://192.168.2.130:6443", "force_redirect_to_tls": False, @@ -113,22 +143,21 @@ def deploy_openunison( "k8s_cluster_name": "openunison-kargo", "enable_impersonation": True, "impersonation": { - "use_jetstack": True, - "explicit_certificate_trust": True + "use_jetstack": not running_in_gh_spaces, + "explicit_certificate_trust": not running_in_gh_spaces }, "dashboard": { "namespace": "kubernetes-dashboard", - "label": "app.kubernetes.io/name=kubernetes-dashboard", - "require_session": True + "label": "k8s-app=kubernetes-dashboard", + "require_session": True, + "new": True + }, "certs": { "use_k8s_cm": False }, "trusted_certs": [ - { - "name": "unison-ca", - "pem_b64": cert_manager_selfsigned_cert, - } + ], "monitoring": { "prometheus_service_account": "system:serviceaccount:monitoring:prometheus-k8s" @@ -178,6 +207,14 @@ def deploy_openunison( } } + if not running_in_gh_spaces: + ou_helm_values["trusted_certs"].append( + { + "name": "unison-ca", + "pem_b64": cert_manager_selfsigned_cert, + } + ) + # now that OpenUnison is deployed, we'll make ClusterAdmins of all the groups specified in openunison.github.teams github_teams = ou_github_teams.split(',') subjects = [] @@ -207,13 +244,16 @@ def deploy_openunison( alertmanager_icon_json = json.dumps(assets["alertmanager_icon"]) grafana_icon_json = json.dumps(assets["grafana_icon"]) - if enabled["kubevirt"]["enabled"]: + + + # if enabled["kubevirt"] and enabled["kubevirt"]["enabled"]: + if "kubevirt_manager" in enabled and enabled["kubevirt_manager"]["enabled"]: ou_helm_values["openunison"]["apps"].append( { "name": "kubevirt-manager", "label": "KubeVirt Manager", "org": "b1bf4c92-7220-4ad2-91af-ee0fe0af7312", - "badgeUrl": "https://kubeverit-manager." + domain_suffix + "/", + "badgeUrl": "https://" + kubevirt_manager_host, "injectToken": False, "proxyTo": "http://kubevirt-manager.kubevirt-manager.svc:8080${fullURI}", "az_groups": az_groups, @@ -221,13 +261,13 @@ def deploy_openunison( } ) - if enabled["prometheus"]["enabled"]: + if "prometheus" in enabled and enabled["prometheus"]["enabled"]: ou_helm_values["openunison"]["apps"].append( { "name": "prometheus", "label": "Prometheus", "org": "b1bf4c92-7220-4ad2-91af-ee0fe0af7312", - "badgeUrl": f"https://prometheus.{domain_suffix}/", + "badgeUrl": f"https://{prometheus_host}", "injectToken": False, "proxyTo": "http://prometheus.monitoring.svc:9090${fullURI}", "az_groups": az_groups, @@ -240,7 +280,7 @@ def deploy_openunison( "name": "alertmanager", "label": "Alert Manager", "org": "b1bf4c92-7220-4ad2-91af-ee0fe0af7312", - "badgeUrl": "https://alertmanager." 
+ domain_suffix + "/", + "badgeUrl": f"https://{alertmanager_host}", "injectToken": False, "proxyTo": "http://alertmanager.monitoring.svc:9093${fullURI}", "az_groups": az_groups, @@ -253,9 +293,9 @@ def deploy_openunison( "name": "grafana", "label": "Grafana", "org": "b1bf4c92-7220-4ad2-91af-ee0fe0af7312", - "badgeUrl": "https://grafana." + domain_suffix + "/", + "badgeUrl": f"https://{grafana_host}/", "injectToken": False, - "azSuccessResponse":"grafana", + "azSuccessResponse":"grafana-header", "proxyTo": "http://grafana.monitoring.svc${fullURI}", "az_groups": az_groups, "icon": f"{grafana_icon_json}", @@ -263,12 +303,16 @@ def deploy_openunison( ) ou_helm_values["dashboard"]["service_name"] = kubernetes_dashboard_release.name.apply(lambda name: sanitize_name(name)) - ou_helm_values["dashboard"]["cert_name"] = kubernetes_dashboard_release.name.apply(lambda name: sanitize_name(name + "-certs")) + ou_helm_values["dashboard"]["auth_service_name"] = kubernetes_dashboard_release.name.apply(lambda name: sanitize_name(name + '-auth')) + ou_helm_values["dashboard"]["api_service_name"] = kubernetes_dashboard_release.name.apply(lambda name: sanitize_name(name + '-api')) + ou_helm_values["dashboard"]["web_service_name"] = kubernetes_dashboard_release.name.apply(lambda name: sanitize_name(name + '-web')) + # Apply function to wait for the dashboard release names before proceeding def wait_for_dashboard_release_names(): return ou_helm_values + orchesrta_login_portal_helm_values = kubernetes_dashboard_release.name.apply(lambda _: wait_for_dashboard_release_names()) # Fetch the latest version from the helm chart index @@ -442,67 +486,57 @@ def update_values(name): ) ) + deploy_kargo_helm(running_in_gh_spaces=running_in_gh_spaces,ou_orchestra_release=ou_orchestra_release,k8s_provider=k8s_provider) + cluster_admin_cluster_role_binding = k8s.rbac.v1.ClusterRoleBinding( + "clusteradmin-clusterrolebinding", + metadata=k8s.meta.v1.ObjectMetaArgs( + name="openunison-github-cluster-admins" + ), + role_ref=k8s.rbac.v1.RoleRefArgs( + api_group="rbac.authorization.k8s.io", # The API group of the role being referenced + kind="ClusterRole", # Indicates the kind of role being referenced + name="cluster-admin" # The name of the ClusterRole you're binding + ), + subjects=subjects, + opts=pulumi.ResourceOptions( + provider = k8s_provider, + depends_on=[], + custom_timeouts=pulumi.CustomTimeouts( + create="8m", + update="10m", + delete="10m" + ) + ) + ) + return version, operator_release -# cluster_admin_cluster_role_binding = k8s.rbac.v1.ClusterRoleBinding( -# "clusteradmin-clusterrolebinding", -# metadata=k8s.meta.v1.ObjectMetaArgs( -# name="openunison-github-cluster-admins" -# ), -# role_ref=k8s.rbac.v1.RoleRefArgs( -# api_group="rbac.authorization.k8s.io", # The API group of the role being referenced -# kind="ClusterRole", # Indicates the kind of role being referenced -# name="cluster-admin" # The name of the ClusterRole you're binding -# ), -# subjects=subjects, -# opts=pulumi.ResourceOptions( -# provider = k8s_provider, -# depends_on=[], -# custom_timeouts=pulumi.CustomTimeouts( -# create="8m", -# update="10m", -# delete="10m" -# ) -# ) -# ) -# -# -# if prometheus_enabled: -# # create the Grafana ResultGroup -# ou_grafana_resultgroup = CustomResource( -# "openunison-grafana", -# api_version="openunison.tremolo.io/v1", -# kind="ResultGroup", -# metadata={ -# "labels": { -# "app.kubernetes.io/component": "openunison-resultgroups", -# "app.kubernetes.io/instance": "openunison-orchestra-login-portal", -# 
"app.kubernetes.io/name": "openunison", -# "app.kubernetes.io/part-of": "openunison" -# }, -# "name": "grafana", -# "namespace": "openunison" -# }, -# spec=[ -# { -# "resultType": "header", -# "source": "static", -# "value": "X-WEBAUTH-GROUPS=Admin" -# }, -# { -# "resultType": "header", -# "source": "user", -# "value": "X-WEBAUTH-USER=uid" -# } -# ], -# opts=pulumi.ResourceOptions( -# provider = k8s_provider, -# depends_on=[ou_orchestra_release], -# custom_timeouts=pulumi.CustomTimeouts( -# create="5m", -# update="10m", -# delete="10m" -# ) -# ) -# ) + +def deploy_kargo_helm(running_in_gh_spaces: bool,ou_orchestra_release,k8s_provider: k8s.Provider): + kargo_values = { + "in_github_codespace": running_in_gh_spaces, + "orchestra_service_name": ou_orchestra_release.name.apply(lambda name: sanitize_name('openunison-' + name)) + } + + chart_name = "kargo-openunison" + kargo_openunison_release = k8s.helm.v3.Release( + 'kargo-openunison', + k8s.helm.v3.ReleaseArgs( + chart='src/helm/openunison-kargo', + + namespace='openunison', + skip_await=False, + + values=kargo_values, + ), + opts=pulumi.ResourceOptions( + provider = k8s_provider, + depends_on=[ou_orchestra_release], + custom_timeouts=pulumi.CustomTimeouts( + create="8m", + update="10m", + delete="10m" + ) + ) + )