# Deploy Otomi — workflow file for run #2527
# (GitHub UI page-navigation residue removed; original header preserved above)
name: Deploy Otomi

# Triggers: reusable (workflow_call) with free-form string inputs, and manual
# (workflow_dispatch) exposing the same inputs constrained to choice lists.
# The two input sets must stay in sync — workflow_call callers pass the plain
# string form of whatever the dispatch choices allow.
on:
  workflow_call:
    inputs:
      kubernetes_versions:
        description: 'Kubernetes version'
        type: string
        # JSON-array-in-a-string; consumed via fromJSON() for the test matrix.
        default: "['1.31']"
      install_profile:
        description: 'Otomi installation profile'
        type: string
        default: minimal-with-team
      cluster_persistence:
        description: 'Should a cluster be destroyed on pipeline finish?'
        type: string
        default: destroy
      domain_alias:
        description: 'Select Domain Alias'
        type: string
        default: DNS-Integration
      kms:
        description: 'Should Otomi encrypt secrets in values repo (DNS or KMS is turned on)?'
        type: string
        default: age
      generate_password:
        description: 'Should a unique password be generated?'
        type: string
        # Quoted: a bare yes is a YAML 1.1 boolean.
        default: 'yes'
      certificate:
        description: 'Select certificate issuer'
        type: string
        default: letsencrypt_production
  workflow_dispatch:
    inputs:
      kubernetes_versions:
        description: 'Kubernetes version'
        type: choice
        options:
          - "['1.29']"
          - "['1.30']"
          - "['1.31']"
        default: "['1.31']"
      install_profile:
        description: Otomi installation profile
        default: minimal-with-team
        type: choice
        options:
          - minimal
          - minimal-with-team
          - monitoring-with-team
          - full
          - upgrade
          - no-otomi
      cluster_persistence:
        type: choice
        description: Should a cluster be destroyed on pipeline finish?
        options:
          - preserve
          - destroy
        default: preserve
      domain_alias:
        type: choice
        description: Select Domain Alias
        options:
          # Placeholder first entry; the preprocess job fails the run if it is
          # still selected.
          - select_your_domain_alias
          - Ani-1
          - Ani-2
          - Matthew-1
          - Matthew-2
          - Sander-1
          - Sander-2
          - Matthias-1
          - Matthias-2
          - Dennis-1
          - Dennis-2
          - Jehoszafat-1
          - Jehoszafat-2
          - Ferruh-1
          - Ferruh-2
          - Cas-1
          - Cas-2
      kms:
        type: choice
        description: Should Otomi encrypt secrets in values repo (DNS or KMS is turned on)?
        options:
          - age
          - no_kms
        default: age
      generate_password:
        type: choice
        description: Should a unique password be generated?
        options:
          - 'yes'
          - 'no'
        default: 'yes'
      certificate:
        type: choice
        description: Select certificate issuer
        options:
          - gen_custom_ca
          - letsencrypt_staging
          - letsencrypt_production
        default: letsencrypt_production
# Workflow-wide environment. Image caching goes through GHCR; bot identity
# comes from repository variables.
env:
  CACHE_REGISTRY: ghcr.io
  CACHE_REPO: linode/apl-core
  REPO: linode/apl-core
  GIT_USER: svcAPLBot
  CHECK_CONTEXT: continuous-integration/integration-test
  COMMIT_ID: '${{ github.event.pull_request.head.sha || github.sha }}'
  BOT_EMAIL: ${{ vars.BOT_EMAIL }}
  BOT_USERNAME: ${{ vars.BOT_USERNAME }}
  # NOTE(review): the domain-mapping step reads env.DEV_DOMAINS (a JSON map of
  # owner -> [domains]) but it is not defined here — confirm where it is
  # injected (repo/org variable or secret) or add it to this block.
jobs:
  # Validates and echoes the effective inputs before any cloud resources are
  # touched.
  preprocess-input:
    name: Preprocess input variables
    runs-on: ubuntu-latest
    steps:
      - name: Check Domain Alias
        # Fix: the input is named 'domain_alias'; the original compared the
        # non-existent 'inputs.select_your_domain_alias', which is always
        # empty, so this placeholder guard could never fire.
        if: ${{ inputs.domain_alias == 'select_your_domain_alias' }}
        run: |
          echo "You did not select a valid domain alias. Select one of your domain aliases from the list and try again" && exit 1
      - name: Print user input
        run: |
          echo 'ref: ${{ github.event.pull_request.head.ref || github.ref }}'
          echo 'install_profile: ${{ inputs.install_profile }}'
          echo 'kubernetes_versions: ${{ inputs.kubernetes_versions }}'
          echo 'cluster_persistence: ${{ inputs.cluster_persistence }}'
          echo 'kms: ${{ inputs.kms }}'
          echo 'domain_alias: ${{ inputs.domain_alias }}'
          echo 'generate_password: ${{ inputs.generate_password }}'
          echo 'certificate: ${{ inputs.certificate }}'
preprocess-linode-input:
needs: preprocess-input
name: Preprocess input variables for linode
runs-on: ubuntu-latest
outputs:
kubernetes_versions: ${{ steps.k8s-versions.outputs.versions }}
steps:
- name: Install the Linode CLI
uses: linode/action-linode-cli@v1
with:
token: ${{ secrets.LINODE_TOKEN }}
- name: Check if cluster is running
run: |
LINODE_CLUSTER_NAME=$(echo ${{ inputs.domain_alias }} | cut -d '.' -f1)
if [[ $(linode-cli lke clusters-list --json | jq --arg name "$LINODE_CLUSTER_NAME" '[.[] | select(.label == $name)] | length > 0') == true ]]; then
echo "There is already a running LKE cluster named "$LINODE_CLUSTER_NAME"
echo "Delete the LKE cluster before recreating it. Ending workflow..."
exit 1
fi
- id: k8s-versions
name: Process k8s version input
run: |
if [ -z '${{ inputs.kubernetes_versions }}' ]; then
echo "Kubernetes versions not specified, determine Linode supported versions"
versions=`linode-cli lke versions-list --json | jq -ce '.[] | .id'`
else
versions='${{ inputs.kubernetes_versions }}'
fi
echo $versions
echo "versions=$versions" >> $GITHUB_OUTPUT
run-integration-test-linode:
name: Run integration test on linode cluster
needs: preprocess-linode-input
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
kubernetes_versions: ${{ fromJSON(needs.preprocess-linode-input.outputs.kubernetes_versions) }}
max-parallel: 5
steps:
- name: Install the Linode CLI
uses: linode/action-linode-cli@v1
with:
token: ${{ secrets.LINODE_TOKEN }}
- name: Set k8s cluster name
run: |
echo LINODE_CLUSTER_NAME=${{ inputs.domain_alias }} >> $GITHUB_ENV
# Cluster name must be no longer than 63 characters
- name: Determine exact k8s version
run: |
echo LINODE_K8S_VERSION=$(linode-cli lke versions-list --json | jq -ce --arg version "$(echo ${{ matrix.kubernetes_versions }} | sed -E 's/^([0-9]+\.[0-9])$/\10/')" '.[] | select(.id | tostring | startswith($version)) | .id') >> $GITHUB_ENV
- name: Determine domain name to use
run: |
# Mapping of domain_alias to domain names
case "${{ inputs.domain_alias }}" in
"Ani-1") DOMAIN=$(jq '.Ani[0]' <<< "${{ env.DEV_DOMAINS }}");;
"Ani-2") DOMAIN=$(jq '.Ani[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Matthew-1") DOMAIN=$(jq '.Matthew[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Matthew-2") DOMAIN=$(jq '.Matthew[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Sander-1") DOMAIN=$(jq '.Sander[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Sander-2") DOMAIN=$(jq '.Sander[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Matthias-1") DOMAIN=$(jq '.Matthias[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Matthias-2") DOMAIN=$(jq '.Matthias[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Dennis-1") DOMAIN=$(jq '.Dennis[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Dennis-2") DOMAIN=$(jq '.Dennis[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Jehoszafat-1") DOMAIN=$(jq '.Jehoszafat[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Jehoszafat-2") DOMAIN=$(jq '.Jehoszafat[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Ferruh-1") DOMAIN=$(jq '.Ferruh[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Ferruh-2") DOMAIN=$(jq '.Ferruh[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Cas-1") DOMAIN=$(jq '.Cas[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
"Cas-2") DOMAIN=$(jq '.Cas[1]' <<< "${{ env.DEV_DOMAINS }}") ;;
"DNS-Integration") DOMAIN=$(jq '.DNS-Integration[0]' <<< "${{ env.DEV_DOMAINS }}") ;;
*)
echo "Unknown domain alias: $domain_alias"
exit 1
;;
esac
echo DOMAIN=$DOMAIN >> $GITHUB_ENV
- name: Create k8s cluster for testing
run: |
linode-cli lke cluster-create \
--label ${{ env.LINODE_CLUSTER_NAME }} \
--region nl-ams \
--k8s_version ${{ env.LINODE_K8S_VERSION }} \
--control_plane.high_availability true \
--node_pools.type g6-dedicated-8 --node_pools.count 3 \
--node_pools.autoscaler.enabled true \
--node_pools.autoscaler.max 3 \
--node_pools.autoscaler.min 3 \
--tags testing \
--no-defaults
- name: Retrieve cluster id
run: echo "LINODE_CLUSTER_ID=$(linode-cli lke clusters-list --json | jq -ce '.[] | select(.label | startswith("${{ env.LINODE_CLUSTER_NAME }}")) | .id')" >> $GITHUB_ENV
- name: Wait for cluster to be ready
run: |
echo "Waiting for the cluster to be active..."
while :; do
rawOutput=$(linode-cli lke pools-list ${{ env.LINODE_CLUSTER_ID }} --json)
allReady=$(echo "$rawOutput" | jq -r 'map(.nodes | .status == "ready") | all')
echo "All nodes ready: $allReady"
if [ "$allReady" == "true" ]; then
echo "Cluster is ready"
break
fi
sleep 30
done
- name: Save kubectl config with auth token and Get kubectl environment and create docker secret
run: |
# Get the kubeconfig from linode-cli
kubeconfig=$(linode-cli lke kubeconfig-view ${{ env.LINODE_CLUSTER_ID }} --text | sed 1d | base64 --decode)
# Save the kubeconfig to a file
kubeconfigDir="$HOME/.kube"
kubeconfigPath="$HOME/.kube/config"
mkdir -p "$kubeconfigDir" # Create the directory if it doesn't exist
echo "$kubeconfig" > "$kubeconfigPath"
echo "Kubeconfig saved to $kubeconfigPath"
# Set the kubectl context to use the new kubeconfig
export KUBECONFIG="$kubeconfigPath"
contextName=$(kubectl config get-contexts -o name | head -n 1)
kubectl config use-context "$contextName"
echo "Kubectl context set to linode"
echo LINODE_CLUSTER_CONTEXT=`kubectl config current-context` >> $GITHUB_ENV
- name: Create image pull secret on test cluster
run: |
kubectl create secret docker-registry reg-otomi-github \
--docker-server=${{ env.CACHE_REGISTRY }} \
--docker-username=${{ env.BOT_USERNAME }} \
--docker-password='${{ secrets.BOT_PULL_TOKEN }}'
- name: Checkout
uses: actions/checkout@v4
- name: Prepare Otomi chart
if: ${{ inputs.install_profile != 'no-otomi' }}
run: |
ref=${{ github.event.pull_request.head.ref || github.ref }}
tag=${ref##*/}
sed --in-place "s/APP_VERSION_PLACEHOLDER/$tag/g" chart/apl/Chart.yaml
sed --in-place "s/CONTEXT_PLACEHOLDER/${{ env.LINODE_CLUSTER_CONTEXT }}/g" tests/integration/${{ inputs.install_profile }}.yaml
sed --in-place "s/OTOMI_VERSION_PLACEHOLDER/${GITHUB_REF##*/}/g" tests/integration/${{ inputs.install_profile }}.yaml
touch values-container-registry.yaml
# If a pipeline installs Otomi from the semver tag then pull container image from DockerHub
[[ ${GITHUB_REF##*/} =~ ^v[0-9].+$ ]] && exit 0
# Pull image from cache registry
cat << EOF > values-container-registry.yaml
imageName: "${{ env.CACHE_REGISTRY }}/${{ env.CACHE_REPO }}"
imagePullSecretNames:
- reg-otomi-github
EOF
- name: Otomi install
if: ${{ inputs.install_profile != 'no-otomi' }}
env:
LETSENCRYPT_STAGING: ${{ secrets.LETSENCRYPT_STAGING }}
LETSENCRYPT_PRODUCTION: ${{ secrets.LETSENCRYPT_PRODUCTION }}
HOST: ${{ secrets.EDGEDNS_HOST }}
ACCESS_TOKEN: ${{ secrets.EDGEDNS_ACCESS_TOKEN }}
CLIENT_TOKEN: ${{ secrets.EDGEDNS_CLIENT_TOKEN }}
CLIENT_SECRET: ${{ secrets.EDGEDNS_CLIENT_SECRET }}
run: |
touch values.yaml
adminPassword=welcomeotomi
[[ '${{ inputs.certificate }}' == 'letsencrypt_staging' ]] && echo "$LETSENCRYPT_STAGING" >> values.yaml
[[ '${{ inputs.certificate }}' == 'letsencrypt_production' ]] && echo "$LETSENCRYPT_PRODUCTION" >> values.yaml
[[ '${{ inputs.kms }}' == 'age' ]] && kms="--set kms.sops.provider=age"
[[ '${{ inputs.generate_password }}' == 'yes' ]] && adminPassword="$(head /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 24)"
install_args="otomi chart/apl --wait --wait-for-jobs --timeout 90m0s \
--values tests/integration/${{ inputs.install_profile }}.yaml \
--values values-container-registry.yaml \
--values values.yaml \
--set cluster.provider=linode \
--set dns.domainFilters[0]=${{ env.DOMAIN }} \
--set dns.provider.akamai.clientSecret=${CLIENT_SECRET} \
--set dns.provider.akamai.host=${HOST} \
--set dns.provider.akamai.accessToken=${ACCESS_TOKEN} \
--set dns.provider.akamai.clientToken=${CLIENT_TOKEN} \
--set otomi.hasExternalDNS=true \
--set cluster.domainSuffix=${{ env.DOMAIN }} \
--set otomi.adminPassword=$adminPassword \
$kms"
helm install $install_args
- name: Gather k8s events on failure
if: failure()
run: |
kubectl get events --sort-by='.lastTimestamp' -A
- name: Gather k8s pods on failure
if: failure()
run: |
kubectl get pods -A -o wide
- name: Gather otomi logs on failure
if: failure()
run: |
kubectl logs jobs/otomi --tail 150
- name: Gather otomi-e2e logs on failure
if: failure()
run: |
kubectl logs -n maintenance -l app.kubernetes.io/instance=job-e2e --tail 15000
- name: Remove the test cluster
if: always()
run: |
[[ "${{ inputs.cluster_persistence }}" == "preserve" ]] && echo "The cluster ${{ env.LINODE_CLUSTER_NAME }} will NOT be destroyed!!" && exit 0
linode-cli lke cluster-delete ${{ env.LINODE_CLUSTER_ID }}
- name: Slack Notification
if: always()
uses: rtCamp/action-slack-notify@v2
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: github-ci
SLACK_COLOR: ${{ job.status }}
SLACK_ICON: https://github.com/redkubes.png?size=48
SLACK_TITLE: Scheduled integration tests
SLACK_USERNAME: RedKubesBot