e2e: AWS #54

Workflow file for this run

#
# THIS FILE IS GENERATED, PLEASE DO NOT EDIT.
#
# Copyright 2022 Flant JSC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# <template: e2e_workflow_template>
name: 'e2e: AWS'
on:
workflow_dispatch:
inputs:
issue_id:
description: 'ID of issue where label was set'
required: false
issue_number:
description: 'Number of issue where label was set'
required: false
comment_id:
description: 'ID of the issue comment where the workflow run status is posted'
required: false
ci_commit_ref_name:
description: 'Git ref name for image tags'
required: false
pull_request_ref:
description: 'Git ref used to check out PR sources'
required: false
pull_request_sha:
description: 'Git SHA for restoring artifacts from cache'
required: false
pull_request_head_label:
description: 'Head label of pull request. e.g. my_repo:my_feature_branch'
required: false
cri:
description: 'A comma-separated list of CRIs to test. Available: Containerd.'
required: false
ver:
description: 'A comma-separated list of Kubernetes versions to test. Available: 1.24 through 1.28.'
required: false
initial_ref_slug:
description: 'An image tag to install first, before switching to the workflow context ref'
required: false
env:
# <template: werf_envs>
WERF_CHANNEL: "ea"
WERF_ENV: "FE"
TEST_TIMEOUT: "15m"
# Use fixed string 'sys/deckhouse-oss' for repo name. ${CI_PROJECT_PATH} is not available here in GitHub.
DEV_REGISTRY_PATH: "${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/sys/deckhouse-oss"
# Registry for additional repositories used for testing GitHub Actions workflows.
GHA_TEST_REGISTRY_PATH: "ghcr.io/${{ github.repository }}"
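# e.g. 'ghcr.io/deckhouse/deckhouse' when the workflow runs in the main repository.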
# </template: werf_envs>
# Note: no concurrency section for e2e workflows.
# Usually you run e2e and wait until it ends.
jobs:
started_at:
name: Save start timestamp
outputs:
started_at: ${{ steps.started_at.outputs.started_at }}
runs-on: "ubuntu-latest"
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: git_info_job>
git_info:
name: Get git info
runs-on: ubuntu-latest
outputs:
ci_commit_tag: ${{ steps.git_info.outputs.ci_commit_tag }}
ci_commit_branch: ${{ steps.git_info.outputs.ci_commit_branch }}
ci_commit_ref_name: ${{ steps.git_info.outputs.ci_commit_ref_name }}
ci_commit_ref_slug: ${{ steps.git_info.outputs.ci_commit_ref_slug }}
ref_full: ${{ steps.git_info.outputs.ref_full }}
github_sha: ${{ steps.git_info.outputs.github_sha }}
pr_number: ${{ steps.git_info.outputs.pr_number }}
# Skip the CI for automation PRs, e.g. changelog
if: ${{ github.event.pull_request.user.login != 'deckhouse-BOaTswain' }}
steps:
- id: git_info
name: Get tag name and SHA
uses: actions/github-script@v6.4.1
with:
script: |
const { GITHUB_REF_TYPE, GITHUB_REF_NAME, GITHUB_REF } = process.env
let refSlug = ''
let refName = ''
let refFull = ''
let githubBranch = ''
let githubTag = ''
let githubSHA = ''
let prNumber = ''
if (context.eventName === "workflow_dispatch" && context.payload.inputs && context.payload.inputs.pull_request_ref) {
// Trigger: workflow_dispatch with pull_request_ref.
// Extract pull request number from 'refs/pull/<NUM>/merge'
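// e.g. 'refs/pull/123/merge' -> prNumber '123', refSlug 'pr123' (hypothetical PR number).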
prNumber = context.payload.inputs.pull_request_ref.replace('refs/pull/', '').replace('/merge', '').replace('/head', '')
refSlug = `pr${prNumber}`
refName = context.payload.inputs.ci_commit_ref_name
refFull = context.payload.inputs.pull_request_ref
githubBranch = refName
githubSHA = context.payload.inputs.pull_request_sha
core.info(`workflow_dispatch event: set git info from inputs. inputs: ${JSON.stringify(context.payload.inputs)}`)
} else if (context.eventName === "pull_request" || context.eventName === "pull_request_target" ) {
// For PRs from forks, tag images with `prXXX` to avoid clashes between branches.
const targetRepo = context.payload.repository.full_name;
const prRepo = context.payload.pull_request.head.repo.full_name
const prRef = context.payload.pull_request.head.ref
refSlug = `pr${context.issue.number}`;
refName = (prRepo === targetRepo) ? prRef : refSlug;
refFull = `refs/pull/${context.issue.number}/head`
githubBranch = refName
githubSHA = context.payload.pull_request.head.sha
core.info(`pull request event: set git info from pull_request.head. pr:${prRepo}:${prRef} target:${targetRepo}:${context.ref}`)
prNumber = context.issue.number
} else {
// Other triggers: workflow_dispatch without pull_request_ref, schedule, push...
// refName is 'main' or tag name, so slugification is not necessary.
refSlug = GITHUB_REF_NAME
refName = GITHUB_REF_NAME
refFull = GITHUB_REF
githubTag = GITHUB_REF_TYPE == "tag" ? refName : ""
githubBranch = GITHUB_REF_TYPE == "branch" ? refName : ""
githubSHA = context.sha
core.info(`${context.eventName} event: set git info from context: ${JSON.stringify({GITHUB_REF_NAME, GITHUB_REF_TYPE, sha: context.sha })}`)
}
core.setCommandEcho(true)
core.setOutput('ci_commit_ref_slug', refSlug)
core.setOutput('ci_commit_ref_name', refName)
core.setOutput(`ci_commit_tag`, githubTag)
core.setOutput(`ci_commit_branch`, githubBranch)
core.setOutput(`ref_full`, refFull)
core.setOutput('github_sha', githubSHA)
core.setOutput('pr_number', prNumber)
core.setCommandEcho(false)
# </template: git_info_job>
# <template: check_e2e_labels_job>
check_e2e_labels:
name: Check e2e labels
runs-on: ubuntu-latest
outputs:
run_containerd_1_26: ${{ steps.check.outputs.run_containerd_1_26 }}
run_containerd_1_27: ${{ steps.check.outputs.run_containerd_1_27 }}
run_containerd_1_28: ${{ steps.check.outputs.run_containerd_1_28 }}
run_containerd_1_29: ${{ steps.check.outputs.run_containerd_1_29 }}
run_containerd_1_30: ${{ steps.check.outputs.run_containerd_1_30 }}
run_containerd_1_31: ${{ steps.check.outputs.run_containerd_1_31 }}
run_containerd_automatic: ${{ steps.check.outputs.run_containerd_automatic }}
multimaster: ${{ steps.check.outputs.multimaster }}
steps:
# <template: checkout_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
# </template: checkout_step>
- name: Check e2e labels
id: check
uses: actions/github-script@v6.4.1
with:
script: |
const provider = 'aws';
const kubernetesDefaultVersion = '1.27';
const ci = require('./.github/scripts/js/ci');
return await ci.checkE2ELabels({github, context, core, provider, kubernetesDefaultVersion});
# </template: check_e2e_labels_job>
# <template: e2e_run_job_template>
run_containerd_1_26:
name: "e2e: AWS, Containerd, Kubernetes 1.26"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_26 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifact
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.26"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.26"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.26';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
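# Example with a hypothetical run id: run 6048871572, attempt 1, Containerd, Kubernetes 1.26 -> '6048871572-1-con-1-26' after the DNS-like conversion below.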
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}")
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# For jobs that run multiple providers concurrently (daily e2e, for example),
# add a provider suffix to prevent a "directory already exists" error.
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (all lowercase, dots replaced with dashes)
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
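# At this point REPO_SUFFIX is, e.g., 'dev' for a hypothetical repo 'deckhouse/deckhouse-dev' and empty for the main repo.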
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
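# e.g. IMAGE_TAG='pr1234' in the main repo, or 'pr1234-dev' when REPO_SUFFIX='dev' (hypothetical values).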
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
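# Strip the registry host, e.g. 'registry.example.com/sys/deckhouse-oss/install:main' -> '/sys/deckhouse-oss/install:main' (hypothetical host).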
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.26"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.26"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
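# The bastion file is only populated for the Static and VCD providers; for AWS it stays empty.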
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# Sets the `should_run` output var if the e2e/failed/stay label is present.
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.26"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_26
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_26
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmppath}}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo Not a directory.
fi
if [ -n "$USER_RUNNER_ID" ]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.26';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_1_27:
name: "e2e: AWS, Containerd, Kubernetes 1.27"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_27 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifact
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.27"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.27"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.27';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
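# Example with a hypothetical run id: run 6048871572, attempt 1, Containerd, Kubernetes 1.27 -> '6048871572-1-con-1-27' after the DNS-like conversion below.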
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}")
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# For jobs that run multiple providers concurrently (daily e2e, for example),
# add a provider suffix to prevent a "directory already exists" error.
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (all lowercase, dots replaced with dashes)
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
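# At this point REPO_SUFFIX is, e.g., 'dev' for a hypothetical repo 'deckhouse/deckhouse-dev' and empty for the main repo.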
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
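# e.g. IMAGE_TAG='pr1234' in the main repo, or 'pr1234-dev' when REPO_SUFFIX='dev' (hypothetical values).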
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
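# Strip the registry host, e.g. 'registry.example.com/sys/deckhouse-oss/install:main' -> '/sys/deckhouse-oss/install:main' (hypothetical host).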
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.27"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.27"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
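# The bastion file is only populated for the Static and VCD providers; for AWS it stays empty.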
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# Sets the `should_run` output var if the e2e/failed/stay label is present.
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.27"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_27
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_27
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmppath}}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo Not a directory.
fi
if [ -n "$USER_RUNNER_ID" ]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.27';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_1_28:
name: "e2e: AWS, Containerd, Kubernetes 1.28"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_28 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifact
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.28"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.28"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.28';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
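# Example with a hypothetical run id: run 6048871572, attempt 1, Containerd, Kubernetes 1.28 -> '6048871572-1-con-1-28' after the DNS-like conversion below.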
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}")
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# For jobs that run multiple providers concurrently (daily e2e, for example),
# add a provider suffix to prevent a "directory already exists" error.
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (all lowercase, dots replaced with dashes)
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
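# At this point REPO_SUFFIX is, e.g., 'dev' for a hypothetical repo 'deckhouse/deckhouse-dev' and empty for the main repo.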
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
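# e.g. IMAGE_TAG='pr1234' in the main repo, or 'pr1234-dev' when REPO_SUFFIX='dev' (hypothetical values).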
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
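# Strip the registry host, e.g. 'registry.example.com/sys/deckhouse-oss/install:main' -> '/sys/deckhouse-oss/install:main' (hypothetical host).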
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.28"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.28"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
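# The bastion file is only populated for the Static and VCD providers; for AWS it stays empty.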
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# Sets the `should_run` output var if the e2e/failed/stay label is present.
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.28"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_28
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_28
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo "Not a directory."
fi
if [[ -n "${USER_RUNNER_ID}" ]]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.28';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_1_29:
name: "e2e: AWS, Containerd, Kubernetes 1.29"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_29 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifacts
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.29"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.29"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.29';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}"
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# for jobs which run multiple providers concurrently (daily e2e, for example)
# add provider suffix to prevent "directory already exists" error
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (lowercase all letters, replace dots with dashes),
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
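# Illustrative result (assumed values): run id 9000000001, attempt 1, CRI=Containerd, version 1.29 -> '9000000001-1-con-1-29'.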
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
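# e.g. a hypothetical 'deckhouse/deckhouse-extra' yields 'extra'; 'deckhouse/deckhouse' does not match the pattern, so the value is returned unchanged (reset below).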
if [[ "$REPO_SUFFIX" == "$GITHUB_REPOSITORY" ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
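# Illustrative: CI_COMMIT_REF_SLUG='pr1234' and REPO_SUFFIX='extra' give 'pr1234-extra'; with an empty REPO_SUFFIX the tag is just 'pr1234'.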
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
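# '#*/' strips the shortest prefix up to the first '/', i.e. the registry host; e.g. a hypothetical 'registry.example.com/sys/deckhouse-oss/install:pr1234' becomes '/sys/deckhouse-oss/install:pr1234'.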
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.29"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.29"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
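# Only the Static and VCD providers publish a bastion address file; for AWS this stays empty.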
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# sets the `should_run` output var if the e2e/failed/stay label is present
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.29"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_29
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_29
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo "Not a directory."
fi
if [[ -n "${USER_RUNNER_ID}" ]]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.29';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_1_30:
name: "e2e: AWS, Containerd, Kubernetes 1.30"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_30 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifacts
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.30"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.30"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.30';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}"
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# for jobs which run multiple providers concurrently (daily e2e, for example)
# add provider suffix to prevent "directory already exists" error
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (lowercase all letters, replace dots with dashes),
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
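# Illustrative result (assumed values): run id 9000000001, attempt 1, CRI=Containerd, version 1.30 -> '9000000001-1-con-1-30'.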
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
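# e.g. a hypothetical 'deckhouse/deckhouse-extra' yields 'extra'; 'deckhouse/deckhouse' does not match the pattern, so the value is returned unchanged (reset below).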
if [[ "$REPO_SUFFIX" == "$GITHUB_REPOSITORY" ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
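# Illustrative: CI_COMMIT_REF_SLUG='pr1234' and REPO_SUFFIX='extra' give 'pr1234-extra'; with an empty REPO_SUFFIX the tag is just 'pr1234'.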
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
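# '#*/' strips the shortest prefix up to the first '/', i.e. the registry host; e.g. a hypothetical 'registry.example.com/sys/deckhouse-oss/install:pr1234' becomes '/sys/deckhouse-oss/install:pr1234'.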
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.30"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.30"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
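# Only the Static and VCD providers publish a bastion address file; for AWS this stays empty.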
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# sets the `should_run` output var if the e2e/failed/stay label is present
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.30"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_30
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_30
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo "Not a directory."
fi
if [[ -n "${USER_RUNNER_ID}" ]]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.30';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_1_31:
name: "e2e: AWS, Containerd, Kubernetes 1.31"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_1_31 == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifacts
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;1.31"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.31"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes 1.31';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to GitHub Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX="${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}"
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# for jobs which run multiple providers concurrently (daily e2e, for example)
# add provider suffix to prevent "directory already exists" error
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like form (lowercase all letters, replace dots with dashes),
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
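# Illustrative result (assumed values): run id 9000000001, attempt 1, CRI=Containerd, version 1.31 -> '9000000001-1-con-1-31'.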
# Create tmppath for test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
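# e.g. a hypothetical 'deckhouse/deckhouse-extra' yields 'extra'; 'deckhouse/deckhouse' does not match the pattern, so the value is returned unchanged (reset below).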
if [[ "$REPO_SUFFIX" == "$GITHUB_REPOSITORY" ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and GitHub Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
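# Illustrative: CI_COMMIT_REF_SLUG='pr1234' and REPO_SUFFIX='extra' give 'pr1234-extra'; with an empty REPO_SUFFIX the tag is just 'pr1234'.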
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
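# '#*/' strips the shortest prefix up to the first '/', i.e. the registry host; e.g. a hypothetical 'registry.example.com/sys/deckhouse-oss/install:pr1234' becomes '/sys/deckhouse-oss/install:pr1234'.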
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/1.31"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.31"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
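# Only the Static and VCD providers publish a bastion address file; for AWS this stays empty.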
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# sets the `should_run` output var if the e2e/failed/stay label is present
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "1.31"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_1_31
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_1_31
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmp-dir-path }}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} -gt 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo "Not a directory."
fi
if [[ -n "${USER_RUNNER_ID}" ]]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes 1.31';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
# <template: e2e_run_job_template>
run_containerd_Automatic:
name: "e2e: AWS, Containerd, Kubernetes Automatic"
needs:
- check_e2e_labels
- git_info
if: needs.check_e2e_labels.outputs.run_containerd_Automatic == 'true'
outputs:
ssh_master_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_master_connection_string }}
ssh_bastion_connection_string: ${{ steps.check_stay_failed_cluster.outputs.ssh_bastion_connection_string }}
run_id: ${{ github.run_id }}
# needed to find the state in the artifact
cluster_prefix: ${{ steps.setup.outputs.dhctl-prefix }}
ran_for: "aws;WithoutNAT;containerd;Automatic"
failed_cluster_stayed: ${{ steps.check_stay_failed_cluster.outputs.failed_cluster_stayed }}
issue_number: ${{ inputs.issue_number }}
install_image_path: ${{ steps.setup.outputs.install-image-path }}
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "Automatic"
EVENT_LABEL: ${{ github.event.label.name }}
runs-on: [self-hosted, e2e-common]
steps:
# <template: started_at_output>
- name: Job started timestamp
id: started_at
run: |
unixTimestamp=$(date +%s)
echo "started_at=${unixTimestamp}" >> $GITHUB_OUTPUT
# </template: started_at_output>
# <template: checkout_from_event_ref_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
with:
ref: ${{ github.event.inputs.pull_request_ref || github.event.ref }}
fetch-depth: 0
# </template: checkout_from_event_ref_step>
# <template: update_comment_on_start>
- name: Update comment on start
if: ${{ github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const name = 'e2e: AWS, Containerd, Kubernetes Automatic';
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnStart({github, context, core, name})
# </template: update_comment_on_start>
# <template: login_dev_registry_step>
- name: Check dev registry credentials
id: check_dev_registry
env:
HOST: ${{secrets.DECKHOUSE_DEV_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_DEV_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to dev registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_dev_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_DEV_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_DEV_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_DEV_REGISTRY_PASSWORD }}
logout: false
# </template: login_dev_registry_step>
# <template: login_rw_registry_step>
- name: Check rw registry credentials
id: check_rw_registry
env:
HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
run: |
if [[ -n $HOST ]]; then
echo "has_credentials=true" >> $GITHUB_OUTPUT
echo "web_registry_path=${{secrets.DECKHOUSE_REGISTRY_HOST }}/deckhouse/site" >> $GITHUB_OUTPUT
fi
- name: Login to rw registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials == 'true' }}
with:
registry: ${{ secrets.DECKHOUSE_REGISTRY_HOST }}
username: ${{ secrets.DECKHOUSE_REGISTRY_USER }}
password: ${{ secrets.DECKHOUSE_REGISTRY_PASSWORD }}
logout: false
- name: Login to Github Container Registry
uses: docker/login-action@v2.1.0
if: ${{ steps.check_rw_registry.outputs.has_credentials != 'true' }}
with:
registry: ghcr.io
username: ${{ secrets.GHCR_IO_REGISTRY_USER }}
password: ${{ secrets.GHCR_IO_REGISTRY_PASSWORD }}
logout: false
# </template: login_rw_registry_step>
# <template: werf_install_step>
- name: Install werf CLI
uses: werf/actions/install@43075e4ab81952b181d33e125ef15b9c060a782e
with:
channel: ${{env.WERF_CHANNEL}}
# </template: werf_install_step>
- name: Setup
id: setup
env:
DECKHOUSE_REGISTRY_HOST: ${{secrets.DECKHOUSE_REGISTRY_HOST}}
CI_COMMIT_TAG: ${{needs.git_info.outputs.ci_commit_tag}}
CI_COMMIT_BRANCH: ${{needs.git_info.outputs.ci_commit_branch}}
CI_COMMIT_REF_SLUG: ${{needs.git_info.outputs.ci_commit_ref_slug}}
REF_FULL: ${{needs.git_info.outputs.ref_full}}
INITIAL_REF_SLUG: ${{ github.event.inputs.initial_ref_slug }}
MANUAL_RUN: "true"
MULTIMASTER: ${{ needs.check_e2e_labels.outputs.multimaster }}
run: |
# Calculate unique prefix for e2e test.
# GITHUB_RUN_ID is a unique number for each workflow run.
# GITHUB_RUN_ATTEMPT is a unique number for each attempt of a particular workflow run in a repository.
# Add CRI and KUBERNETES_VERSION to create unique directory for each job.
# CRI and PROVIDER values are trimmed to reduce prefix length.
if [[ "${KUBERNETES_VERSION}" == "Automatic" ]] ; then
KUBERNETES_VERSION_SUF="auto"
else
KUBERNETES_VERSION_SUF=${KUBERNETES_VERSION}
fi
DHCTL_PREFIX=$(echo "${GITHUB_RUN_ID}-${GITHUB_RUN_ATTEMPT}-$(echo ${CRI} | head -c 3)-${KUBERNETES_VERSION_SUF}")
if [[ "${MANUAL_RUN}" == "false" ]] ; then
# For jobs that run multiple providers concurrently (daily e2e, for example),
# add a provider suffix to prevent a "directory already exists" error.
DHCTL_PREFIX="${DHCTL_PREFIX}-$(echo ${PROVIDER} | head -c 2)"
fi
# Convert to a DNS-like name (lowercase letters, dots replaced with dashes),
# because the prefix is used for k8s resource names (nodes, for example).
DHCTL_PREFIX=$(echo "$DHCTL_PREFIX" | tr '.' '-' | tr '[:upper:]' '[:lower:]')
# Create a temporary directory for the test script.
TMP_DIR_PATH=/mnt/cloud-layouts/layouts/${DHCTL_PREFIX}
if [[ -d "${TMP_DIR_PATH}" ]] ; then
echo "Temporary dir already exists: ${TMP_DIR_PATH}. ERROR!"
ls -la ${TMP_DIR_PATH}
exit 1
else
echo "Create temporary dir for job: ${TMP_DIR_PATH}."
mkdir -p "${TMP_DIR_PATH}"
fi
## Source: ci_templates/build.yml
# Extract REPO_SUFFIX from repository name: trim prefix 'deckhouse/deckhouse-'.
REPO_SUFFIX=${GITHUB_REPOSITORY#deckhouse/deckhouse-}
if [[ $REPO_SUFFIX == $GITHUB_REPOSITORY ]] ; then
# REPO_SUFFIX should be empty for main repo 'deckhouse/deckhouse'.
REPO_SUFFIX=
fi
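# e.g. for a hypothetical repo 'deckhouse/deckhouse-extra' the trim yields REPO_SUFFIX='extra';
# for 'deckhouse/deckhouse' the prefix does not match, the trim is a no-op, and REPO_SUFFIX is reset to ''.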
# Use dev-registry for Git branches.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
# Use rw-registry for Git tags.
SEMVER_REGISTRY_PATH="${DECKHOUSE_REGISTRY_HOST}/deckhouse"
if [[ -z ${DECKHOUSE_REGISTRY_HOST:-} ]] ; then
# DECKHOUSE_REGISTRY_HOST is empty, so this repo is not the main repo.
# Use dev-registry for branches and Github Container Registry for semver tags.
BRANCH_REGISTRY_PATH="${DEV_REGISTRY_PATH}"
SEMVER_REGISTRY_PATH="${GHA_TEST_REGISTRY_PATH}"
fi
# Prepare initial image tag for deploy/deckhouse to test switching from previous release.
INITIAL_IMAGE_TAG=
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INITIAL_IMAGE_TAG=${INITIAL_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
fi
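# Above, the ${REPO_SUFFIX:+-${REPO_SUFFIX}} expansion appends '-<suffix>' only when REPO_SUFFIX
# is non-empty, e.g. (hypothetical) INITIAL_REF_SLUG=v1-50-0 with REPO_SUFFIX=extra -> 'v1-50-0-extra'.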
# Prepare image tag for deploy/deckhouse (DECKHOUSE_IMAGE_TAG option in testing/cloud_layouts/script.sh).
# CI_COMMIT_REF_SLUG is a 'prNUM' for dev branches or 'main' for default branch.
# Use it as image tag. Add suffix to not overlap with PRs in main repo.
IMAGE_TAG=${CI_COMMIT_REF_SLUG}${REPO_SUFFIX:+-${REPO_SUFFIX}}
INSTALL_IMAGE_NAME=
if [[ -n ${CI_COMMIT_BRANCH} ]]; then
# Branch build: use the install image from the branch registry.
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${IMAGE_TAG}
fi
if [[ -n ${CI_COMMIT_TAG} ]] ; then
REGISTRY_SUFFIX=$(echo ${WERF_ENV} | tr '[:upper:]' '[:lower:]') # CE/EE/FE -> ce/ee/fe
INSTALL_IMAGE_NAME=${SEMVER_REGISTRY_PATH}/${REGISTRY_SUFFIX}/install:${CI_COMMIT_REF_SLUG}
fi
if [[ -n ${INITIAL_REF_SLUG} ]] ; then
INSTALL_IMAGE_NAME=${BRANCH_REGISTRY_PATH}/install:${INITIAL_IMAGE_TAG}
git fetch origin ${INITIAL_REF_SLUG}
git checkout origin/${INITIAL_REF_SLUG} -- testing/cloud_layouts
fi
SAFE_IMAGE_NAME=$(echo ${INSTALL_IMAGE_NAME} | tr '[:lower:]' '[:upper:]')
echo "Deckhouse Deployment will use install image ${SAFE_IMAGE_NAME} to test Git ref ${REF_FULL}"
if [ "${MULTIMASTER}" == true ] ; then
MASTERS_COUNT=3
else
MASTERS_COUNT=1
fi
echo "Multimaster set ${MULTIMASTER}, MASTERS_COUNT set ${MASTERS_COUNT}"
# Print image name in uppercase to prevent hiding non-secret registry host stored in secret.
echo "⚓️ [$(date -u)] Pull 'dev/install' image '${SAFE_IMAGE_NAME}'."
docker pull "${INSTALL_IMAGE_NAME}"
IMAGE_INSTALL_PATH="/${INSTALL_IMAGE_NAME#*/}"
echo '::echo::on'
echo "tmp-dir-path=${TMP_DIR_PATH}" >> $GITHUB_OUTPUT
echo "dhctl-log-file=${TMP_DIR_PATH}/dhctl.log" >> $GITHUB_OUTPUT
echo "dhctl-prefix=${DHCTL_PREFIX}" >> $GITHUB_OUTPUT
echo "install-image-name=${INSTALL_IMAGE_NAME}" >> $GITHUB_OUTPUT
echo "deckhouse-image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "initial-image-tag=${INITIAL_IMAGE_TAG}" >> $GITHUB_OUTPUT
echo "install-image-path=${IMAGE_INSTALL_PATH}" >> $GITHUB_OUTPUT
echo "masters-count=${MASTERS_COUNT}" >> $GITHUB_OUTPUT
echo '::echo::off'
- name: "Run e2e test: AWS/Containerd/Automatic"
id: e2e_test_run
timeout-minutes: 80
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "Automatic"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count}}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh run-test' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
echo "Start waiting ssh connection string script"
comment_url="${GITHUB_API_SERVER}/repos/${REPOSITORY}/issues/comments/${COMMENT_ID}"
echo "Full comment url for updating ${comment_url}"
ssh_connect_str_file="${DHCTL_LOG_FILE}-ssh-connect_str-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "ssh_connection_str_file=${ssh_connect_str_file}" >> $GITHUB_OUTPUT
bastion_ip_file=""
if [[ "${PROVIDER}" == "Static" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-ssh-bastion-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
elif [[ "${PROVIDER}" == "VCD" ]] ; then
bastion_ip_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
fi
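# PROVIDER is AWS in this job, so neither branch matches and bastion_ip_file stays empty.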
echo "ssh_bastion_str_file=${bastion_ip_file}" >> $GITHUB_OUTPUT
$(pwd)/testing/cloud_layouts/wait-master-ssh-and-update-comment.sh "$dhctl_log_file" "$comment_url" "$ssh_connect_str_file" "$bastion_ip_file" > "${dhctl_log_file}-wait-log" 2>&1 &
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh run-test
# </template: e2e_run_template>
- name: Read connection string
if: ${{ failure() || cancelled() }}
id: check_stay_failed_cluster
uses: actions/github-script@v6.4.1
env:
SSH_CONNECT_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_connection_str_file }}
SSH_BASTION_STR_FILE: ${{ steps.e2e_test_run.outputs.ssh_bastion_str_file }}
with:
# Sets the `should_run` output var if the 'e2e/failed/stay' label is present.
script: |
const e2e_cleanup = require('./.github/scripts/js/e2e/cleanup');
await e2e_cleanup.readConnectionScript({core, context, github});
- name: Label PR if e2e failed
if: ${{ (failure() || cancelled()) && needs.git_info.outputs.pr_number }}
uses: actions-ecosystem/action-add-labels@v1
with:
github_token: ${{ secrets.BOATSWAIN_GITHUB_TOKEN }}
number: ${{ needs.git_info.outputs.pr_number }}
labels: "e2e/cluster/failed"
- name: Cleanup bootstrapped cluster
if: success()
id: cleanup_cluster
timeout-minutes: 60
env:
PROVIDER: AWS
CRI: Containerd
LAYOUT: WithoutNAT
KUBERNETES_VERSION: "Automatic"
LAYOUT_DECKHOUSE_DOCKERCFG: ${{ secrets.LAYOUT_DECKHOUSE_DOCKERCFG }}
LAYOUT_SSH_KEY: ${{ secrets.LAYOUT_SSH_KEY}}
TMP_DIR_PATH: ${{ steps.setup.outputs.tmp-dir-path}}
PREFIX: ${{ steps.setup.outputs.dhctl-prefix}}
MASTERS_COUNT: ${{ steps.setup.outputs.masters-count }}
INSTALL_IMAGE_NAME: ${{ steps.setup.outputs.install-image-name }}
DECKHOUSE_IMAGE_TAG: ${{ steps.setup.outputs.deckhouse-image-tag }}
INITIAL_IMAGE_TAG: ${{ steps.setup.outputs.initial-image-tag }}
# <template: e2e_run_template>
LAYOUT_AWS_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_ACCESS_KEY }}
LAYOUT_AWS_SECRET_ACCESS_KEY: ${{ secrets.LAYOUT_AWS_SECRET_ACCESS_KEY }}
COMMENT_ID: ${{ inputs.comment_id }}
GITHUB_API_SERVER: ${{ github.api_url }}
REPOSITORY: ${{ github.repository }}
DHCTL_LOG_FILE: ${{ steps.setup.outputs.dhctl-log-file}}
GITHUB_TOKEN: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
run: |
echo "Execute 'script.sh cleanup' via 'docker run', using environment:
INSTALL_IMAGE_NAME=${INSTALL_IMAGE_NAME}
DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG}
INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG}
PREFIX=${PREFIX}
PROVIDER=${PROVIDER}
CRI=${CRI}
LAYOUT=${LAYOUT}
KUBERNETES_VERSION=${KUBERNETES_VERSION}
TMP_DIR_PATH=${TMP_DIR_PATH}
MASTERS_COUNT=${MASTERS_COUNT}
"
ls -lh $(pwd)/testing
dhctl_log_file="${DHCTL_LOG_FILE}-${PROVIDER}-${LAYOUT}-${CRI}-${KUBERNETES_VERSION}"
echo "DHCTL log file: $dhctl_log_file"
user_runner_id=$(id -u):$(id -g)
echo "running Docker with user_runner_id $user_runner_id"
docker run --rm \
-e DECKHOUSE_DOCKERCFG=${LAYOUT_DECKHOUSE_DOCKERCFG} \
-e PREFIX=${PREFIX} \
-e MASTERS_COUNT=${MASTERS_COUNT} \
-e DECKHOUSE_IMAGE_TAG=${DECKHOUSE_IMAGE_TAG} \
-e INITIAL_IMAGE_TAG=${INITIAL_IMAGE_TAG} \
-e KUBERNETES_VERSION=${KUBERNETES_VERSION} \
-e CRI=${CRI} \
-e PROVIDER=${PROVIDER:-not_provided} \
-e MASTER_CONNECTION_STRING=${SSH_MASTER_CONNECTION_STRING:-} \
-e LAYOUT=${LAYOUT:-not_provided} \
-e DHCTL_LOG_FILE="/tmp/$(basename ${dhctl_log_file})" \
-e SSH_KEY=${LAYOUT_SSH_KEY:-not_provided} \
-e LAYOUT_AWS_ACCESS_KEY=${LAYOUT_AWS_ACCESS_KEY:-not_provided} \
-e LAYOUT_AWS_SECRET_ACCESS_KEY=${LAYOUT_AWS_SECRET_ACCESS_KEY:-not_provided} \
-e USER_RUNNER_ID=${user_runner_id} \
-e HOME=/tmp \
-v $(pwd)/testing:/deckhouse/testing \
-v $(pwd)/release.yaml:/deckhouse/release.yaml \
-v ${TMP_DIR_PATH}:/tmp \
-u ${user_runner_id} \
-w /deckhouse \
${INSTALL_IMAGE_NAME} \
bash /deckhouse/testing/cloud_layouts/script.sh cleanup
# </template: e2e_run_template>
- name: Save dhctl state
id: save_failed_cluster_state
if: ${{ failure() }}
uses: actions/upload-artifact@v4.4.0
with:
name: failed_cluster_state_aws_containerd_Automatic
path: |
${{ steps.setup.outputs.tmp-dir-path}}/dhctl
${{ steps.setup.outputs.tmp-dir-path}}/*.tfstate
${{ steps.setup.outputs.tmp-dir-path}}/logs
- name: Save test results
if: ${{ steps.setup.outputs.dhctl-log-file }}
uses: actions/upload-artifact@v4.4.0
with:
name: test_output_aws_containerd_Automatic
path: |
${{ steps.setup.outputs.dhctl-log-file}}*
${{ steps.setup.outputs.tmp-dir-path}}/logs
testing/cloud_layouts/
!testing/cloud_layouts/**/sshkey
- name: Cleanup temp directory
if: always()
env:
TMPPATH: ${{ steps.setup.outputs.tmp-dir-path}}
run: |
echo "Remove temporary directory '${TMPPATH}' ..."
if [[ -d "${TMPPATH}" && ${#TMPPATH} > 1 ]] ; then
rm -rf "${TMPPATH}"
else
echo Not a directory.
fi
if [ -n "${USER_RUNNER_ID}" ]; then
echo "Fix temp directories owner..."
chown -R $USER_RUNNER_ID "$(pwd)/testing" || true
chown -R $USER_RUNNER_ID "/deckhouse/testing" || true
chown -R $USER_RUNNER_ID /tmp || true
else
echo "Fix temp directories permissions..."
chmod -f -R 777 "$(pwd)/testing" || true
chmod -f -R 777 "/deckhouse/testing" || true
chmod -f -R 777 /tmp || true
fi
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'job,separate';
const name = 'e2e: AWS, Containerd, Kubernetes Automatic';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# </template: e2e_run_job_template>
last_comment:
name: Update comment on finish
needs: ["started_at","git_info","run_containerd_1_26","run_containerd_1_27","run_containerd_1_28","run_containerd_1_29","run_containerd_1_30","run_containerd_1_31","run_containerd_Automatic"]
if: ${{ always() }}
runs-on: ubuntu-latest
env:
JOB_NAMES: |
{"run_containerd_1_26":"e2e: AWS, Containerd, Kubernetes 1.26","run_containerd_1_27":"e2e: AWS, Containerd, Kubernetes 1.27","run_containerd_1_28":"e2e: AWS, Containerd, Kubernetes 1.28","run_containerd_1_29":"e2e: AWS, Containerd, Kubernetes 1.29","run_containerd_1_30":"e2e: AWS, Containerd, Kubernetes 1.30","run_containerd_1_31":"e2e: AWS, Containerd, Kubernetes 1.31","run_containerd_Automatic":"e2e: AWS, Containerd, Kubernetes Automatic"}
steps:
# <template: checkout_step>
- name: Checkout sources
uses: actions/checkout@v3.5.2
# </template: checkout_step>
# <template: update_comment_on_finish>
- name: Update comment on finish
id: update_comment_on_finish
if: ${{ always() && github.event_name == 'workflow_dispatch' && !!github.event.inputs.issue_number }}
env:
NEEDS_CONTEXT: ${{ toJSON(needs) }}
JOB_CONTEXT: ${{ toJSON(job) }}
STEPS_CONTEXT: ${{ toJSON(steps) }}
uses: actions/github-script@v6.4.1
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
retries: 3
script: |
const statusConfig = 'workflow,final,no-skipped,restore-separate';
const name = 'e2e: AWS';
const needsContext = JSON.parse(process.env.NEEDS_CONTEXT);
const jobContext = JSON.parse(process.env.JOB_CONTEXT);
const stepsContext = JSON.parse(process.env.STEPS_CONTEXT);
let jobNames = null
if (process.env.JOB_NAMES) {
jobNames = JSON.parse(process.env.JOB_NAMES);
}
core.info(`needsContext: ${JSON.stringify(needsContext)}`);
core.info(`jobContext: ${JSON.stringify(jobContext)}`);
core.info(`stepsContext: ${JSON.stringify(stepsContext)}`);
core.info(`jobNames: ${JSON.stringify(jobNames)}`);
const ci = require('./.github/scripts/js/ci');
return await ci.updateCommentOnFinish({github, context, core, statusConfig, name, needsContext, jobContext, stepsContext, jobNames});
# </template: update_comment_on_finish>
# <template: set_e2e_requirement_status>
- name: Set commit status after e2e run
id: set_e2e_requirement_status
if: ${{ always() }}
uses: actions/github-script@v6.4.1
env:
JOB_STATUS: ${{ job.status }}
STATUS_TARGET_COMMIT: ${{needs.git_info.outputs.github_sha}}
with:
github-token: ${{secrets.BOATSWAIN_GITHUB_TOKEN}}
script: |
const e2eStatus = require('./.github/scripts/js/e2e-commit-status');
await e2eStatus.setStatusAfterE2eRun({github, context, core});
# </template: set_e2e_requirement_status>
# </template: e2e_workflow_template>