[E2E] k3s test plugin #2730

Merged: 1 commit, Sep 25, 2024
2 changes: 2 additions & 0 deletions .gitignore
@@ -48,3 +48,5 @@ docs/_build

# development files
/tmp

/k3s-ansible
(diff for an additional E2E suite file; the file name was not captured in this view)
@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"

"github.com/liqotech/liqo/test/e2e/testconsts"
"github.com/liqotech/liqo/test/e2e/testutils/apiserver"
"github.com/liqotech/liqo/test/e2e/testutils/config"
"github.com/liqotech/liqo/test/e2e/testutils/tester"
@@ -41,6 +42,11 @@ const (

func TestE2E(t *testing.T) {
util.CheckIfTestIsSkipped(t, clustersRequired, testName)

if util.GetEnvironmentVariableOrDie(testconsts.InfrastructureEnvVar) == testconsts.ProviderK3s {
t.Skipf("Skipping %s test on k3s", testName)
}

RegisterFailHandler(Fail)
RunSpecs(t, "Liqo E2E Suite")
}
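For context, the guard added above reads the target infrastructure from an environment variable and skips the whole suite on k3s. A minimal sketch of how a CI job could trigger it follows; the concrete variable name behind testconsts.InfrastructureEnvVar and the value of testconsts.ProviderK3s are not shown in this diff, so INFRASTRUCTURE and k3s below are assumptions:

```bash
# Hypothetical invocation: variable names are assumptions, not taken from the diff.
export INFRASTRUCTURE=k3s    # assumed value of testconsts.ProviderK3s
export CLUSTER_NUMBER=2      # the suites also check how many clusters are available

# With the guard in place, this suite reports itself as skipped instead of failing.
go test ./test/e2e/cruise/... -run TestE2E -v
```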
16 changes: 14 additions & 2 deletions test/e2e/cruise/network/network_test.go
@@ -35,6 +35,7 @@ import (
"github.com/liqotech/liqo/pkg/gateway"
networkflags "github.com/liqotech/liqo/pkg/liqoctl/test/network/flags"
"github.com/liqotech/liqo/pkg/liqoctl/test/network/setup"
"github.com/liqotech/liqo/test/e2e/testconsts"
"github.com/liqotech/liqo/test/e2e/testutils/config"
"github.com/liqotech/liqo/test/e2e/testutils/tester"
"github.com/liqotech/liqo/test/e2e/testutils/util"
@@ -49,6 +50,11 @@ const (

func TestE2E(t *testing.T) {
util.CheckIfTestIsSkipped(t, clustersRequired, testName)

if util.GetEnvironmentVariableOrDie(testconsts.InfrastructureEnvVar) == testconsts.ProviderK3s {
t.Skipf("Skipping %s test on k3s", testName)
}

RegisterFailHandler(Fail)
RunSpecs(t, "Liqo E2E Suite")
}
@@ -96,7 +102,9 @@ var _ = BeforeSuite(func() {

switch testContext.Infrastructure {
case "cluster-api":
-ovverideArgsClusterAPI(&args)
overrideArgsClusterAPI(&args)
case "k3s":
overrideArgsK3s(&args)
case "kind":
overrideArgsKind(&args)
case "eks":
@@ -209,7 +217,11 @@ func overrideArgsFlannel(args *networkTestsArgs) {
args.nodePortNodes = networkflags.NodePortNodesWorkers
}

-func ovverideArgsClusterAPI(args *networkTestsArgs) {
func overrideArgsClusterAPI(args *networkTestsArgs) {
args.loadBalancer = false
}

func overrideArgsK3s(args *networkTestsArgs) {
args.loadBalancer = false
}

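overrideArgsK3s mirrors the cluster-api override and disables the LoadBalancer scenario, presumably because the VM-based k3s clusters provisioned by this pipeline do not expose reachable LoadBalancer IPs. A quick way to verify that assumption on a provisioned cluster (kubeconfig path taken from setup.sh further down):

```bash
# Sketch: list LoadBalancer services and check whether they ever get an external IP.
export KUBECONFIG="${TMPDIR}/kubeconfigs/liqo_kubeconf_1"
kubectl get svc -A -o wide | grep LoadBalancer || echo "no LoadBalancer services"
# An EXTERNAL-IP stuck at <pending> means the LoadBalancer test cannot pass here.
```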
6 changes: 6 additions & 0 deletions test/e2e/cruise/storage/storage_test.go
@@ -31,6 +31,7 @@ import (

liqoconst "github.com/liqotech/liqo/pkg/consts"
liqoctlmove "github.com/liqotech/liqo/pkg/liqoctl/move"
"github.com/liqotech/liqo/test/e2e/testconsts"
"github.com/liqotech/liqo/test/e2e/testutils/config"
"github.com/liqotech/liqo/test/e2e/testutils/storage"
"github.com/liqotech/liqo/test/e2e/testutils/tester"
@@ -46,6 +47,11 @@ const (

func TestE2E(t *testing.T) {
util.CheckIfTestIsSkipped(t, clustersRequired, testName)

if util.GetEnvironmentVariableOrDie(testconsts.InfrastructureEnvVar) == testconsts.ProviderK3s {
t.Skipf("Skipping %s test on k3s", testName)
}

RegisterFailHandler(Fail)
RunSpecs(t, "Liqo E2E Suite")
}
2 changes: 2 additions & 0 deletions test/e2e/pipeline/infra/k3s/.gitignore
@@ -0,0 +1,2 @@
k3s-ansible/
inventory.yml
18 changes: 18 additions & 0 deletions test/e2e/pipeline/infra/k3s/ansible-blocking-io.py
@@ -0,0 +1,18 @@
#!/usr/bin/python3

# This script works around an Ansible error seen in CI by putting the standard streams back into blocking mode.
# It is not an Ansible bug but a side effect of an unrelated change; it can be removed once the error no longer appears in CI.

import os
import sys

for handle in (sys.stdin, sys.stdout, sys.stderr):
    try:
        fd = handle.fileno()
    except Exception as e:
        print(f"Error: {e}")
        continue

    os.set_blocking(fd, True)

print("Blocking I/O is set for all file descriptors")
14 changes: 11 additions & 3 deletions test/e2e/pipeline/infra/k3s/clean.sh
@@ -26,8 +26,16 @@ error() {
}
trap 'error "${BASH_SOURCE}" "${LINENO}"' ERR

-# Cleaning all remaining clusters
CLUSTER_NAME=cluster
RUNNER_NAME=${RUNNER_NAME:-"test"}

-K3D="${BINDIR}/k3d"
TARGET_NAMESPACE="liqo-ci"

-${K3D} cluster delete --all
for i in $(seq 1 "${CLUSTER_NUMBER}");
do
K3S_CLUSTER_NAME="${RUNNER_NAME}-${CLUSTER_NAME}${i}"
echo "Deleting cluster ${K3S_CLUSTER_NAME}"
"${KUBECTL}" delete -n "${TARGET_NAMESPACE}" vms "${K3S_CLUSTER_NAME}-control-plane" --ignore-not-found
"${KUBECTL}" delete -n "${TARGET_NAMESPACE}" vms "${K3S_CLUSTER_NAME}-worker-1" --ignore-not-found
"${KUBECTL}" delete -n "${TARGET_NAMESPACE}" vms "${K3S_CLUSTER_NAME}-worker-2" --ignore-not-found
done
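clean.sh now deletes the per-runner virtual machines one by one instead of running k3d cluster delete --all. The vms and vmi resources referenced here are assumed to be KubeVirt VirtualMachines and VirtualMachineInstances living in the shared liqo-ci namespace. A small sketch to confirm nothing is left behind after cleanup:

```bash
# Sketch: list any leftover VirtualMachines/VirtualMachineInstances for this runner.
TARGET_NAMESPACE="liqo-ci"
RUNNER_NAME=${RUNNER_NAME:-"test"}
"${KUBECTL}" get vms,vmi -n "${TARGET_NAMESPACE}" --no-headers 2>/dev/null \
  | grep "${RUNNER_NAME}-cluster" \
  && echo "WARNING: leftover virtual machines found" \
  || echo "cleanup complete"
```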
48 changes: 48 additions & 0 deletions test/e2e/pipeline/infra/k3s/inventory.template.yml
@@ -0,0 +1,48 @@
---
k3s_cluster:
children:
server:
hosts:
${CONTROL_PLANE_IP}:
agent:
hosts:
${WORKER_1_IP}:
${WORKER_2_IP}:

# Required Vars
vars:
ansible_port: 22
ansible_user: ubuntu
k3s_version: ${K8S_VERSION}+k3s1
# The token should be a random string of reasonable length. You can generate
# one with the following commands:
# - openssl rand -base64 64
# - pwgen -s 64 1
# You can use ansible-vault to encrypt this value / keep it secret.
token: "changeme!"
api_endpoint: "{{ hostvars[groups['server'][0]]['ansible_host'] | default(groups['server'][0]) }}"
extra_server_args: "--cluster-cidr=${POD_CIDR} --service-cidr=${SERVICE_CIDR}"
extra_agent_args: ""

# Optional vars

# cluster_context: k3s-ansible
# api_port: 6443
# k3s_server_location: /var/lib/rancher/k3s
# systemd_dir: /etc/systemd/system
# extra_service_envs: [ 'ENV_VAR1=VALUE1', 'ENV_VAR2=VALUE2' ]
# user_kubectl: true, by default kubectl is symlinked and configured for use by ansible_user. Set to false to only allow kubectl via the root user.

# Manifests or Airgap should be either full paths or relative to the playbook directory.
# List of locally available manifests to apply to the cluster, useful for PVCs or Traefik modifications.
# extra_manifests: [ '/path/to/manifest1.yaml', '/path/to/manifest2.yaml' ]
# airgap_dir: /tmp/k3s-airgap-images

# server_config_yaml: |
# This is now an inner yaml file. Maintain the indentation.
# YAML here will be placed as the content of /etc/rancher/k3s/config.yaml
# See https://docs.k3s.io/installation/configuration#configuration-file
# registries_config_yaml: |
# Containerd can be configured to connect to private registries and use them to pull images as needed by the kubelet.
# YAML here will be placed as the content of /etc/rancher/k3s/registries.yaml
# See https://docs.k3s.io/installation/private-registry
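The template above is rendered with envsubst in setup.sh; only the node IPs, the Kubernetes version and the two CIDRs are substituted. A sketch of the rendering step with placeholder values (the real ones are exported by the pipeline, and the IPs are read from the VMIs):

```bash
# Placeholder values for illustration; setup.sh exports the real ones.
export K8S_VERSION="v1.30.0"
export POD_CIDR="10.10.0.0/16"
export SERVICE_CIDR="10.100.0.0/16"
export CONTROL_PLANE_IP="192.0.2.10"
export WORKER_1_IP="192.0.2.11"
export WORKER_2_IP="192.0.2.12"
envsubst < inventory.template.yml > inventory.yml

# As the template comment suggests, a real token can be generated with:
# openssl rand -base64 64
```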
28 changes: 18 additions & 10 deletions test/e2e/pipeline/infra/k3s/pre-requirements.sh
@@ -47,18 +47,26 @@ install_kubectl "${OS}" "${ARCH}" "${K8S_VERSION}"

install_helm "${OS}" "${ARCH}"

-K3D_VERSION="v5.4.7"
# install ansible

-echo "Downloading K3D ${K3D_VERSION}"
# ensure pipx is installed
if ! command -v pipx &> /dev/null; then
python3 -m pip install --user pipx
python3 -m pipx ensurepath --force
source "$HOME/.bashrc" || true

-if ! command -v docker &> /dev/null;
-then
-echo "MISSING REQUIREMENT: docker engine could not be found on your system. Please install docker engine to continue: https://docs.docker.com/get-docker/"
-return 1
sudo apt update
sudo apt install -y python3-venv
fi

-if [[ ! -f "${BINDIR}/k3d" ]]; then
-echo "k3d could not be found. Downloading https://k3d.sigs.k8s.io/dl/${K3D_VERSION}/k3d-${OS}-${ARCH} ..."
-curl -Lo "${BINDIR}"/k3d "https://github.com/k3d-io/k3d/releases/download/${K3D_VERSION}/k3d-${OS}-${ARCH}"
-chmod +x "${BINDIR}"/k3d
# ensure envsubst is installed
if ! command -v envsubst &> /dev/null; then
sudo apt update
sudo apt install -y gettext
fi

# ensure ansible is installed
if ! command -v ansible &> /dev/null; then
pipx install --include-deps ansible
ansible-playbook --version
fi
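pipx installs Ansible under ~/.local/bin, which pipx ensurepath only adds to the PATH of future shells; this is why both this script and setup.sh re-source ~/.bashrc. A quick check that the tools installed here are visible to the rest of the pipeline:

```bash
# Sketch: verify the prerequisites are on PATH before setup.sh runs.
command -v ansible-playbook envsubst || { echo "missing prerequisite"; exit 1; }
ansible-playbook --version | head -n 1
envsubst --version | head -n 1
```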
117 changes: 102 additions & 15 deletions test/e2e/pipeline/infra/k3s/setup.sh
@@ -26,31 +26,118 @@ error() {
}
trap 'error "${BASH_SOURCE}" "${LINENO}"' ERR

-K3D="${BINDIR}/k3d"
check_host_login() {
local host=$1
local user=$2
local key=$3
local timeout=${4:-"600"}

s=$(date +%s)
local start=${s}
while true; do
if ssh -i "${key}" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5 "${user}@${host}" exit; then
break
fi
if [[ $(( $(date +%s) - start )) -gt ${timeout} ]]; then
echo "Timeout reached while waiting for the host to be reachable"
exit 1
fi
sleep 5
done

sleep 5

# check apt is able to take the lock
start=$(date +%s)
while true; do
if ssh -i "${key}" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5 "${user}@${host}" sudo apt update; then
break
fi
if [[ $(( $(date +%s) - start )) -gt ${timeout} ]]; then
echo "Timeout reached while waiting for apt to be available"
exit 1
fi
sleep 5
done
}

# shellcheck disable=SC1091
source "$HOME/.bashrc" || true

CLUSTER_NAME=cluster
RUNNER_NAME=${RUNNER_NAME:-"test"}

TARGET_NAMESPACE="liqo-ci"

BASE_DIR=$(dirname "$0")

export SERVICE_CIDR=10.100.0.0/16
export POD_CIDR=10.200.0.0/16
export POD_CIDR_OVERLAPPING=${POD_CIDR_OVERLAPPING:-"false"}

for i in $(seq 1 "${CLUSTER_NUMBER}");
do
-if [[ ${POD_CIDR_OVERLAPPING} != "true" ]]; then
K3S_CLUSTER_NAME="${RUNNER_NAME}-${CLUSTER_NAME}${i}"
echo "Creating cluster ${K3S_CLUSTER_NAME}"
CLUSTER_NAME="$K3S_CLUSTER_NAME" envsubst < "$BASE_DIR/vms.template.yaml" | "${KUBECTL}" apply -n "${TARGET_NAMESPACE}" -f -
done

# Wait for the clusters to be ready
for i in $(seq 1 "${CLUSTER_NUMBER}");
do
K3S_CLUSTER_NAME="${RUNNER_NAME}-${CLUSTER_NAME}${i}"
"${KUBECTL}" wait --for=condition=Ready --timeout=20m vm "${K3S_CLUSTER_NAME}-control-plane" -n "${TARGET_NAMESPACE}"
"${KUBECTL}" wait --for=condition=Ready --timeout=20m vm "${K3S_CLUSTER_NAME}-worker-1" -n "${TARGET_NAMESPACE}"
"${KUBECTL}" wait --for=condition=Ready --timeout=20m vm "${K3S_CLUSTER_NAME}-worker-2" -n "${TARGET_NAMESPACE}"

"${KUBECTL}" wait --for=condition=Ready --timeout=20m vmi "${K3S_CLUSTER_NAME}-control-plane" -n "${TARGET_NAMESPACE}"
"${KUBECTL}" wait --for=condition=Ready --timeout=20m vmi "${K3S_CLUSTER_NAME}-worker-1" -n "${TARGET_NAMESPACE}"
"${KUBECTL}" wait --for=condition=Ready --timeout=20m vmi "${K3S_CLUSTER_NAME}-worker-2" -n "${TARGET_NAMESPACE}"
done

SSH_KEY_FILE="${TMPDIR}/id_rsa"
echo "${SSH_KEY_PATH}" > "${SSH_KEY_FILE}"
chmod 600 "${SSH_KEY_FILE}"

rm -rf k3s-ansible || true
git clone https://github.com/k3s-io/k3s-ansible.git
cd k3s-ansible

for i in $(seq 1 "${CLUSTER_NUMBER}");
do
K3S_CLUSTER_NAME="${RUNNER_NAME}-${CLUSTER_NAME}${i}"

if [[ ${POD_CIDR_OVERLAPPING} != "true" ]]; then
# this should avoid the IPAM reserving the pod CIDR of another cluster as the local external CIDR, which would cause remapping
export POD_CIDR="10.$((i * 10)).0.0/16"
fi
echo "Creating cluster ${CLUSTER_NAME}${i}"
${K3D} cluster create "${CLUSTER_NAME}${i}" \
--k3s-arg "--cluster-cidr=${POD_CIDR}@server:*" \
--k3s-arg "--service-cidr=${SERVICE_CIDR}@server:*" \
--no-lb \
--network k3d \
--verbose
${K3D} node create "${CLUSTER_NAME}${i}-agent" \
--cluster "${CLUSTER_NAME}${i}" \
--role agent \
--verbose
${K3D} kubeconfig write "${CLUSTER_NAME}${i}" \
--output "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}"

_CONTROL_PLANE_IP=$("${KUBECTL}" get vmi "${K3S_CLUSTER_NAME}-control-plane" -n "${TARGET_NAMESPACE}" -o jsonpath='{.status.interfaces[0].ipAddress}')
_WORKER_1_IP=$("${KUBECTL}" get vmi "${K3S_CLUSTER_NAME}-worker-1" -n "${TARGET_NAMESPACE}" -o jsonpath='{.status.interfaces[0].ipAddress}')
_WORKER_2_IP=$("${KUBECTL}" get vmi "${K3S_CLUSTER_NAME}-worker-2" -n "${TARGET_NAMESPACE}" -o jsonpath='{.status.interfaces[0].ipAddress}')
export CONTROL_PLANE_IP="${_CONTROL_PLANE_IP}"
export WORKER_1_IP="${_WORKER_1_IP}"
export WORKER_2_IP="${_WORKER_2_IP}"

check_host_login "${CONTROL_PLANE_IP}" "ubuntu" "${SSH_KEY_FILE}"
check_host_login "${WORKER_1_IP}" "ubuntu" "${SSH_KEY_FILE}"
check_host_login "${WORKER_2_IP}" "ubuntu" "${SSH_KEY_FILE}"

# if running in GitHub Actions
if [[ -n "${GITHUB_ACTIONS}" ]]; then
sudo python3 "${BASE_DIR}/ansible-blocking-io.py"
fi

ansible-playbook --version
envsubst < "$BASE_DIR/inventory.template.yml" > inventory.yml
ansible-playbook playbooks/site.yml -i inventory.yml --key-file "${SSH_KEY_FILE}"

mkdir -p "${TMPDIR}/kubeconfigs"
scp -i "${SSH_KEY_FILE}" -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ubuntu@"${CONTROL_PLANE_IP}":~/.kube/config "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}"
sed -i "s/127.0.0.1/${CONTROL_PLANE_IP}/g" "${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}"

# add default namespace to kubeconfig
KUBECONFIG="${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" "${KUBECTL}" config set-context --current --namespace=default
done

cd ..
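Once the playbook has finished, setup.sh copies each control plane's kubeconfig to ${TMPDIR}/kubeconfigs/liqo_kubeconf_<i> and rewrites its server address to the VM's IP. A short sanity check over the generated files, using only paths and variables from the script above:

```bash
# Sketch: each cluster should report one control-plane node and two Ready workers.
for i in $(seq 1 "${CLUSTER_NUMBER}"); do
  KUBECONFIG="${TMPDIR}/kubeconfigs/liqo_kubeconf_${i}" "${KUBECTL}" get nodes -o wide
done
```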