Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add support for containerd to antrea-eks-node-init.yml #3840

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 23 additions & 5 deletions build/yamls/antrea-eks-node-init.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ spec:
name: host-aws-node-run-dir
containers:
- name: node-init
image: gcr.io/google-containers/startup-script:v1
image: gcr.io/google-containers/startup-script:v2
imagePullPolicy: IfNotPresent
securityContext:
privileged: true
Expand All @@ -42,7 +42,11 @@ spec:
set -o pipefail
set -o nounset

if [ -f /opt/cni/antrea-node-init-status ]; then
# The STARTUP_SCRIPT environment variable (which is set to the contents of this
# script) will be available when the script is executed :)
checkpoint_path="/opt/cni/antrea-node-init-status-$(md5sum <<<"${STARTUP_SCRIPT}" | cut -c-32)"

if [ -f "$checkpoint_path" ]; then
echo "Antrea node init already done. Exiting"
exit
fi
Expand All @@ -63,6 +67,11 @@ spec:
sleep 2s
done

echo "Detecting container runtime (docker / containerd) based on whether /var/run/docker.sock exists"
container_runtime="docker"
test -e /var/run/docker.sock || container_runtime="containerd"
echo "Container runtime: $container_runtime"

# Wait for kubelet to register the file update. Default sync time is 5sec
# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/dockershim/network/cni/cni.go#L50
sleep 5s
Expand All @@ -78,10 +87,19 @@ spec:
echo "\n"
for container_id in $(cat /var/run/aws-node/ipam.json | jq -r '.allocations | .[] | .containerID'); do
echo "Restarting container with ID: ${container_id}"
docker kill "${container_id}" || true
if [[ "$container_runtime" == "docker" ]]; then
docker kill "${container_id}" || true
else
ctr -n=k8s.io tasks kill "${container_id}" || true
fi
done

# Save the node init status, to avoid container restart in case of node-init pod restart or worker node reboot
touch /opt/cni/antrea-node-init-status
# Save the Node init status, to avoid container restart in case of node-init Pod
# restart or worker Node reboot.
# Note that gcr.io/google-containers/startup-script:v2 also includes a similar
# mechanism but it doesn't prevent the script from being run again when the Node
# restarts, since the checkpoint path is located in the /tmp folder.
# See https://github.com/kubernetes-retired/contrib/blob/master/startup-script/manage-startup-script.sh.
touch "$checkpoint_path"

echo "Node initialization completed"
61 changes: 51 additions & 10 deletions ci/test-conformance-eks.sh
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ function echoerr {
>&2 echo "$@"
}

CLUSTER=""
REGION="us-east-2"
K8S_VERSION="1.21"
AWS_NODE_TYPE="t3.medium"
Expand All @@ -32,6 +33,7 @@ KUBECONFIG_PATH="$HOME/jenkins/out/eks"
MODE="report"
TEST_SCRIPT_RC=0
KUBE_CONFORMANCE_IMAGE_VERSION=auto
INSTALL_EKSCTL=true

_usage="Usage: $0 [--cluster-name <EKSClusterNameToUse>] [--kubeconfig <KubeconfigSavePath>] [--k8s-version <ClusterVersion>]\
[--aws-access-key <AccessKey>] [--aws-secret-key <SecretKey>] [--aws-region <Region>] [--ssh-key <SSHKey] \
Expand All @@ -48,7 +50,8 @@ Setup a EKS cluster to run K8s e2e community tests (Conformance & Network Policy
--ssh-key The path of key to be used for ssh access to worker nodes.
--log-mode Use the flag to set either 'report', 'detail', or 'dump' level data for sonobouy results.
--setup-only Only perform setting up the cluster and run test.
--cleanup-only Only perform cleaning up the cluster."
--cleanup-only Only perform cleaning up the cluster.
--skip-eksctl-install Do not install the latest eksctl version. Eksctl must be installed already."

function print_usage {
echoerr "$_usage"
Expand Down Expand Up @@ -109,6 +112,10 @@ case $key in
RUN_ALL=false
shift
;;
--skip-eksctl-install)
INSTALL_EKSCTL=false
shift
;;
-h|--help)
print_usage
exit 0
Expand All @@ -120,6 +127,39 @@ case $key in
esac
done

if [[ "$CLUSTER" == "" ]]; then
echoerr "--cluster-name is required"
exit 1
fi

# Generate an eksctl ClusterConfig file that provisions a managed node group
# running containerd (via the EKS bootstrap script's --container-runtime flag).
# Globals (read): K8S_VERSION, CLUSTER, REGION, AWS_NODE_TYPE, SSH_KEY_PATH
# Outputs: writes eksctl-containerd.yaml in the current directory and prints
#          its filename to stdout (callers capture it with command substitution).
# Returns: 0 on success, 1 if the AMI lookup fails or returns nothing.
function generate_eksctl_config() {
    # Resolve the recommended EKS-optimized Amazon Linux 2 AMI for the
    # requested Kubernetes version from the public SSM parameter store.
    # Declare and assign separately so the aws exit status is not masked.
    local AMI_ID
    AMI_ID=$(aws ssm get-parameter \
        --name "/aws/service/eks/optimized-ami/${K8S_VERSION}/amazon-linux-2/recommended/image_id" \
        --query "Parameter.Value" --output text)
    if [[ $? -ne 0 || -z "$AMI_ID" ]]; then
        echoerr "Failed to look up the EKS-optimized AMI for K8s version ${K8S_VERSION}"
        return 1
    fi

    cat > eksctl-containerd.yaml <<EOF
---
apiVersion: eksctl.io/v1alpha5
kind: ClusterConfig
metadata:
  name: ${CLUSTER}
  region: ${REGION}
  version: "${K8S_VERSION}"
managedNodeGroups:
  - name: containerd
    instanceType: ${AWS_NODE_TYPE}
    desiredCapacity: 2
    ami: ${AMI_ID}
    ssh:
      allow: true
      publicKeyPath: ${SSH_KEY_PATH}
    # A custom AMI requires an explicit bootstrap command; request containerd
    # instead of the default dockerd runtime.
    overrideBootstrapCommand: |
      #!/bin/bash
      /etc/eks/bootstrap.sh ${CLUSTER} --container-runtime containerd
EOF
    echo "eksctl-containerd.yaml"
}

function setup_eks() {

echo "=== This cluster to be created is named: ${CLUSTER} ==="
Expand All @@ -139,20 +179,21 @@ ${AWS_SECRET_KEY}
${REGION}
JSON
EOF
echo "=== Installing latest version of eksctl ==="
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bin
if [[ "$INSTALL_EKSCTL" == true ]]; then
echo "=== Installing latest version of eksctl ==="
curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
sudo mv /tmp/eksctl /usr/local/bin
fi
set -e
printf "\n"
echo "=== Using the following eksctl ==="
which eksctl
echo "=== Using the following kubectl ==="
which kubectl

echo '=== Creating a cluster in EKS ==='
eksctl create cluster \
--name ${CLUSTER} --region ${REGION} --version=${K8S_VERSION} \
--nodegroup-name workers --node-type ${AWS_NODE_TYPE} --nodes 2 \
--ssh-access --ssh-public-key ${SSH_KEY_PATH} \
--managed
config="$(generate_eksctl_config)"
eksctl create cluster -f $config
if [[ $? -ne 0 ]]; then
echo "=== Failed to deploy EKS cluster! ==="
exit 1
Expand Down Expand Up @@ -203,7 +244,7 @@ function deliver_antrea_to_eks() {

kubectl get nodes -o wide --no-headers=true | awk '{print $7}' | while read IP; do
scp -o StrictHostKeyChecking=no -i ${SSH_PRIVATE_KEY_PATH} ${antrea_image}.tar ec2-user@${IP}:~
ssh -o StrictHostKeyChecking=no -i ${SSH_PRIVATE_KEY_PATH} -n ec2-user@${IP} "sudo docker load -i ~/${antrea_image}.tar ; sudo docker tag ${DOCKER_IMG_NAME}:${DOCKER_IMG_VERSION} ${DOCKER_IMG_NAME}:latest"
ssh -o StrictHostKeyChecking=no -i ${SSH_PRIVATE_KEY_PATH} -n ec2-user@${IP} "sudo ctr -n=k8s.io images import ~/${antrea_image}.tar ; sudo ctr -n=k8s.io images tag ${DOCKER_IMG_NAME}:${DOCKER_IMG_VERSION} ${DOCKER_IMG_NAME}:latest --force"
done
rm ${antrea_image}.tar

Expand Down