From 36db994fe8a6a341a65dcbb1d72fa8038f7ccfbe Mon Sep 17 00:00:00 2001
From: Yecheng Fu
Date: Mon, 24 Feb 2020 13:42:15 +0800
Subject: [PATCH] support eks provider in e2e (#1728)

* support eks provider in e2e
* upgrade to kubetest2 v0.0.3
* prefix image tag with CLUSTER so that multiple clusters can be started in
  the same project/account
* upgrade kubetest2-eks to v0.0.4
  - use a unique node group name
* $RANDOM should be enough
* support KUBE_WORKERS
* fix mngName
* fix e2e bug
* specify runner suite name
* increase open files for containers automatically
* use kubetest2 v0.0.6
* --up-retries
* decrease concurrency because each node will start a lot of pods

Co-authored-by: Song Gao
---
 ci/pingcap_tidb_operator_build_kind.groovy |   8 +-
 hack/e2e.sh                                | 135 +++++++++++++++------
 hack/lib.sh                                |  33 ++++-
 hack/run-e2e.sh                            |  75 +++++++++++-
 tests/actions.go                           |   2 +-
 tests/images/e2e/Dockerfile                |   8 +-
 6 files changed, 213 insertions(+), 48 deletions(-)

diff --git a/ci/pingcap_tidb_operator_build_kind.groovy b/ci/pingcap_tidb_operator_build_kind.groovy
index 45ec44403d..b919be05d8 100644
--- a/ci/pingcap_tidb_operator_build_kind.groovy
+++ b/ci/pingcap_tidb_operator_build_kind.groovy
@@ -238,16 +238,16 @@ def call(BUILD_BRANCH, CREDENTIALS_ID, CODECOV_CREDENTIALS_ID) {
     def MIRRORS = "DOCKER_IO_MIRROR=http://172.16.4.143:5000 QUAY_IO_MIRROR=http://172.16.4.143:5001"
     def builds = [:]
     builds["E2E v1.12.10"] = {
-        build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_ ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
+        build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12 IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_ ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
     }
     builds["E2E v1.12.10 AdvancedStatefulSet"] = {
-        build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_advanced_statefulset ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]' --operator-features AdvancedStatefulSet=true", artifacts)
+        build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12-advanced-statefulset IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_advanced_statefulset ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]' --operator-features AdvancedStatefulSet=true", artifacts)
     }
     builds["E2E v1.17.0"] = {
-        build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=8 KUBE_VERSION=v1.17.0 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.17.0_ ./hack/e2e.sh -- -preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
+        build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.17 IMAGE_TAG=${GITHASH} SKIP_BUILD=y GINKGO_NODES=6 KUBE_VERSION=v1.17.0 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.17.0_ ./hack/e2e.sh -- --preload-images --ginkgo.skip='\\[Serial\\]'", artifacts)
     }
     builds["E2E v1.12.10 Serial"] = {
-        build("${MIRRORS} IMAGE_TAG=${GITHASH} SKIP_BUILD=y KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_serial_ ./hack/e2e.sh -- --preload-images --ginkgo.focus='\\[Serial\\]' --install-operator=false", artifacts)
+        build("${MIRRORS} RUNNER_SUITE_NAME=e2e-v1.12-serial IMAGE_TAG=${GITHASH} SKIP_BUILD=y KUBE_VERSION=v1.12.10 REPORT_DIR=\$(pwd)/artifacts REPORT_PREFIX=v1.12.10_serial_ ./hack/e2e.sh -- --preload-images --ginkgo.focus='\\[Serial\\]' --install-operator=false", artifacts)
     }
     builds.failFast = false
     parallel builds
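For reference, the first CI build above corresponds to a local invocation along
these lines (a sketch: the mirror variables are dropped and <githash> is a
placeholder for the actual commit hash):

    RUNNER_SUITE_NAME=e2e-v1.12 IMAGE_TAG=<githash> SKIP_BUILD=y GINKGO_NODES=6 \
        KUBE_VERSION=v1.12.10 REPORT_DIR=$(pwd)/artifacts REPORT_PREFIX=v1.12.10_ \
        ./hack/e2e.sh -- --preload-images --ginkgo.skip='\[Serial\]'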
diff --git a/hack/e2e.sh b/hack/e2e.sh
index 7411a8133e..2c7ba89233 100755
--- a/hack/e2e.sh
+++ b/hack/e2e.sh
@@ -46,31 +46,35 @@ Usage: hack/e2e.sh [-h] -- [extra test args]
 
 Environments:
 
-    PROVIDER              Kubernetes provider, e.g. kind, gke, defaults: kind
-    DOCKER_REGISTRY       image docker registry
-    IMAGE_TAG             image tag
-    CLUSTER               the name of e2e cluster, defaults: tidb-operator
-    KUBECONFIG            path to the kubeconfig file, defaults: ~/.kube/config
-    SKIP_BUILD            skip building binaries
-    SKIP_IMAGE_BUILD      skip build and push images
-    SKIP_UP               skip starting the cluster
-    SKIP_DOWN             skip shutting down the cluster
-    SKIP_TEST             skip running the test
-    KUBE_VERSION          the version of Kubernetes to test against
-    KUBE_WORKERS          the number of worker nodes (excludes master nodes), defaults: 3
-    DOCKER_IO_MIRROR      configure mirror for docker.io
-    GCR_IO_MIRROR         configure mirror for gcr.io
-    QUAY_IO_MIRROR        configure mirror for quay.io
-    KIND_DATA_HOSTPATH    (kind only) the host path of data directory for kind cluster, defaults: none
-    GCP_PROJECT           (gke only) the GCP project to run in
-    GCP_SERVICE_ACCOUNT   (gke only) the GCP service account to use
-    GCP_REGION            (gke only) the GCP region, if specified a regional cluster is creaetd
-    GCP_ZONE              (gke only) the GCP zone, if specified a zonal cluster is created
-    GCP_SSH_PRIVATE_KEY   (gke only) the path to the private ssh key
-    GCP_SSH_PUBLIC_KEY    (gke only) the path to the public ssh key
-    GINKGO_NODES          ginkgo nodes to run specs, defaults: 1
-    GINKGO_PARALLEL       if set to `y`, will run specs in parallel, the number of nodes will be the number of cpus
-    GINKGO_NO_COLOR       if set to `y`, suppress color output in default reporter
+    PROVIDER                Kubernetes provider, e.g. kind, gke, eks, defaults: kind
+    DOCKER_REGISTRY         image docker registry
+    IMAGE_TAG               image tag
+    CLUSTER                 the name of e2e cluster, defaults: tidb-operator
+    KUBECONFIG              path to the kubeconfig file, defaults: ~/.kube/config
+    SKIP_BUILD              skip building binaries
+    SKIP_IMAGE_BUILD        skip build and push images
+    SKIP_UP                 skip starting the cluster
+    SKIP_DOWN               skip shutting down the cluster
+    SKIP_TEST               skip running the test
+    KUBE_VERSION            the version of Kubernetes to test against
+    KUBE_WORKERS            the number of worker nodes (excludes master nodes), defaults: 3
+    DOCKER_IO_MIRROR        configure mirror for docker.io
+    GCR_IO_MIRROR           configure mirror for gcr.io
+    QUAY_IO_MIRROR          configure mirror for quay.io
+    KIND_DATA_HOSTPATH      (kind only) the host path of data directory for kind cluster, defaults: none
+    GCP_PROJECT             (gke only) the GCP project to run in
+    GCP_SERVICE_ACCOUNT     (gke only) the GCP service account to use
+    GCP_REGION              (gke only) the GCP region, if specified a regional cluster is created
+    GCP_ZONE                (gke only) the GCP zone, if specified a zonal cluster is created
+    GCP_SSH_PRIVATE_KEY     (gke only) the path to the private ssh key
+    GCP_SSH_PUBLIC_KEY      (gke only) the path to the public ssh key
+    AWS_ACCESS_KEY_ID       (eks only) the aws access key id
+    AWS_SECRET_ACCESS_KEY   (eks only) the aws secret access key
+    AWS_REGION              (eks only) the aws region
+    GINKGO_NODES            ginkgo nodes to run specs, defaults: 1
+    GINKGO_PARALLEL         if set to `y`, will run specs in parallel, the number of nodes will be the number of cpus
+    GINKGO_NO_COLOR         if set to `y`, suppress color output in default reporter
+    RUNNER_SUITE_NAME       the suite name of runner
 
 Examples:
 
@@ -103,10 +107,7 @@ Examples:
 
 5) run e2e with gke provider locally
 
-    You need install Google Cloud SDK first, then prepare GCP servie account
-    and configure ssh key pairs
-
-    GCP service account must be created with following permissions:
+    You need to prepare a GCP service account with the following permissions:
 
     - Compute Network Admin
     - Kubernetes Engine Admin
@@ -121,11 +122,32 @@ Examples:
 
     Then run with following additional GCP-specific environments:
 
-    export GCP_PROJECT=
-    export GCP_SERVICE_ACCOUNT=
-    export GCP_ZONE=us-central1-b
+    export GCP_PROJECT=
+    export GCP_SERVICE_ACCOUNT=
+    export GCP_ZONE=us-central1-b
+
+    PROVIDER=gke ./hack/e2e.sh --
+
+    If you run outside of the dev container started by
+    ./hack/run-in-container.sh, the Google Cloud SDK must be installed on your
+    machine.
+
+6) run e2e with eks provider locally
+
+    You need to configure your AWS credentials and region, or set them via the
+    following environments:
 
-    ./hack/e2e.sh --
+    export AWS_ACCESS_KEY_ID=
+    export AWS_SECRET_ACCESS_KEY=
+    export AWS_REGION=
+
+    Then run e2e with the eks provider:
+
+    PROVIDER=eks ./hack/e2e.sh --
+
+    If you run outside of the dev container started by
+    ./hack/run-in-container.sh, the AWS CLI must be installed on your
+    machine.
 
 EOF
@@ -166,11 +188,15 @@ GCP_REGION=${GCP_REGION:-}
 GCP_ZONE=${GCP_ZONE:-}
 GCP_SSH_PRIVATE_KEY=${GCP_SSH_PRIVATE_KEY:-}
 GCP_SSH_PUBLIC_KEY=${GCP_SSH_PUBLIC_KEY:-}
+AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-}
+AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-}
+AWS_REGION=${AWS_REGION:-}
 KUBE_VERSION=${KUBE_VERSION:-v1.12.10}
 KUBE_WORKERS=${KUBE_WORKERS:-3}
 DOCKER_IO_MIRROR=${DOCKER_IO_MIRROR:-}
 GCR_IO_MIRROR=${GCR_IO_MIRROR:-}
 QUAY_IO_MIRROR=${QUAY_IO_MIRROR:-}
+RUNNER_SUITE_NAME=${RUNNER_SUITE_NAME:-}
 
 echo "PROVIDER: $PROVIDER"
 echo "DOCKER_REGISTRY: $DOCKER_REGISTRY"
@@ -186,6 +212,9 @@ echo "GCP_PROJECT: $GCP_PROJECT"
 echo "GCP_SERVICE_ACCOUNT: $GCP_SERVICE_ACCOUNT"
 echo "GCP_REGION: $GCP_REGION"
 echo "GCP_ZONE: $GCP_ZONE"
+echo "AWS_ACCESS_KEY_ID: $AWS_ACCESS_KEY_ID"
+echo "AWS_SECRET_ACCESS_KEY: $AWS_SECRET_ACCESS_KEY"
+echo "AWS_REGION: $AWS_REGION"
 echo "KUBE_VERSION: $KUBE_VERSION"
 echo "KUBE_WORKERS: $KUBE_WORKERS"
 echo "DOCKER_IO_MIRROR: $DOCKER_IO_MIRROR"
@@ -333,6 +362,12 @@ kubetest2_args=(
     $PROVIDER
 )
 
+if [ -n "$RUNNER_SUITE_NAME" ]; then
+    kubetest2_args+=(
+        --suite-name "$RUNNER_SUITE_NAME"
+    )
+fi
+
 if [ -z "$SKIP_UP" ]; then
     kubetest2_args+=(--up)
 fi
@@ -364,6 +399,9 @@ if [ "$PROVIDER" == "kind" ]; then
     fi
     kubetest2_args+=(--image-name $image)
     kubetest2_args+=(
+        # add some retries because kind may fail to start the cluster when the
+        # load is high
+        --up-retries 3
         --cluster-name "$CLUSTER"
         --config "$tmpfile"
         --verbosity 4
@@ -384,7 +422,7 @@ elif [ "$PROVIDER" == "gke" ]; then
         echo "error: GCP_REGION or GCP_ZONE cannot be both set"
         exit 1
     fi
-    echo "info: preparing ssh keypairs for GCP"
+    echo "info: preparing ssh keypairs for GCP"
     if [ ! -d ~/.ssh ]; then
         mkdir ~/.ssh
     fi
@@ -414,6 +452,30 @@ elif [ "$PROVIDER" == "gke" ]; then
         --zone "$GCP_ZONE"
     )
     fi
+elif [ "$PROVIDER" == "eks" ]; then
+    hack::ensure_aws_k8s_tester
+    if [ -n "$AWS_REGION" ]; then
+        aws configure set default.region "$AWS_REGION"
+    fi
+    if [ -n "$AWS_ACCESS_KEY_ID" ]; then
+        aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"
+    fi
+    if [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
+        aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"
+    fi
+    mngName=$CLUSTER-mng-$RANDOM
+    export AWS_K8S_TESTER_EKS_NAME=$CLUSTER
+    export AWS_K8S_TESTER_EKS_CONFIG_PATH=/tmp/kubetest2.eks.$CLUSTER
+    export AWS_K8S_TESTER_EKS_ADD_ON_NLB_HELLO_WORLD_ENABLE="false"
+    export AWS_K8S_TESTER_EKS_ADD_ON_MANAGED_NODE_GROUPS_MNGS=$(printf '{"%s":{"name":"%s","ami-type":"AL2_x86_64","asg-min-size":%d,"asg-max-size":%d,"asg-desired-capacity":%d,"instance-types":["c5.xlarge"],"volume-size":40}}' "$mngName" "$mngName" "$KUBE_WORKERS" "$KUBE_WORKERS" "$KUBE_WORKERS")
+    # override KUBECONFIG
+    KUBECONFIG=$AWS_K8S_TESTER_EKS_CONFIG_PATH.kubeconfig.yaml
+    if [ -z "$SKIP_UP" ]; then
+        # clear the previously created private key to work around a permission issue on this file
+        if test -f $HOME/.ssh/kube_aws_rsa; then
+            rm -f $HOME/.ssh/kube_aws_rsa
+        fi
+    fi
 else
     echo "error: unsupported provider '$PROVIDER'"
     exit 1
@@ -429,6 +491,11 @@ export TIDB_OPERATOR_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
 export E2E_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator-e2e:${IMAGE_TAG}
 export PATH=$PATH:$OUTPUT_BIN
 
+# Environments for kubetest2
+if [ -n "${REPORT_DIR:-}" ]; then
+    export ARTIFACTS=${REPORT_DIR:-}
+fi
+
 hack::ensure_kubetest2
 echo "info: run 'kubetest2 ${kubetest2_args[@]} -- hack/run-e2e.sh $@'"
 $KUBETSTS2_BIN ${kubetest2_args[@]} -- hack/run-e2e.sh "$@"
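To make the managed node group configuration above concrete: with
CLUSTER=tidb-operator, KUBE_WORKERS=3, and a sample $RANDOM value of 12345
(made up for illustration), the printf in the eks branch exports:

    AWS_K8S_TESTER_EKS_ADD_ON_MANAGED_NODE_GROUPS_MNGS='{"tidb-operator-mng-12345":{"name":"tidb-operator-mng-12345","ami-type":"AL2_x86_64","asg-min-size":3,"asg-max-size":3,"asg-desired-capacity":3,"instance-types":["c5.xlarge"],"volume-size":40}}'

that is, a single managed node group whose ASG min, max, and desired sizes all
equal KUBE_WORKERS.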
diff --git a/hack/lib.sh b/hack/lib.sh
index d753775a94..c4314b59ef 100644
--- a/hack/lib.sh
+++ b/hack/lib.sh
@@ -34,10 +34,13 @@ HELM_BIN=$OUTPUT_BIN/helm
 HELM_VERSION=${HELM_VERSION:-2.9.1}
 KIND_VERSION=${KIND_VERSION:-0.7.0}
 KIND_BIN=$OUTPUT_BIN/kind
-KUBETEST2_VERSION=v0.0.1+a810685993a3e100f4c51bc346cdc05eaf753922
-KUBETEST2_GKE_VERSION=v0.0.1+a3755779de7f745733de10f9bf63e01cf0864f9d
-KUBETEST2_KIND_VERSION=v0.0.1+d8d70a33d2cc5df85786b7724ac61c221bad3e18
+KUBETEST2_VERSION=v0.0.6+81d814748ab990ecd893cd1313edfb82400752bd
+KUBETEST2_EKS_VERSION=v0.0.6+d6afb853359f35999c6aa3c06ec96cb8ebcbd032
+KUBETEST2_GKE_VERSION=v0.0.6+12f40220e086ff4d4aa86b98d05cfc62f17d9cf9
+KUBETEST2_KIND_VERSION=v0.0.6+b4be23daed89152e595dc3ad4826d104107edc62
 KUBETSTS2_BIN=$OUTPUT_BIN/kubetest2
+AWS_K8S_TESTER_VERSION=v0.6.2
+AWS_K8S_TESTER_BIN=$OUTPUT_BIN/aws-k8s-tester
 
 test -d "$OUTPUT_BIN" || mkdir -p "$OUTPUT_BIN"
@@ -168,8 +171,9 @@ function hack::__ensure_kubetest2() {
     if hack::__verify_kubetest2 $n $h; then
         return 0
     fi
-    tmpfile=$(mktemp)
+    local tmpfile=$(mktemp)
     trap "test -f $tmpfile && rm $tmpfile" RETURN
+    echo "info: downloading $n $v"
     curl --retry 10 -L -o - https://github.com/cofyc/kubetest2/releases/download/$v/$n.gz | gunzip > $tmpfile
     mv $tmpfile $OUTPUT_BIN/$n
     chmod +x $OUTPUT_BIN/$n
@@ -179,4 +183,25 @@ function hack::ensure_kubetest2() {
     hack::__ensure_kubetest2 kubetest2 $KUBETEST2_VERSION
     hack::__ensure_kubetest2 kubetest2-gke $KUBETEST2_GKE_VERSION
     hack::__ensure_kubetest2 kubetest2-kind $KUBETEST2_KIND_VERSION
+    hack::__ensure_kubetest2 kubetest2-eks $KUBETEST2_EKS_VERSION
+}
+
+function hack::verify_aws_k8s_tester() {
+    if test -x $AWS_K8S_TESTER_BIN; then
+        [[ "$($AWS_K8S_TESTER_BIN version | awk '/ReleaseVersion/ {print $2}')" == "$AWS_K8S_TESTER_VERSION" ]]
+        return
+    fi
+    return 1
+}
+
+function hack::ensure_aws_k8s_tester() {
+    if hack::verify_aws_k8s_tester; then
+        return
+    fi
+    local DOWNLOAD_URL=https://github.com/aws/aws-k8s-tester/releases/download
+    local tmpfile=$(mktemp)
+    trap "test -f $tmpfile && rm $tmpfile" RETURN
+    curl --retry 10 -L -o $tmpfile $DOWNLOAD_URL/$AWS_K8S_TESTER_VERSION/aws-k8s-tester-$AWS_K8S_TESTER_VERSION-$OS-$ARCH
+    mv $tmpfile $AWS_K8S_TESTER_BIN
+    chmod +x $AWS_K8S_TESTER_BIN
 }
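As a quick sanity check after hack::ensure_aws_k8s_tester runs, the same probe
that hack::verify_aws_k8s_tester performs can be run by hand (a sketch:
output/bin is OUTPUT_BIN in this repo, and the output of `aws-k8s-tester
version` is assumed to contain a ReleaseVersion field, as the awk pattern
implies):

    output/bin/aws-k8s-tester version | awk '/ReleaseVersion/ {print $2}'
    # expected to match the pinned v0.6.2 after a successful download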
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index c4b056a77e..d1b5341250 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -114,7 +114,7 @@ for ((i = 1; i <= 32; i++)) {
 }
 EOF
         done
-    else
+    elif [ "$PROVIDER" == "gke" ]; then
         # disks are created under /mnt/stateful_partition directory
         # https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem
         for n in $($KUBECTL_BIN --context $KUBECONTEXT get nodes -ojsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'); do
@@ -136,12 +136,52 @@
 }
 '"'"
         done
+    elif [ "$PROVIDER" == "eks" ]; then
+        while IFS=$'\n' read -r line; do
+            read -r id dns <<< $line
+            echo "info: preparing disks on $dns"
+            ssh -T -o "StrictHostKeyChecking no" -i ~/.ssh/kube_aws_rsa ec2-user@$dns <<'EOF'
+sudo bash -c '
+test -d /mnt/disks || mkdir -p /mnt/disks
+df -h /mnt/disks
+if mountpoint /mnt/disks &>/dev/null; then
+    echo "info: /mnt/disks is a mountpoint"
+else
+    echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs"
+fi
+cd /mnt/disks
+for ((i = 1; i <= 32; i++)) {
+    if [ ! -d vol$i ]; then
+        mkdir vol$i
+    fi
+    if ! mountpoint vol$i &>/dev/null; then
+        mount --bind vol$i vol$i
+    fi
+}
+echo "info: increase max open files for containers"
+if ! grep -qF "OPTIONS" /etc/sysconfig/docker; then
+    echo 'OPTIONS="--default-ulimit nofile=1024000:1024000"' >> /etc/sysconfig/docker
+fi
+systemctl restart docker
+'
+EOF
+        done <<< "$(e2e::__eks_instances)"
     fi
     echo "info: installing local-volume-provisioner"
     $KUBECTL_BIN --context $KUBECONTEXT apply -f ${ROOT}/manifests/local-dind/local-volume-provisioner.yaml
     e2e::__wait_for_ds kube-system local-volume-provisioner
 }
 
+function e2e::__eks_instances() {
+    aws ec2 describe-instances --filter Name=tag:eks:cluster-name,Values=$CLUSTER --query 'Reservations[*].Instances[*].{InstanceId:InstanceId,PublicDnsName:PublicDnsName}' --output text
+}
+
+function e2e::__ecr_url() {
+    local account_id=$(aws sts get-caller-identity | awk '/Account/ { gsub("\x27", "", $2); print $2}')
+    local region=$(aws configure get region)
+    echo "${account_id}.dkr.ecr.${region}.amazonaws.com"
+}
+
 function e2e::get_kube_version() {
     $KUBECTL_BIN --context $KUBECONTEXT version --short | awk '/Server Version:/ {print $3}'
 }
@@ -176,8 +216,8 @@ function e2e::image_load() {
     elif [ "$PROVIDER" == "gke" ]; then
         unset DOCKER_CONFIG # We don't need this and it may be read-only and cause the command to fail
         gcloud auth configure-docker
-        GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$IMAGE_TAG
-        GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$IMAGE_TAG
+        GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$CLUSTER-$IMAGE_TAG
+        GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG
         docker tag $TIDB_OPERATOR_IMAGE $GCP_TIDB_OPERATOR_IMAGE
         docker tag $E2E_IMAGE $GCP_E2E_IMAGE
         echo "info: pushing $GCP_TIDB_OPERATOR_IMAGE"
@@ -186,6 +226,28 @@ function e2e::image_load() {
         docker push $GCP_E2E_IMAGE
         TIDB_OPERATOR_IMAGE=$GCP_TIDB_OPERATOR_IMAGE
         E2E_IMAGE=$GCP_E2E_IMAGE
+    elif [ "$PROVIDER" == "eks" ]; then
+        for repoName in e2e/tidb-operator e2e/tidb-operator-e2e; do
+            local ret=0
+            aws ecr describe-repositories --repository-names $repoName || ret=$?
+            if [ $ret -ne 0 ]; then
+                echo "info: creating repository $repoName"
+                aws ecr create-repository --repository-name $repoName
+            fi
+        done
+        local ecrURL=$(e2e::__ecr_url)
+        echo "info: logging in $ecrURL"
+        aws ecr get-login-password | docker login --username AWS --password-stdin $ecrURL
+        AWS_TIDB_OPERATOR_IMAGE=$ecrURL/e2e/tidb-operator:$CLUSTER-$IMAGE_TAG
+        AWS_E2E_IMAGE=$ecrURL/e2e/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG
+        docker tag $TIDB_OPERATOR_IMAGE $AWS_TIDB_OPERATOR_IMAGE
+        docker tag $E2E_IMAGE $AWS_E2E_IMAGE
+        echo "info: pushing $AWS_TIDB_OPERATOR_IMAGE"
+        docker push $AWS_TIDB_OPERATOR_IMAGE
+        echo "info: pushing $AWS_E2E_IMAGE"
+        docker push $AWS_E2E_IMAGE
+        TIDB_OPERATOR_IMAGE=$AWS_TIDB_OPERATOR_IMAGE
+        E2E_IMAGE=$AWS_E2E_IMAGE
     else
         echo "info: unsupported provider '$PROVIDER', skip loading images"
     fi
@@ -262,6 +324,13 @@ docker_args=(
     --env KUBECONTEXT=$KUBECONTEXT
 )
 
+if [ "$PROVIDER" == "eks" ]; then
+    # AWS credentials are required to get a token for the EKS cluster
+    docker_args+=(
+        -v $HOME/.aws:/root/.aws
+    )
+fi
+
 if [ -n "$REPORT_DIR" ]; then
     docker_args+=(
         -v $REPORT_DIR:$REPORT_DIR
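For illustration, with a hypothetical AWS account id 123456789012 and region
us-west-2, plus CLUSTER=tidb-operator and IMAGE_TAG=abc1234, e2e::__ecr_url
and the eks branch of e2e::image_load above yield:

    # e2e::__ecr_url
    123456789012.dkr.ecr.us-west-2.amazonaws.com
    # images pushed by e2e::image_load; the tag is prefixed with $CLUSTER so
    # that multiple clusters can share one account without colliding
    123456789012.dkr.ecr.us-west-2.amazonaws.com/e2e/tidb-operator:tidb-operator-abc1234
    123456789012.dkr.ecr.us-west-2.amazonaws.com/e2e/tidb-operator-e2e:tidb-operator-abc1234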
$2}')" == "$AWS_K8S_TESTER_VERSION" ]] + return + fi + return 1 +} + +function hack::ensure_aws_k8s_tester() { + if hack::verify_aws_k8s_tester; then + return + fi + local DOWNLOAD_URL=https://github.com/aws/aws-k8s-tester/releases/download + local tmpfile=$(mktemp) + trap "test -f $tmpfile && rm $tmpfile" RETURN + curl --retry 10 -L -o $tmpfile https://github.com/aws/aws-k8s-tester/releases/download/$AWS_K8S_TESTER_VERSION/aws-k8s-tester-$AWS_K8S_TESTER_VERSION-$OS-$ARCH + mv $tmpfile $AWS_K8S_TESTER_BIN + chmod +x $AWS_K8S_TESTER_BIN } diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh index c4b056a77e..d1b5341250 100755 --- a/hack/run-e2e.sh +++ b/hack/run-e2e.sh @@ -114,7 +114,7 @@ for ((i = 1; i <= 32; i++)) { } EOF done - else + elif [ "$PROVIDER" == "gke" ]; then # disks are created under /mnt/stateful_partition directory # https://cloud.google.com/container-optimized-os/docs/concepts/disks-and-filesystem for n in $($KUBECTL_BIN --context $KUBECONTEXT get nodes -ojsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'); do @@ -136,12 +136,52 @@ for ((i = 1; i <= 32; i++)) { } '"'" done + elif [ "$PROVIDER" == "eks" ]; then + while IFS=$'\n' read -r line; do + read -r id dns <<< $line + echo "info: prepare disks on $dns" + ssh -T -o "StrictHostKeyChecking no" -i ~/.ssh/kube_aws_rsa ec2-user@$dns <<'EOF' +sudo bash -c ' +test -d /mnt/disks || mkdir -p /mnt/disks +df -h /mnt/disks +if mountpoint /mnt/disks &>/dev/null; then + echo "info: /mnt/disks is a mountpoint" +else + echo "info: /mnt/disks is not a mountpoint, creating local volumes on the rootfs" +fi +cd /mnt/disks +for ((i = 1; i <= 32; i++)) { + if [ ! -d vol$i ]; then + mkdir vol$i + fi + if ! mountpoint vol$i &>/dev/null; then + mount --bind vol$i vol$i + fi +} +echo "info: increase max open files for containers" +if ! 
grep -qF "OPTIONS" /etc/sysconfig/docker; then + echo 'OPTIONS="--default-ulimit nofile=1024000:1024000"' >> /etc/sysconfig/docker +fi +systemctl restart docker +' +EOF + done <<< "$(e2e::__eks_instances)" fi echo "info: installing local-volume-provisioner" $KUBECTL_BIN --context $KUBECONTEXT apply -f ${ROOT}/manifests/local-dind/local-volume-provisioner.yaml e2e::__wait_for_ds kube-system local-volume-provisioner } +function e2e::__eks_instances() { + aws ec2 describe-instances --filter Name=tag:eks:cluster-name,Values=$CLUSTER --query 'Reservations[*].Instances[*].{InstanceId:InstanceId,PublicDnsName:PublicDnsName}' --output text +} + +function e2e::__ecr_url() { + local account_id=$(aws sts get-caller-identity | awk '/Account/ { gsub("\x27", "", $2); print $2}') + local region=$(aws configure get region) + echo "${account_id}.dkr.ecr.${region}.amazonaws.com" +} + function e2e::get_kube_version() { $KUBECTL_BIN --context $KUBECONTEXT version --short | awk '/Server Version:/ {print $3}' } @@ -176,8 +216,8 @@ function e2e::image_load() { elif [ "$PROVIDER" == "gke" ]; then unset DOCKER_CONFIG # We don't need this and it may be read-only and fail the command to fail gcloud auth configure-docker - GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$IMAGE_TAG - GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$IMAGE_TAG + GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$CLUSTER-$IMAGE_TAG + GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG docker tag $TIDB_OPERATOR_IMAGE $GCP_TIDB_OPERATOR_IMAGE docker tag $E2E_IMAGE $GCP_E2E_IMAGE echo "info: pushing $GCP_TIDB_OPERATOR_IMAGE" @@ -186,6 +226,28 @@ function e2e::image_load() { docker push $GCP_E2E_IMAGE TIDB_OPERATOR_IMAGE=$GCP_TIDB_OPERATOR_IMAGE E2E_IMAGE=$GCP_E2E_IMAGE + elif [ "$PROVIDER" == "eks" ]; then + for repoName in e2e/tidb-operator e2e/tidb-operator-e2e; do + local ret=0 + aws ecr describe-repositories --repository-names $repoName || ret=$? 
+ if [ $ret -ne 0 ]; then + echo "info: creating repository $repoName" + aws ecr create-repository --repository-name $repoName + fi + done + local ecrURL=$(e2e::__ecr_url) + echo "info: logging in $ecrURL" + aws ecr get-login-password | docker login --username AWS --password-stdin $ecrURL + AWS_TIDB_OPERATOR_IMAGE=$ecrURL/e2e/tidb-operator:$CLUSTER-$IMAGE_TAG + AWS_E2E_IMAGE=$ecrURL/e2e/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG + docker tag $TIDB_OPERATOR_IMAGE $AWS_TIDB_OPERATOR_IMAGE + docker tag $E2E_IMAGE $AWS_E2E_IMAGE + echo "info: pushing $AWS_TIDB_OPERATOR_IMAGE" + docker push $AWS_TIDB_OPERATOR_IMAGE + echo "info: pushing $AWS_E2E_IMAGE" + docker push $AWS_E2E_IMAGE + TIDB_OPERATOR_IMAGE=$AWS_TIDB_OPERATOR_IMAGE + E2E_IMAGE=$AWS_E2E_IMAGE else echo "info: unsupported provider '$PROVIDER', skip loading images" fi @@ -262,6 +324,13 @@ docker_args=( --env KUBECONTEXT=$KUBECONTEXT ) +if [ "$PROVIDER" == "eks" ]; then + # aws credential is required to get token for EKS + docker_args+=( + -v $HOME/.aws:/root/.aws + ) +fi + if [ -n "$REPORT_DIR" ]; then docker_args+=( -v $REPORT_DIR:$REPORT_DIR diff --git a/tests/actions.go b/tests/actions.go index ce21dc51d2..95639de4d9 100644 --- a/tests/actions.go +++ b/tests/actions.go @@ -2297,7 +2297,7 @@ func (oa *operatorActions) DeployAdHocBackup(info *TidbClusterConfig) error { ) glog.Info(getTSCmd) - res, err := exec.Command("/bin/sh", "-c", getTSCmd).CombinedOutput() + res, err := exec.Command("/bin/bash", "-c", getTSCmd).CombinedOutput() if err != nil { glog.Errorf("failed to get ts %v, %s", err, string(res)) return false, nil diff --git a/tests/images/e2e/Dockerfile b/tests/images/e2e/Dockerfile index 260ec91cf9..ccce271038 100644 --- a/tests/images/e2e/Dockerfile +++ b/tests/images/e2e/Dockerfile @@ -1,9 +1,10 @@ -FROM alpine:3.10 +FROM debian:buster-slim ENV KUBECTL_VERSION=v1.12.2 ENV HELM_VERSION=v2.9.1 -RUN apk update && apk add --no-cache ca-certificates curl git openssl bash mysql-client +RUN apt-get update && \ + apt-get install -y ca-certificates curl git openssl default-mysql-client unzip RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \ -o /usr/local/bin/kubectl && \ chmod +x /usr/local/bin/kubectl && \ @@ -13,6 +14,9 @@ RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VER mv linux-amd64/helm /usr/local/bin/helm && \ rm -rf linux-amd64 && \ rm helm-${HELM_VERSION}-linux-amd64.tar.gz +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && \ + ./aws/install ADD tidb-operator /charts/e2e/tidb-operator ADD tidb-cluster /charts/e2e/tidb-cluster
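A quick way to confirm the rebuilt e2e image ships the AWS CLI (a sketch:
$E2E_IMAGE follows the naming convention set in hack/e2e.sh):

    docker run --rm $E2E_IMAGE aws --version
    # prints something like "aws-cli/2.x.x ..." if the awscliv2 install step succeeded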