From 70958e4e6f67f59930da749fd61e0d381445ed71 Mon Sep 17 00:00:00 2001 From: Bartosz Chwila <103247439+barchw@users.noreply.github.com> Date: Tue, 4 Jun 2024 12:47:55 +0200 Subject: [PATCH] Improve memory usage of Istio manager (#852) (#860) * Improve memory usage of Istio manager (#852) * Use distinct path for Istio telemetries * Remove deepcopy from gathering pods for restart * Revert unneded * Remove caching for objects * Post review * Use distinct path for Istio telemetries * Remove deepcopy from gathering pods for restart * Revert unneded * Remove caching for objects * Post review * Run tests on Gardener live (#851) * Deprecate kyma provision gardener (#825) * Deprecate kyma provision gardener * fixes * use relative yaml path * fix yaml path again * force k8s version to be a string * try delaying before requesting kubeconfig * don't hibernate * fix varaible name * remove sleep * remove unused GARDENER_GARDENLINUX_VERSION * fix Gardener provisioning (#835) * Use the default Gardener network addresses (#840) * Use the default Gardener network addresses * aws * [skip ci] * Don't skip ci * Dummy --------- Co-authored-by: Patryk Strugacz Co-authored-by: Piotr Halama --- .github/workflows/main-integration.yaml | 16 ++---- .github/workflows/performance-test.yaml | 4 +- .../workflows/pull-integration-gardener.yaml | 13 +---- .github/workflows/verify-commit-pins.yaml | 1 + hack/ci/provision-gardener.sh | 42 ++++++-------- hack/ci/shoot_aws.yaml | 55 +++++++++++++++++++ hack/ci/shoot_gcp.yaml | 45 +++++++++++++++ main.go | 15 +++++ pkg/lib/sidecars/pods/get.go | 17 +++--- pkg/lib/sidecars/remove/remove.go | 2 +- pkg/lib/sidecars/restart/restart.go | 2 +- pkg/lib/sidecars/restart/restart_test.go | 26 ++++----- .../scripts/gardener-kubeconfig.sh | 12 ++-- 13 files changed, 170 insertions(+), 80 deletions(-) create mode 100644 hack/ci/shoot_aws.yaml create mode 100644 hack/ci/shoot_gcp.yaml diff --git a/.github/workflows/main-integration.yaml 
b/.github/workflows/main-integration.yaml index 6994c31f48..d2959de875 100644 --- a/.github/workflows/main-integration.yaml +++ b/.github/workflows/main-integration.yaml @@ -76,13 +76,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "goat" GARDENER_PROVIDER: "gcp" GARDENER_REGION: "europe-west3" - GARDENER_ZONES: "europe-west3-c,europe-west3-b,europe-west3-a" GARDENER_CLUSTER_VERSION: "1.28" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "n2-standard-4" DISK_SIZE: 50 DISK_TYPE: "pd-standard" @@ -108,13 +106,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "aws-gardener-access" GARDENER_PROVIDER: "aws" GARDENER_CLUSTER_VERSION: "1.28" GARDENER_REGION: "eu-west-1" - GARDENER_ZONES: "eu-west-1b,eu-west-1c,eu-west-1a" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "m5.xlarge" DISK_SIZE: 50 DISK_TYPE: "gp2" @@ -140,13 +136,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "goat" GARDENER_PROVIDER: "gcp" GARDENER_REGION: "europe-west3" - GARDENER_ZONES: "europe-west3-c,europe-west3-b,europe-west3-a" GARDENER_CLUSTER_VERSION: "1.28" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "n2-standard-4" DISK_SIZE: 50 DISK_TYPE: "pd-standard" @@ -172,13 +166,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "aws-gardener-access" GARDENER_PROVIDER: "aws" GARDENER_CLUSTER_VERSION: "1.28" GARDENER_REGION: "eu-west-1" - GARDENER_ZONES: 
"eu-west-1b,eu-west-1c,eu-west-1a" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "m5.xlarge" DISK_SIZE: 50 DISK_TYPE: "gp2" diff --git a/.github/workflows/performance-test.yaml b/.github/workflows/performance-test.yaml index ef24bc2dbe..611e1a0416 100644 --- a/.github/workflows/performance-test.yaml +++ b/.github/workflows/performance-test.yaml @@ -25,13 +25,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "goat" GARDENER_PROVIDER: "gcp" GARDENER_REGION: "europe-west3" - GARDENER_ZONES: "europe-west3-c,europe-west3-b,europe-west3-a" GARDENER_CLUSTER_VERSION: "1.27" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "n2-standard-4" DISK_SIZE: 50 DISK_TYPE: "pd-standard" diff --git a/.github/workflows/pull-integration-gardener.yaml b/.github/workflows/pull-integration-gardener.yaml index eae25d335c..c099ff5914 100644 --- a/.github/workflows/pull-integration-gardener.yaml +++ b/.github/workflows/pull-integration-gardener.yaml @@ -35,7 +35,6 @@ jobs: job-name: 'pull-istio-operator-build' github-auth-token: ${{ secrets.GITHUB_TOKEN }} - istio-integration-gcp: name: Istio integration test GCP runs-on: ubuntu-latest @@ -57,13 +56,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "goat" GARDENER_PROVIDER: "gcp" GARDENER_REGION: "europe-west3" - GARDENER_ZONES: "europe-west3-c,europe-west3-b,europe-west3-a" GARDENER_CLUSTER_VERSION: "1.28" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "n2-standard-4" DISK_SIZE: 50 DISK_TYPE: "pd-standard" @@ -91,13 +88,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" 
GARDENER_PROVIDER_SECRET_NAME: "aws-gardener-access" GARDENER_PROVIDER: "aws" GARDENER_CLUSTER_VERSION: "1.28" GARDENER_REGION: "eu-west-1" - GARDENER_ZONES: "eu-west-1b,eu-west-1c,eu-west-1a" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "m5.xlarge" DISK_SIZE: 50 DISK_TYPE: "gp2" @@ -125,13 +120,11 @@ jobs: shell: bash env: GARDENER_KUBECONFIG: "/home/runner/work/istio/istio/gardener_kubeconfig.yaml" - GARDENER_PROJECT_NAME: "goatz" + GARDENER_PROJECT_NAME: "goats" GARDENER_PROVIDER_SECRET_NAME: "goat" GARDENER_PROVIDER: "gcp" GARDENER_REGION: "europe-west3" - GARDENER_ZONES: "europe-west3-c,europe-west3-b,europe-west3-a" GARDENER_CLUSTER_VERSION: "1.28" - GARDENER_GARDENLINUX_VERSION: "1312.3.0" MACHINE_TYPE: "n2-standard-4" DISK_SIZE: 50 DISK_TYPE: "pd-standard" diff --git a/.github/workflows/verify-commit-pins.yaml b/.github/workflows/verify-commit-pins.yaml index 5eb2424722..fed8f21197 100644 --- a/.github/workflows/verify-commit-pins.yaml +++ b/.github/workflows/verify-commit-pins.yaml @@ -7,6 +7,7 @@ on: - '.github/workflows/**' branches: - main + permissions: contents: read diff --git a/hack/ci/provision-gardener.sh b/hack/ci/provision-gardener.sh index ba120d5934..8a0b7cb639 100755 --- a/hack/ci/provision-gardener.sh +++ b/hack/ci/provision-gardener.sh @@ -22,7 +22,6 @@ requiredVars=( CLUSTER_NAME GARDENER_PROVIDER GARDENER_REGION - GARDENER_ZONES GARDENER_KUBECONFIG GARDENER_PROJECT_NAME GARDENER_PROVIDER_SECRET_NAME @@ -36,30 +35,21 @@ requiredVars=( check_required_vars "${requiredVars[@]}" -# Install Kyma CLI in latest version -echo "--> Install kyma CLI locally to /tmp/bin" -curl -Lo kyma.tar.gz "https://github.com/kyma-project/cli/releases/latest/download/kyma_linux_x86_64.tar.gz" \ -&& tar -zxvf kyma.tar.gz && chmod +x kyma \ -&& rm -f kyma.tar.gz -chmod +x kyma +# render and apply shoot template +shoot_template=$(envsubst < ./hack/ci/shoot_${GARDENER_PROVIDER}.yaml) -# Add pwd to path to be able to use Kyma binary -export
PATH="${PATH}:${PWD}" +echo "$shoot_template" | kubectl --kubeconfig "${GARDENER_KUBECONFIG}" apply -f - -kyma version --client -kyma provision gardener ${GARDENER_PROVIDER} \ - --secret "${GARDENER_PROVIDER_SECRET_NAME}" \ - --name "${CLUSTER_NAME}" \ - --project "${GARDENER_PROJECT_NAME}" \ - --credentials "${GARDENER_KUBECONFIG}" \ - --region "${GARDENER_REGION}" \ - --zones "${GARDENER_ZONES}" \ - --type "${MACHINE_TYPE}" \ - --disk-size $DISK_SIZE \ - --disk-type "${DISK_TYPE}" \ - --scaler-max $SCALER_MAX \ - --scaler-min $SCALER_MIN \ - --kube-version="${GARDENER_CLUSTER_VERSION}" \ - --gardenlinux-version="${GARDENER_GARDENLINUX_VERSION}" \ - --attempts 3 \ - --verbose +echo "waiting for cluster to be ready..." +kubectl wait --kubeconfig "${GARDENER_KUBECONFIG}" --for=condition=EveryNodeReady shoot/${CLUSTER_NAME} --timeout=17m + +# create kubeconfig request, that creates a kubeconfig which is valid for one day +kubectl create --kubeconfig "${GARDENER_KUBECONFIG}" \ + -f <(printf '{"spec":{"expirationSeconds":86400}}') \ + --raw /apis/core.gardener.cloud/v1beta1/namespaces/garden-${GARDENER_PROJECT_NAME}/shoots/${CLUSTER_NAME}/adminkubeconfig | \ + jq -r ".status.kubeconfig" | \ + base64 -d > ${CLUSTER_NAME}_kubeconfig.yaml + +# replace the default kubeconfig +mkdir -p ~/.kube +mv ${CLUSTER_NAME}_kubeconfig.yaml ~/.kube/config diff --git a/hack/ci/shoot_aws.yaml b/hack/ci/shoot_aws.yaml new file mode 100644 index 0000000000..1e57d34ad3 --- /dev/null +++ b/hack/ci/shoot_aws.yaml @@ -0,0 +1,55 @@ +apiVersion: core.gardener.cloud/v1beta1 +kind: Shoot +metadata: + name: ${CLUSTER_NAME} +spec: + secretBindingName: ${GARDENER_PROVIDER_SECRET_NAME} + cloudProfileName: aws + region: ${GARDENER_REGION} + purpose: evaluation + provider: + type: aws + infrastructureConfig: + apiVersion: aws.provider.extensions.gardener.cloud/v1alpha1 + kind: InfrastructureConfig + networks: + vpc: + cidr: 10.250.0.0/16 + zones: + - name: ${GARDENER_REGION}a + internal: 10.250.48.0/20 + 
public: 10.250.32.0/20 + workers: 10.250.0.0/19 + - name: ${GARDENER_REGION}b + internal: 10.250.112.0/20 + public: 10.250.96.0/20 + workers: 10.250.64.0/19 + - name: ${GARDENER_REGION}c + internal: 10.250.176.0/20 + public: 10.250.160.0/20 + workers: 10.250.128.0/19 + workers: + - name: cpu-worker + minimum: ${SCALER_MIN} + maximum: ${SCALER_MAX} + machine: + type: ${MACHINE_TYPE} + volume: + type: ${DISK_TYPE} + size: ${DISK_SIZE}Gi + zones: + - ${GARDENER_REGION}a + - ${GARDENER_REGION}b + - ${GARDENER_REGION}c + networking: + type: calico + pods: 100.64.0.0/12 + nodes: 10.250.0.0/16 + services: 100.104.0.0/13 + kubernetes: + version: "${GARDENER_CLUSTER_VERSION}" + hibernation: + enabled: false + addons: + nginxIngress: + enabled: false diff --git a/hack/ci/shoot_gcp.yaml b/hack/ci/shoot_gcp.yaml new file mode 100644 index 0000000000..28bd34c624 --- /dev/null +++ b/hack/ci/shoot_gcp.yaml @@ -0,0 +1,45 @@ +apiVersion: core.gardener.cloud/v1beta1 +kind: Shoot +metadata: + name: ${CLUSTER_NAME} +spec: + secretBindingName: ${GARDENER_PROVIDER_SECRET_NAME} + cloudProfileName: gcp + region: ${GARDENER_REGION} + purpose: evaluation + provider: + type: gcp + infrastructureConfig: + apiVersion: gcp.provider.extensions.gardener.cloud/v1alpha1 + kind: InfrastructureConfig + networks: + workers: 10.250.0.0/16 + controlPlaneConfig: + apiVersion: gcp.provider.extensions.gardener.cloud/v1alpha1 + kind: ControlPlaneConfig + zone: ${GARDENER_REGION}-a + workers: + - name: cpu-worker + minimum: ${SCALER_MIN} + maximum: ${SCALER_MAX} + machine: + type: ${MACHINE_TYPE} + volume: + type: ${DISK_TYPE} + size: ${DISK_SIZE}Gi + zones: + - ${GARDENER_REGION}-a + - ${GARDENER_REGION}-b + - ${GARDENER_REGION}-c + networking: + type: calico + pods: 100.64.0.0/12 + nodes: 10.250.0.0/16 + services: 100.104.0.0/13 + kubernetes: + version: "${GARDENER_CLUSTER_VERSION}" + hibernation: + enabled: false + addons: + nginxIngress: + enabled: false diff --git a/main.go b/main.go index 
b1660b28a2..d2009b1046 100644 --- a/main.go +++ b/main.go @@ -18,7 +18,9 @@ package main import ( "flag" + v1 "k8s.io/api/apps/v1" "os" + "sigs.k8s.io/controller-runtime/pkg/client" "time" networkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3" @@ -107,6 +109,19 @@ func main() { HealthProbeBindAddress: flagVar.probeAddr, LeaderElection: flagVar.enableLeaderElection, LeaderElectionID: "76223278.kyma-project.io", + Client: client.Options{ + Cache: &client.CacheOptions{ + // The cache is disabled for these objects to avoid huge memory usage. + // Having the cache enabled had previously caused memory usage + // to have a significant peak when sidecar restart was triggered. + DisableFor: []client.Object{ + &v1.DaemonSet{}, + &v1.Deployment{}, + &v1.StatefulSet{}, + &v1.ReplicaSet{}, + }, + }, + }, }) if err != nil { setupLog.Error(err, "unable to start manager") diff --git a/pkg/lib/sidecars/pods/get.go b/pkg/lib/sidecars/pods/get.go index a5c80f1a7c..61257da214 100644 --- a/pkg/lib/sidecars/pods/get.go +++ b/pkg/lib/sidecars/pods/get.go @@ -51,32 +51,32 @@ func getAllRunningPods(ctx context.Context, c client.Client) (*v1.PodList, error return podList, nil } -func GetPodsToRestart(ctx context.Context, c client.Client, expectedImage SidecarImage, expectedResources v1.ResourceRequirements, predicates []filter.SidecarProxyPredicate, logger *logr.Logger) (outputPodsList v1.PodList, err error) { +func GetPodsToRestart(ctx context.Context, c client.Client, expectedImage SidecarImage, expectedResources v1.ResourceRequirements, predicates []filter.SidecarProxyPredicate, logger *logr.Logger) (outputPodsList *v1.PodList, err error) { podList, err := getAllRunningPods(ctx, c) if err != nil { - return outputPodsList, err + return nil, err } - podList.DeepCopyInto(&outputPodsList) - outputPodsList.Items = []v1.Pod{} - //Add predicate for image version and resources configuration predicates = append(predicates, NewRestartProxyPredicate(expectedImage, expectedResources))
for _, predicate := range predicates { evaluator, err := predicate.NewProxyRestartEvaluator(ctx) if err != nil { - return v1.PodList{}, err + return &v1.PodList{}, err } + outputPodsList = &v1.PodList{} for _, pod := range podList.Items { if evaluator.RequiresProxyRestart(pod) { - outputPodsList.Items = append(outputPodsList.Items, *pod.DeepCopy()) + outputPodsList.Items = append(outputPodsList.Items, pod) } } } - logger.Info("Pods to restart", "number of pods", len(outputPodsList.Items)) + if outputPodsList != nil { + logger.Info("Pods to restart", "number of pods", len(outputPodsList.Items)) + } return outputPodsList, nil } @@ -97,6 +97,7 @@ func containsSidecar(pod v1.Pod) bool { func GetAllInjectedPods(ctx context.Context, k8sclient client.Client) (outputPodList *v1.PodList, err error) { podList := &v1.PodList{} outputPodList = &v1.PodList{} + outputPodList.Items = make([]v1.Pod, len(podList.Items)) err = retry.RetryOnError(retry.DefaultRetry, func() error { return k8sclient.List(ctx, podList, &client.ListOptions{}) diff --git a/pkg/lib/sidecars/remove/remove.go b/pkg/lib/sidecars/remove/remove.go index 770f2e3a1c..16aede487f 100644 --- a/pkg/lib/sidecars/remove/remove.go +++ b/pkg/lib/sidecars/remove/remove.go @@ -15,5 +15,5 @@ func RemoveSidecars(ctx context.Context, k8sclient client.Client, logger *logr.L return nil, err } - return restart.Restart(ctx, k8sclient, *toRestart, logger) + return restart.Restart(ctx, k8sclient, toRestart, logger) } diff --git a/pkg/lib/sidecars/restart/restart.go b/pkg/lib/sidecars/restart/restart.go index 0570b1f4a9..fbf2c71916 100644 --- a/pkg/lib/sidecars/restart/restart.go +++ b/pkg/lib/sidecars/restart/restart.go @@ -26,7 +26,7 @@ func newRestartWarning(o actionObject, message string) RestartWarning { } } -func Restart(ctx context.Context, c client.Client, podList v1.PodList, logger *logr.Logger) ([]RestartWarning, error) { +func Restart(ctx context.Context, c client.Client, podList *v1.PodList, logger *logr.Logger) 
([]RestartWarning, error) { warnings := make([]RestartWarning, 0) processedActionObjects := make(map[string]bool) diff --git a/pkg/lib/sidecars/restart/restart_test.go b/pkg/lib/sidecars/restart/restart_test.go index fc6b329661..602d17a25d 100644 --- a/pkg/lib/sidecars/restart/restart_test.go +++ b/pkg/lib/sidecars/restart/restart_test.go @@ -46,7 +46,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -67,7 +67,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -88,7 +88,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -109,7 +109,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -134,7 +134,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -158,7 +158,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -181,7 +181,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ 
-204,7 +204,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -230,7 +230,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -256,7 +256,7 @@ var _ = Describe("Restart Pods", func() { c := fakeClient(&pod) // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -291,7 +291,7 @@ var _ = Describe("Restart Pods", func() { }}) // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -316,7 +316,7 @@ var _ = Describe("Restart Pods", func() { } // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) @@ -351,7 +351,7 @@ var _ = Describe("Restart Pods", func() { }}) // when - warnings, err := restart.Restart(ctx, c, podList, &logger) + warnings, err := restart.Restart(ctx, c, &podList, &logger) // then Expect(err).NotTo(HaveOccurred()) diff --git a/tests/integration/scripts/gardener-kubeconfig.sh b/tests/integration/scripts/gardener-kubeconfig.sh index 0a50c4d660..243d234c43 100755 --- a/tests/integration/scripts/gardener-kubeconfig.sh +++ b/tests/integration/scripts/gardener-kubeconfig.sh @@ -1,19 +1,19 @@ cat < /home/runner/work/istio/istio/gardener_kubeconfig.yaml apiVersion: v1 kind: Config -current-context: garden-goatz-cli-test +current-context: garden-goats-github contexts: - - name: garden-goatz-cli-test + - name: garden-goats-github context: cluster: garden - user: 
cli-test - namespace: garden-goatz + user: github + namespace: garden-goats clusters: - name: garden cluster: - server: https://api.canary.gardener.cloud.sap/ + server: https://api.live.gardener.cloud.sap users: - - name: cli-test + - name: github user: token: >- $GARDENER_TOKEN