From 8def75199e095651b3211f323c92c772b496f611 Mon Sep 17 00:00:00 2001
From: Jorge Turrado Ferrero
Date: Fri, 5 May 2023 10:20:10 +0200
Subject: [PATCH] fix: Update e2e tests after upgrade k8s 1.26 (#4526)

---
 .../polling_cooldown_so_test.go               | 54 ++++++++------
 .../etcd/etcd_cluster/etcd_cluster_test.go    | 13 +++-
 tests/scalers/etcd/helper/helper.go           |  2 +-
 tests/scalers/kafka/kafka_test.go             | 74 ++++++++++++++++---
 .../redis_cluster_lists_test.go               |  2 +-
 .../redis_sentinel_lists_test.go              |  2 +-
 .../redis_standalone_lists_test.go            |  2 +-
 .../sequential/disruption/disruption_test.go  |  6 +-
 8 files changed, 111 insertions(+), 44 deletions(-)

diff --git a/tests/internals/polling_cooldown_so/polling_cooldown_so_test.go b/tests/internals/polling_cooldown_so/polling_cooldown_so_test.go
index 977040eb86e..ddc58f3bf0d 100644
--- a/tests/internals/polling_cooldown_so/polling_cooldown_so_test.go
+++ b/tests/internals/polling_cooldown_so/polling_cooldown_so_test.go
@@ -6,7 +6,6 @@ package polling_cooldown_so_test
 import (
 	"fmt"
 	"testing"
-	"time"
 
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
@@ -31,6 +30,7 @@ var (
 	scaledObjectName      = fmt.Sprintf("%s-so", testName)
 	secretName            = fmt.Sprintf("%s-secret", testName)
 	metricsServerEndpoint = fmt.Sprintf("http://%s.%s.svc.cluster.local:8080/api/value", serviceName, namespace)
+	hpaName               = fmt.Sprintf("%s-hpa", testName)
 	minReplicas           = 0
 	maxReplicas           = 1
 	pollingInterval       = 1 // (don't set it to 0 to avoid cpu leaks)
@@ -51,6 +51,7 @@ type templateData struct {
 	MetricValue     int
 	PollingInterval int
 	CooldownPeriod  int
+	CustomHpaName   string
 }
 
 const (
@@ -144,6 +145,9 @@ metadata:
 spec:
   scaleTargetRef:
     name: {{.DeploymentName}}
+  advanced:
+    horizontalPodAutoscalerConfig:
+      name: {{.CustomHpaName}}
   pollingInterval: {{.PollingInterval}}
   cooldownPeriod: {{.CooldownPeriod}}
   minReplicaCount: {{.MinReplicas}}
@@ -166,6 +170,7 @@ metadata:
   name: update-ms-value
   namespace: {{.TestNamespace}}
 spec:
+  ttlSecondsAfterFinished: 0
   backoffLimit: 4
   template:
     spec:
@@ -211,8 +216,9 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template
 
 	data.MetricValue = 0
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+
+	// wait some seconds to finish the job
+	WaitForJobCount(t, kc, namespace, 0, 15, 2)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, minReplicas, 18, 10),
 		"replica count should be %d after 3 minutes", minReplicas)
@@ -221,13 +227,12 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template
 	data.PollingInterval = 60 + 15 // 15 seconds as a reserve
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 
-	// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
-	time.Sleep(15 + time.Second)
+	// wait until HPA to ensure that ScaledObject reconciliation loop has happened
+	_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
+	assert.NoError(t, err)
 
 	data.MetricValue = maxReplicas
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
 
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, minReplicas, 60)
 
@@ -242,8 +247,9 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
 
 	data.MetricValue = 1
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+
+	// wait some seconds to finish the job
+	WaitForJobCount(t, kc, namespace, 0, 15, 2)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 18, 10),
 		"replica count should be %d after 3 minutes", minReplicas)
@@ -253,13 +259,12 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
 	data.CooldownPeriod = 0
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 
-	// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
-	time.Sleep(15 + time.Second)
+	// wait until HPA to ensure that ScaledObject reconciliation loop has happened
+	_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
+	assert.NoError(t, err)
 
 	data.MetricValue = minReplicas
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
 
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)
 
@@ -272,25 +277,29 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
 func testCooldownPeriod(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- test Cooldown Period ---")
 
-	data.PollingInterval = 5      // remove polling interval to test CP (don't set it to 0 to avoid cpu leaks)
-	data.CooldownPeriod = 60 + 15 // 15 seconds as a reserve
+	data.PollingInterval = 5
+	data.CooldownPeriod = 0
 	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
 
-	// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
-	time.Sleep(15 + time.Second)
-
 	data.MetricValue = 1
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
+
+	// wait some seconds to finish the job
+	WaitForJobCount(t, kc, namespace, 0, 15, 2)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 18, 10),
 		"replica count should be %d after 3 minutes", 1)
 
+	data.PollingInterval = 5      // remove polling interval to test CP (don't set it to 0 to avoid cpu leaks)
+	data.CooldownPeriod = 60 + 15 // 15 seconds as a reserve
+	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+
+	// wait until HPA to ensure that ScaledObject reconciliation loop has happened
+	_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
+	assert.NoError(t, err)
+
 	data.MetricValue = 0
 	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
-	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
-	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
 
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)
 
@@ -315,6 +324,7 @@ func getTemplateData() (templateData, []Template) {
 			MetricValue:     0,
 			PollingInterval: pollingInterval,
 			CooldownPeriod:  cooldownPeriod,
+			CustomHpaName:   hpaName,
 		}, []Template{
 			{Name: "secretTemplate", Config: secretTemplate},
 			{Name: "metricsServerDeploymentTemplate", Config: metricsServerDeploymentTemplate},
diff --git a/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go b/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go
index a2c0a065db1..a2427b0936f 100644
--- a/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go
+++ b/tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go
@@ -94,7 +94,6 @@ metadata:
   name: {{.JobName}}
   namespace: {{.TestNamespace}}
 spec:
-  ttlSecondsAfterFinished: 5
   template:
     spec:
       containers:
@@ -136,7 +135,9 @@ func TestScaler(t *testing.T) {
 func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing activation ---")
 	data.Value = 4
-	KubectlApplyWithTemplate(t, data, "insertJobTemplate", setJobTemplate)
+	KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
+	assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
+	KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)
 
 	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
 }
@@ -144,7 +145,9 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale out ---")
 	data.Value = 9
-	KubectlApplyWithTemplate(t, data, "deleteJobTemplate", setJobTemplate)
+	KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
+	assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
+	KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
 		"replica count should be %d after 3 minutes", maxReplicaCount)
@@ -153,7 +156,9 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale in ---")
 	data.Value = 0
-	KubectlApplyWithTemplate(t, data, "insertJobTemplate", setJobTemplate)
+	KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
+	assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
+	KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
 		"replica count should be %d after 3 minutes", minReplicaCount)
diff --git a/tests/scalers/etcd/helper/helper.go b/tests/scalers/etcd/helper/helper.go
index 6b9c892cf88..0a8453dec8e 100644
--- a/tests/scalers/etcd/helper/helper.go
+++ b/tests/scalers/etcd/helper/helper.go
@@ -140,6 +140,6 @@ func InstallCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace stri
 		EtcdName: name,
 	}
 	helper.KubectlApplyMultipleWithTemplate(t, data, etcdClusterTemplates)
-	assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 3, 60, 3),
+	assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 3, 60, 5),
 		"etcd-cluster should be up")
 }
diff --git a/tests/scalers/kafka/kafka_test.go b/tests/scalers/kafka/kafka_test.go
index 7dc6901dd77..5547bf282a0 100644
--- a/tests/scalers/kafka/kafka_test.go
+++ b/tests/scalers/kafka/kafka_test.go
@@ -135,8 +135,25 @@ metadata:
   labels:
     app: {{.DeploymentName}}
 spec:
+  pollingInterval: 5
+  cooldownPeriod: 0
   scaleTargetRef:
     name: {{.DeploymentName}}
+  advanced:
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleUp:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
+        scaleDown:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
   triggers:
   - type: kafka
     metadata:
@@ -156,8 +173,25 @@ metadata:
   labels:
     app: {{.DeploymentName}}
 spec:
+  pollingInterval: 5
+  cooldownPeriod: 0
   scaleTargetRef:
     name: {{.DeploymentName}}
+  advanced:
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleUp:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
+        scaleDown:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
   triggers:
   - type: kafka
     metadata:
@@ -175,8 +209,25 @@ metadata:
   labels:
     app: {{.DeploymentName}}
 spec:
+  pollingInterval: 5
+  cooldownPeriod: 0
   scaleTargetRef:
     name: {{.DeploymentName}}
+  advanced:
+    horizontalPodAutoscalerConfig:
+      behavior:
+        scaleUp:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
+        scaleDown:
+          stabilizationWindowSeconds: 0
+          policies:
+          - type: Percent
+            value: 100
+            periodSeconds: 15
   triggers:
   - type: kafka
     metadata:
@@ -196,20 +247,21 @@ metadata:
   labels:
     app: {{.DeploymentName}}
 spec:
-  pollingInterval: 15
+  pollingInterval: 5
+  cooldownPeriod: 0
   scaleTargetRef:
     name: {{.DeploymentName}}
   advanced:
     horizontalPodAutoscalerConfig:
       behavior:
         scaleUp:
-          stabilizationWindowSeconds: 30
+          stabilizationWindowSeconds: 0
           policies:
           - type: Percent
             value: 100
             periodSeconds: 15
         scaleDown:
-          stabilizationWindowSeconds: 30
+          stabilizationWindowSeconds: 0
           policies:
           - type: Percent
             value: 100
@@ -327,11 +379,11 @@ func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateDat
 	KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate)
 
 	// Shouldn't scale pods applying earliest policy
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	// Shouldn't scale pods with only 1 message due to activation value
 	publishMessage(t, topic1)
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	// Scale application with kafka messages
 	publishMessage(t, topic1)
@@ -362,11 +414,11 @@ func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData)
 	KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate)
 
 	// Shouldn't scale pods
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	// Shouldn't scale pods with only 1 message due to activation value
 	publishMessage(t, topic1)
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	// Scale application with kafka messages
 	publishMessage(t, topic1)
@@ -396,7 +448,7 @@ func testMultiTopic(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	KubectlApplyWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate)
 
 	// Shouldn't scale pods
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	// Scale application with kafka messages in topic 1
 	publishMessage(t, topic1)
@@ -428,7 +480,7 @@ func testZeroOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templa
 	KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate)
 
 	// Shouldn't scale pods
-	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
+	AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)
 
 	KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate)
 	KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate)
@@ -535,13 +587,13 @@ func addTopic(t *testing.T, data templateData, name string, partitions int) {
 	data.KafkaTopicName = name
 	data.KafkaTopicPartitions = partitions
 	KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate)
-	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=300s --namespace %s", name, testNamespace))
+	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace))
 	assert.NoErrorf(t, err, "cannot execute command - %s", err)
 }
 
 func addCluster(t *testing.T, data templateData) {
 	KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate)
-	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=300s --namespace %s", kafkaName, testNamespace))
+	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace))
 	assert.NoErrorf(t, err, "cannot execute command - %s", err)
 }
 
diff --git a/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go b/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go
index fa983faa9d3..b5d055ddcfb 100644
--- a/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go
+++ b/tests/scalers/redis/redis_cluster_lists/redis_cluster_lists_test.go
@@ -196,7 +196,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 
 func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale out ---")
-	data.ItemsToWrite = 200
+	data.ItemsToWrite = 400
 	KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
diff --git a/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go b/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go
index b9092238c8d..08580adff3e 100644
--- a/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go
+++ b/tests/scalers/redis/redis_sentinel_lists/redis_sentinel_lists_test.go
@@ -203,7 +203,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 
 func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale out ---")
-	data.ItemsToWrite = 200
+	data.ItemsToWrite = 400
 	KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
diff --git a/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go b/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go
index 40c22fb7c80..a2ed27a627a 100644
--- a/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go
+++ b/tests/scalers/redis/redis_standalone_lists/redis_standalone_lists_test.go
@@ -192,7 +192,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 
 func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing scale out ---")
-	data.ItemsToWrite = 200
+	data.ItemsToWrite = 400
 	KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
diff --git a/tests/sequential/disruption/disruption_test.go b/tests/sequential/disruption/disruption_test.go
index bd6cf16f5e9..71ed8e679cc 100644
--- a/tests/sequential/disruption/disruption_test.go
+++ b/tests/sequential/disruption/disruption_test.go
@@ -73,16 +73,16 @@ metadata:
   name: {{.SutDeploymentName}}
   namespace: {{.TestNamespace}}
   labels:
-    deploy: workload-sut
+    deploy: {{.SutDeploymentName}}
 spec:
   replicas: 0
   selector:
     matchLabels:
-      pod: workload-sut
+      pod: {{.SutDeploymentName}}
   template:
     metadata:
      labels:
-        pod: workload-sut
+        pod: {{.SutDeploymentName}}
    spec:
      containers:
      - name: nginx
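
Note on the waiting pattern used above: the polling/cooldown changes replace fixed time.Sleep calls with explicit condition polling (WaitForHpaCreation to detect that the ScaledObject's HPA exists, WaitForJobCount to detect that the metric-update Job finished), so the tests proceed as soon as the condition holds instead of guessing a delay. As a rough illustration only, here is a minimal sketch of how such an HPA wait helper could look with client-go; the name waitForHpa, its exact signature, and the iteration/interval semantics are assumptions for illustration, not the actual KEDA e2e helper.

package helper

import (
	"context"
	"testing"
	"time"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForHpa polls for the named HPA every intervalSeconds, up to iterations
// attempts, and returns it as soon as the Get call succeeds (i.e. the
// ScaledObject controller has reconciled and created the HPA).
// Hypothetical sketch; not the helper shipped in the KEDA e2e suite.
func waitForHpa(t *testing.T, kc *kubernetes.Clientset, name, namespace string, iterations, intervalSeconds int) (*autoscalingv2.HorizontalPodAutoscaler, error) {
	var err error
	for i := 0; i < iterations; i++ {
		var hpa *autoscalingv2.HorizontalPodAutoscaler
		hpa, err = kc.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err == nil {
			return hpa, nil
		}
		t.Logf("waiting for HPA %s/%s: %v", namespace, name, err)
		time.Sleep(time.Duration(intervalSeconds) * time.Second)
	}
	return nil, err
}

Called the way the patch calls WaitForHpaCreation, e.g. waitForHpa(t, kc, hpaName, namespace, 60, 2), this bounds the wait at roughly two minutes while returning immediately once the HPA appears.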