
Commit

fix: Update e2e tests after upgrade k8s 1.26 (kedacore#4526)
Signed-off-by: geoffrey1330 <israelgeoffrey13@gmail.com>
JorTurFer authored and geoffrey1330 committed Oct 4, 2023
1 parent 3a0f2a9 commit f185a18
Showing 8 changed files with 111 additions and 44 deletions.
54 changes: 32 additions & 22 deletions tests/internals/polling_cooldown_so/polling_cooldown_so_test.go
@@ -6,7 +6,6 @@ package polling_cooldown_so_test
import (
"fmt"
"testing"
"time"

"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
@@ -31,6 +30,7 @@ var (
scaledObjectName = fmt.Sprintf("%s-so", testName)
secretName = fmt.Sprintf("%s-secret", testName)
metricsServerEndpoint = fmt.Sprintf("http://%s.%s.svc.cluster.local:8080/api/value", serviceName, namespace)
hpaName = fmt.Sprintf("%s-hpa", testName)
minReplicas = 0
maxReplicas = 1
pollingInterval = 1 // (don't set it to 0 to avoid cpu leaks)
@@ -51,6 +51,7 @@ type templateData struct {
MetricValue int
PollingInterval int
CooldownPeriod int
CustomHpaName string
}

const (
@@ -144,6 +145,9 @@ metadata:
spec:
scaleTargetRef:
name: {{.DeploymentName}}
advanced:
horizontalPodAutoscalerConfig:
name: {{.CustomHpaName}}
pollingInterval: {{.PollingInterval}}
cooldownPeriod: {{.CooldownPeriod}}
minReplicaCount: {{.MinReplicas}}
@@ -166,6 +170,7 @@ metadata:
name: update-ms-value
namespace: {{.TestNamespace}}
spec:
ttlSecondsAfterFinished: 0
backoffLimit: 4
template:
spec:
@@ -211,8 +216,9 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template

data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, minReplicas, 18, 10),
"replica count should be %d after 3 minutes", minReplicas)
@@ -221,13 +227,12 @@ func testPollingIntervalUp(t *testing.T, kc *kubernetes.Clientset, data template
data.PollingInterval = 60 + 15 // 15 seconds as a reserve
KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
time.Sleep(15 + time.Second)
// wait until HPA to ensure that ScaledObject reconciliation loop has happened
_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
assert.NoError(t, err)

data.MetricValue = maxReplicas
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, minReplicas, 60)

@@ -242,8 +247,9 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa

data.MetricValue = 1
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 18, 10),
"replica count should be %d after 3 minutes", minReplicas)
@@ -253,13 +259,12 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
data.CooldownPeriod = 0
KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
time.Sleep(15 + time.Second)
// wait until HPA to ensure that ScaledObject reconciliation loop has happened
_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
assert.NoError(t, err)

data.MetricValue = minReplicas
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)

@@ -272,25 +277,29 @@ func testPollingIntervalDown(t *testing.T, kc *kubernetes.Clientset, data templa
func testCooldownPeriod(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- test Cooldown Period ---")

data.PollingInterval = 5 // remove polling interval to test CP (don't set it to 0 to avoid cpu leaks)
data.CooldownPeriod = 60 + 15 // 15 seconds as a reserve
data.PollingInterval = 5
data.CooldownPeriod = 0
KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

// wait 15 sec to ensure that ScaledObject reconciliation loop has happened
time.Sleep(15 + time.Second)

data.MetricValue = 1
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

// wait some seconds to finish the job
WaitForJobCount(t, kc, namespace, 0, 15, 2)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, maxReplicas, 18, 10),
"replica count should be %d after 3 minutes", 1)

data.PollingInterval = 5 // remove polling interval to test CP (don't set it to 0 to avoid cpu leaks)
data.CooldownPeriod = 60 + 15 // 15 seconds as a reserve
KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

// wait until HPA to ensure that ScaledObject reconciliation loop has happened
_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
assert.NoError(t, err)

data.MetricValue = 0
KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, namespace, maxReplicas, 60)

@@ -315,6 +324,7 @@ func getTemplateData() (templateData, []Template) {
MetricValue: 0,
PollingInterval: pollingInterval,
CooldownPeriod: cooldownPeriod,
CustomHpaName: hpaName,
}, []Template{
{Name: "secretTemplate", Config: secretTemplate},
{Name: "metricsServerDeploymentTemplate", Config: metricsServerDeploymentTemplate},
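For context, a condensed sketch of the flow these hunks converge on: apply a ScaledObject whose HPA name is pinned via advanced.horizontalPodAutoscalerConfig.name, wait for that HPA to exist instead of sleeping a fixed interval, drive the metrics server with a short-lived Job, and assert on the resulting replica count. This is a minimal sketch rather than the full test, and it assumes the helpers used above (KubectlApplyWithTemplate, WaitForHpaCreation, WaitForJobSuccess, KubectlDeleteWithTemplate, WaitForDeploymentReplicaReadyCount) are dot-imported from the repository's tests/helper package, as the surrounding e2e tests do.

package polling_cooldown_so_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/client-go/kubernetes"

	// Assumed helper package, following the repo's e2e test convention.
	. "github.com/kedacore/keda/v2/tests/helper"
)

// waitAndScale mirrors the pattern used by testPollingIntervalUp/Down above:
// reconcile the ScaledObject, wait for its named HPA, push a metric value,
// and verify the deployment settles on the expected replica count.
func waitAndScale(t *testing.T, kc *kubernetes.Clientset, data templateData, metricValue, wantReplicas int) {
	// Re-apply the ScaledObject so the operator reconciles the new polling/cooldown settings.
	KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)

	// Wait for the HPA (named via advanced.horizontalPodAutoscalerConfig.name)
	// rather than sleeping for a fixed 15 seconds.
	_, err := WaitForHpaCreation(t, kc, hpaName, namespace, 60, 2)
	assert.NoError(t, err)

	// Push the desired metric value through a one-shot Job and clean it up afterwards.
	data.MetricValue = metricValue
	KubectlApplyWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
	assert.True(t, WaitForJobSuccess(t, kc, "update-ms-value", data.TestNamespace, 6, 10), "update job failed")
	KubectlDeleteWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)

	// The deployment should converge on the expected replica count within ~3 minutes.
	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, namespace, wantReplicas, 18, 10),
		"replica count should be %d", wantReplicas)
}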
13 changes: 9 additions & 4 deletions tests/scalers/etcd/etcd_cluster/etcd_cluster_test.go
@@ -94,7 +94,6 @@ metadata:
name: {{.JobName}}
namespace: {{.TestNamespace}}
spec:
ttlSecondsAfterFinished: 5
template:
spec:
containers:
@@ -136,15 +135,19 @@ func TestScaler(t *testing.T) {
func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing activation ---")
data.Value = 4
KubectlApplyWithTemplate(t, data, "insertJobTemplate", setJobTemplate)
KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)

AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
}

func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.Value = 9
KubectlApplyWithTemplate(t, data, "deleteJobTemplate", setJobTemplate)
KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
"replica count should be %d after 3 minutes", maxReplicaCount)
@@ -153,7 +156,9 @@ func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
func testScaleIn(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale in ---")
data.Value = 0
KubectlApplyWithTemplate(t, data, "insertJobTemplate", setJobTemplate)
KubectlApplyWithTemplate(t, data, jobName, setJobTemplate)
assert.True(t, WaitForJobSuccess(t, kc, jobName, data.TestNamespace, 6, 10), "update job failed")
KubectlDeleteWithTemplate(t, data, jobName, setJobTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
"replica count should be %d after 3 minutes", minReplicaCount)
2 changes: 1 addition & 1 deletion tests/scalers/etcd/helper/helper.go
@@ -140,6 +140,6 @@ func InstallCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace stri
EtcdName: name,
}
helper.KubectlApplyMultipleWithTemplate(t, data, etcdClusterTemplates)
assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 3, 60, 3),
assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 3, 60, 5),
"etcd-cluster should be up")
}
74 changes: 63 additions & 11 deletions tests/scalers/kafka/kafka_test.go
@@ -135,8 +135,25 @@ metadata:
labels:
app: {{.DeploymentName}}
spec:
pollingInterval: 5
cooldownPeriod: 0
scaleTargetRef:
name: {{.DeploymentName}}
advanced:
horizontalPodAutoscalerConfig:
behavior:
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
scaleDown:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
triggers:
- type: kafka
metadata:
@@ -156,8 +173,25 @@ metadata:
labels:
app: {{.DeploymentName}}
spec:
pollingInterval: 5
cooldownPeriod: 0
scaleTargetRef:
name: {{.DeploymentName}}
advanced:
horizontalPodAutoscalerConfig:
behavior:
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
scaleDown:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
triggers:
- type: kafka
metadata:
@@ -175,8 +209,25 @@ metadata:
labels:
app: {{.DeploymentName}}
spec:
pollingInterval: 5
cooldownPeriod: 0
scaleTargetRef:
name: {{.DeploymentName}}
advanced:
horizontalPodAutoscalerConfig:
behavior:
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
scaleDown:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
triggers:
- type: kafka
metadata:
@@ -196,20 +247,21 @@ metadata:
labels:
app: {{.DeploymentName}}
spec:
pollingInterval: 15
pollingInterval: 5
cooldownPeriod: 0
scaleTargetRef:
name: {{.DeploymentName}}
advanced:
horizontalPodAutoscalerConfig:
behavior:
scaleUp:
stabilizationWindowSeconds: 30
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
scaleDown:
stabilizationWindowSeconds: 30
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
@@ -327,11 +379,11 @@ func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateDat
KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate)

// Shouldn't scale pods applying earliest policy
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

// Shouldn't scale pods with only 1 message due to activation value
publishMessage(t, topic1)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

// Scale application with kafka messages
publishMessage(t, topic1)
@@ -362,11 +414,11 @@ func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData)
KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate)

// Shouldn't scale pods
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

// Shouldn't scale pods with only 1 message due to activation value
publishMessage(t, topic1)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

// Scale application with kafka messages
publishMessage(t, topic1)
@@ -396,7 +448,7 @@ func testMultiTopic(t *testing.T, kc *kubernetes.Clientset, data templateData) {
KubectlApplyWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate)

// Shouldn't scale pods
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

// Scale application with kafka messages in topic 1
publishMessage(t, topic1)
@@ -428,7 +480,7 @@ func testZeroOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templa
KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate)

// Shouldn't scale pods
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 60)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30)

KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate)
KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate)
@@ -535,13 +587,13 @@ func addTopic(t *testing.T, data templateData, name string, partitions int) {
data.KafkaTopicName = name
data.KafkaTopicPartitions = partitions
KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate)
_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=300s --namespace %s", name, testNamespace))
_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace))
assert.NoErrorf(t, err, "cannot execute command - %s", err)
}

func addCluster(t *testing.T, data templateData) {
KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate)
_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=300s --namespace %s", kafkaName, testNamespace))
_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace))
assert.NoErrorf(t, err, "cannot execute command - %s", err)
}

@@ -196,7 +196,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {

func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.ItemsToWrite = 200
data.ItemsToWrite = 400
KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
@@ -203,7 +203,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {

func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.ItemsToWrite = 200
data.ItemsToWrite = 400
KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
@@ -192,7 +192,7 @@ func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {

func testScaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
t.Log("--- testing scale out ---")
data.ItemsToWrite = 200
data.ItemsToWrite = 400
KubectlApplyWithTemplate(t, data, "insertJobTemplate", insertJobTemplate)

assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
