fix: Refactor paused e2e test (#5610)
Signed-off-by: Jorge Turrado <jorge_turrado@hotmail.es>
JorTurFer authored Mar 30, 2024
1 parent f288c01 commit f2d86a8
Showing 56 changed files with 539 additions and 584 deletions.
98 changes: 71 additions & 27 deletions tests/internals/pause_scaledjob/pause_scaledjob_test.go
@@ -30,9 +30,9 @@ var (
 	scalerName            = fmt.Sprintf("%s-scaler", testName)
 	scaledJobName         = fmt.Sprintf("%s-sj", testName)
 	minReplicaCount       = 0
-	maxReplicaCount       = 3
-	iterationCountInitial = 15
-	iterationCountLatter  = 30
+	maxReplicaCount       = 1
+	iterationCountInitial = 30
+	iterationCountLatter  = 60
 )
 
 type templateData struct {
@@ -105,7 +105,7 @@ spec:
         image: busybox
         command:
         - sleep
-        - "15"
+        - "30"
         imagePullPolicy: IfNotPresent
       restartPolicy: Never
   backoffLimit: 1
@@ -119,10 +119,32 @@ spec:
 )
 
 // Util function
-func WaitForJobByFilterCountUntilIteration(t *testing.T, kc *kubernetes.Clientset, namespace string,
-	target, iterations, intervalSeconds int, listOptions metav1.ListOptions) bool {
-	var isTargetAchieved = false
+func WaitUntilJobIsRunning(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=0",
+	}
+	for i := 0; i < iterations; i++ {
+		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
+		count := len(jobList.Items)
+
+		t.Logf("Waiting for job count to hit target. Namespace - %s, Current - %d, Target - %d",
+			namespace, count, target)
+
+		if count == target {
+			return true
+		}
+		time.Sleep(time.Duration(intervalSeconds) * time.Second)
+	}
+
+	return false
+}
+
+func WaitUntilJobIsSucceeded(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=1",
+	}
 	for i := 0; i < iterations; i++ {
 		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
 		count := len(jobList.Items)
@@ -131,15 +153,33 @@ func WaitForJobByFilterCountUntilIteration(t *testing.T, kc *kubernetes.Clientset,
 			namespace, count, target)
 
 		if count == target {
-			isTargetAchieved = true
-		} else {
-			isTargetAchieved = false
+			return true
 		}
 		time.Sleep(time.Duration(intervalSeconds) * time.Second)
 	}
 
-	return isTargetAchieved
+	return false
+}
+
+func AssertJobNotChangeKeepingIsSucceeded(t *testing.T, kc *kubernetes.Clientset, namespace string,
+	target, iterations, intervalSeconds int) bool {
+	listOptions := metav1.ListOptions{
+		FieldSelector: "status.successful=1",
+	}
+	for i := 0; i < iterations; i++ {
+		jobList, _ := kc.BatchV1().Jobs(namespace).List(context.Background(), listOptions)
+		count := len(jobList.Items)
+
+		t.Logf("Asserting the job count doesn't change. Namespace - %s, Current - %d, Target - %d",
+			namespace, count, target)
+
+		if count != target {
+			return false
+		}
+		time.Sleep(time.Duration(intervalSeconds) * time.Second)
+	}
+
+	return true
 }
 
 func TestScaler(t *testing.T) {
@@ -152,21 +192,22 @@ func TestScaler(t *testing.T) {
 
 	data, templates := getTemplateData(metricValue)
 
-	listOptions := metav1.ListOptions{
-		FieldSelector: "status.successful=0",
-	}
-
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, data.MetricThreshold, iterationCountInitial, 1, listOptions),
+	// we ensure that the gRPC server is up and ready
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, scalerName, testNamespace, 1, 60, 1),
+		"replica count should be 1 after 1 minute")
+
+	// we ensure that there is a job running
+	assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, data.MetricThreshold, iterationCountInitial, 1),
 		"job count should be %d after %d iterations", data.MetricThreshold, iterationCountInitial)
 
 	// test scaling
-	testPause(t, kc, listOptions)
-	testUnpause(t, kc, data, listOptions)
+	testPause(t, kc)
+	testUnpause(t, kc, data)
 
-	testPause(t, kc, listOptions)
-	testUnpauseWithBool(t, kc, data, listOptions)
+	testPause(t, kc)
+	testUnpauseWithBool(t, kc, data)
 
 	// cleanup
 	DeleteKubernetesResources(t, testNamespace, data, templates)
@@ -189,20 +230,23 @@ func getTemplateData(metricValue int) (templateData, []Template) {
 	}
 }
 
-func testPause(t *testing.T, kc *kubernetes.Clientset, listOptions metav1.ListOptions) {
+func testPause(t *testing.T, kc *kubernetes.Clientset) {
 	t.Log("--- testing Paused annotation ---")
 
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused=true --namespace %s", scaledJobName, testNamespace))
 	assert.NoErrorf(t, err, "cannot execute command - %s", err)
 
 	t.Log("job count does not change as job is paused")
 
-	expectedTarget := 0
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions),
+	expectedTarget := 1
+	assert.True(t, WaitUntilJobIsSucceeded(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
 		"job count should be %d after %d iterations", expectedTarget, iterationCountLatter)
+
+	assert.True(t, AssertJobNotChangeKeepingIsSucceeded(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
+		"job count should be %d during %d iterations", expectedTarget, iterationCountLatter)
 }
 
-func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) {
+func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- testing removing Paused annotation ---")
 
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused- --namespace %s", scaledJobName, testNamespace))
@@ -211,11 +255,11 @@ func testUnpause(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) {
 	t.Log("job count increases from zero as job is no longer paused")
 
 	expectedTarget := data.MetricThreshold
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions),
+	assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
 		"job count should be %d after %d iterations", expectedTarget, iterationCountLatter)
 }
 
-func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) {
+func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	t.Log("--- test setting Paused annotation to false ---")
 
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl annotate scaledjob %s autoscaling.keda.sh/paused=false --namespace %s --overwrite=true", scaledJobName, testNamespace))
@@ -224,6 +268,6 @@ func testUnpauseWithBool(t *testing.T, kc *kubernetes.Clientset, data templateData, listOptions metav1.ListOptions) {
 	t.Log("job count increases from zero as job is no longer paused")
 
 	expectedTarget := data.MetricThreshold
-	assert.True(t, WaitForJobByFilterCountUntilIteration(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1, listOptions),
+	assert.True(t, WaitUntilJobIsRunning(t, kc, testNamespace, expectedTarget, iterationCountLatter, 1),
 		"job count should be %d after %d iterations", expectedTarget, iterationCountLatter)
 }
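
For context on the refactor above: instead of threading listOptions through every call site, each new helper owns its field selector ("status.successful=0" for still-running Jobs, "status.successful=1" for completed ones), and the early return replaces the old isTargetAchieved flag. A minimal standalone sketch of the client-go listing pattern the helpers rely on — the kubeconfig wiring, namespace, and printed output are illustrative assumptions, not part of this commit:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load ~/.kube/config (assumed local setup; the e2e suite builds its
	// client through its own helpers instead).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	kc := kubernetes.NewForConfigOrDie(config)

	// A field selector filters server-side; "status.successful=0" matches
	// Jobs without a completed pod yet, as in WaitUntilJobIsRunning above.
	jobs, err := kc.BatchV1().Jobs("default").List(context.Background(),
		metav1.ListOptions{FieldSelector: "status.successful=0"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("running jobs: %d\n", len(jobs.Items))
}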
12 changes: 12 additions & 0 deletions tests/internals/scaling_modifiers/scaling_modifiers_test.go
@@ -123,6 +123,10 @@ spec:
         - secretRef:
             name: {{.SecretName}}
         imagePullPolicy: Always
+        readinessProbe:
+          httpGet:
+            path: /api/value
+            port: 8080
 `
 	soFallbackTemplate = `
 apiVersion: keda.sh/v1alpha1
@@ -271,6 +275,10 @@ func TestScalingModifiers(t *testing.T) {
 	data, templates := getTemplateData()
 	CreateKubernetesResources(t, kc, namespace, data, templates)
 
+	// we ensure that the metrics api server is up and ready
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, metricsServerDeploymentName, namespace, 1, 60, 2),
+		"replica count should be 1 after 1 minute")
+
 	testFormula(t, kc, data)
 
 	templates = append(templates, Template{Name: "soComplexFormula", Config: soComplexFormula})
@@ -309,6 +317,10 @@ func testFormula(t *testing.T, kc *kubernetes.Clientset, data templateData) {
 	_, err = ExecuteCommand(fmt.Sprintf("kubectl scale deployment/%s --replicas=1 -n %s", metricsServerDeploymentName, namespace))
 	assert.NoErrorf(t, err, "cannot scale metricsServer deployment - %s", err)
 
+	// we ensure that the metrics api server is up and ready
+	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, metricsServerDeploymentName, namespace, 1, 60, 2),
+		"replica count should be 1 after 1 minute")
+
 	data.MetricValue = 2
 	KubectlReplaceWithTemplate(t, data, "updateMetricsTemplate", updateMetricsTemplate)
 	// 2+2=4; target = 2 -> 4/2 replicas should be 2
13 changes: 7 additions & 6 deletions tests/internals/subresource_scale/subresource_scale_test.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
 	. "github.com/kedacore/keda/v2/tests/helper"
@@ -117,7 +118,11 @@ func TestScaler(t *testing.T) {
 	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
 	data, templates := getTemplateData()
-
+	t.Cleanup(func() {
+		// cleanup
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+		cleanupArgo(t)
+	})
 	setupArgo(t, kc)
 
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
@@ -127,10 +132,6 @@ func TestScaler(t *testing.T) {
 	// test scaling
 	testScaleOut(t, kc)
 	testScaleIn(t, kc)
-
-	// cleanup
-	DeleteKubernetesResources(t, testNamespace, data, templates)
-	cleanupArgo(t)
 }
 
 func setupArgo(t *testing.T, kc *kubernetes.Clientset) {
@@ -139,7 +140,7 @@ func setupArgo(t *testing.T, kc *kubernetes.Clientset) {
 		argoNamespace)
 	_, err := ExecuteCommand(cmdWithNamespace)
 
-	assert.NoErrorf(t, err, "cannot install argo resources - %s", err)
+	require.NoErrorf(t, err, "cannot install argo resources - %s", err)
 }
 
 func cleanupArgo(t *testing.T) {
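
The pattern in this file repeats across the remaining files in the commit: teardown moves into t.Cleanup, registered before the setup steps run, and setup assertions switch from assert.* (which records a failure and keeps going) to require.* (which aborts the test on the spot). Because t.Cleanup callbacks run even when require calls t.FailNow, a failed setup no longer leaks resources or cascades into misleading downstream failures. A self-contained sketch of the idea, with illustrative names only:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// fakeResource stands in for the Kubernetes objects the real tests create.
type fakeResource struct{ deleted bool }

func (r *fakeResource) Delete() { r.deleted = true }

func TestCleanupPattern(t *testing.T) {
	r := &fakeResource{}

	// Registered before any assertion can abort the test: t.Cleanup
	// functions still run when require.* calls t.FailNow.
	t.Cleanup(r.Delete)

	// require stops the test immediately on failure, so later steps never
	// run against a broken setup -- yet the cleanup above still fires.
	require.False(t, r.deleted, "resource should exist during the test")
}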
14 changes: 8 additions & 6 deletions tests/scalers/activemq/activemq_test.go
@@ -13,6 +13,7 @@ import (
 
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
 	. "github.com/kedacore/keda/v2/tests/helper"
@@ -446,9 +447,13 @@ spec:
 )
 
 func TestActiveMQScaler(t *testing.T) {
-	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
 	data, templates := getTemplateData()
+	t.Cleanup(func() {
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+	})
+
+	// Create kubernetes resources
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 
 	// setup activemq
@@ -461,16 +466,13 @@ func TestActiveMQScaler(t *testing.T) {
 	testActivation(t, kc)
 	testScaleOut(t, kc)
 	testScaleIn(t, kc)
-
-	// cleanup
-	DeleteKubernetesResources(t, testNamespace, data, templates)
 }
 
 func setupActiveMQ(t *testing.T, kc *kubernetes.Clientset) {
-	assert.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "activemq", testNamespace, 1, 60, 3),
+	require.True(t, WaitForStatefulsetReplicaReadyCount(t, kc, "activemq", testNamespace, 1, 60, 3),
 		"activemq should be up")
 	err := checkIfActiveMQStatusIsReady(t, activemqPodName)
-	assert.NoErrorf(t, err, "%s", err)
+	require.NoErrorf(t, err, "%s", err)
 }
 
 func checkIfActiveMQStatusIsReady(t *testing.T, name string) error {
13 changes: 8 additions & 5 deletions tests/scalers/apache_kafka/apache_kafka_test.go
@@ -10,6 +10,7 @@ import (
 
 	"github.com/joho/godotenv"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
 
 	. "github.com/kedacore/keda/v2/tests/helper"
@@ -386,10 +387,13 @@ spec:
 
 func TestScaler(t *testing.T) {
 	// setup
 	t.Log("--- setting up ---")
-	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
 	data, templates := getTemplateData()
+	t.Cleanup(func() {
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+	})
+
+	// Create kubernetes resources
 	CreateKubernetesResources(t, kc, testNamespace, data, templates)
 	addCluster(t, data)
 	addTopic(t, data, topic1, topicPartitions)
@@ -407,7 +411,6 @@ func TestScaler(t *testing.T) {
 	testOneOnInvalidOffset(t, kc, data)
 	testPersistentLag(t, kc, data)
 	testScalingOnlyPartitionsWithLag(t, kc, data)
-	DeleteKubernetesResources(t, testNamespace, data, templates)
 }
 
 func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) {
@@ -667,15 +670,15 @@ func addTopic(t *testing.T, data templateData, name string, partitions int) {
 	data.KafkaTopicPartitions = partitions
 	KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate)
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace))
-	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	t.Log("--- kafka topic added ---")
 }
 
 func addCluster(t *testing.T, data templateData) {
 	t.Log("--- adding kafka cluster ---")
 	KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate)
 	_, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace))
-	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	require.NoErrorf(t, err, "cannot execute command - %s", err)
 	t.Log("--- kafka cluster added ---")
 }
18 changes: 7 additions & 11 deletions tests/scalers/arangodb/arangodb_test.go
@@ -201,14 +201,17 @@ spec:
 )
 
 func TestArangoDBScaler(t *testing.T) {
-	// Create kubernetes resources
 	kc := GetKubernetesClient(t)
 
+	data, templates := getTemplateData()
 	CreateNamespace(t, kc, testNamespace)
+	t.Cleanup(func() {
+		arangodb.UninstallArangoDB(t, testNamespace)
+		DeleteKubernetesResources(t, testNamespace, data, templates)
+	})
 
+	// Create kubernetes resources
 	arangodb.InstallArangoDB(t, kc, testNamespace)
 	arangodb.SetupArangoDB(t, kc, testNamespace, arangoDBName, arangoDBCollection)
 
-	data, templates := getTemplateData()
 	KubectlApplyMultipleWithTemplate(t, data, templates)
 
 	assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
@@ -217,13 +220,6 @@ func TestArangoDBScaler(t *testing.T) {
 	testActivation(t, kc, data)
 	testScaleOut(t, kc, data)
 	testScaleIn(t, kc, data)
-
-	// cleanup
-	KubectlDeleteMultipleWithTemplate(t, data, templates)
-	arangodb.UninstallArangoDB(t, testNamespace)
-
-	DeleteNamespace(t, testNamespace)
-	WaitForNamespaceDeletion(t, testNamespace)
 }
 
 func getTemplateData() (templateData, []Template) {