From d6b3f05eceb08a6408e9a4679c84ea0d55d7fb6e Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 20:13:53 +0100
Subject: [PATCH 01/48] Reduce concurrent executions from 6 to 5

Signed-off-by: jorturfer
---
 tests/run-all.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/run-all.sh b/tests/run-all.sh
index d03d5d7e4ae..25a8e9b482c 100755
--- a/tests/run-all.sh
+++ b/tests/run-all.sh
@@ -29,7 +29,7 @@ function run_tests {
     pids+=($pid)
     lookup[$pid]=$test_case
     # limit concurrent runs
-    if [[ "$counter" -gt "$concurrent_tests_limit" ]]; then
+    if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then
       wait_for_jobs
       counter=0
       pids=()

From 2be391009355c750960339f4b8d7ed287c38beb9 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:17:54 +0100
Subject: [PATCH 02/48] Update e2e test to 8

Signed-off-by: jorturfer
---
 tests/run-all.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/run-all.sh b/tests/run-all.sh
index 25a8e9b482c..89aa1af851f 100755
--- a/tests/run-all.sh
+++ b/tests/run-all.sh
@@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts}
 DIR=$(dirname "$0")
 cd $DIR
 
-concurrent_tests_limit=5
+concurrent_tests_limit=8
 pids=()
 lookup=()
 failed_count=0

From b8f0844ae3db3704c00fdedeb1818f6bb016763d Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:19:51 +0100
Subject: [PATCH 03/48] Increase az pipelines timeouts

Signed-off-by: jorturfer
---
 tests/scalers/azure-pipelines.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/scalers/azure-pipelines.test.ts b/tests/scalers/azure-pipelines.test.ts
index 0d5d09251f9..c5700e207d9 100644
--- a/tests/scalers/azure-pipelines.test.ts
+++ b/tests/scalers/azure-pipelines.test.ts
@@ -68,7 +68,7 @@ test.serial('PoolID: Deployment should scale to 3 replicas after queueing 3 jobs
 
 test.serial('PoolID: Deployment should scale to 1 replica after finishing 3 jobs', async t => {
   // wait 10 minutes for the jobs to finish and scale down
-  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 10000), 'replica count should be 1 after finishing 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 1 after finishing 3 jobs')
 })
 
 test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jobs', async t => {
@@ -91,7 +91,7 @@ test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jo
 
 test.serial('PoolName: should scale to 1 replica after finishing 3 jobs', async t => {
   // wait 10 minutes for the jobs to finish and scale down
-  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 10000), 'replica count should be 1 after finishing 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 1 after finishing 3 jobs')
 })
 
 test.after.always('clean up azure-pipelines deployment', t => {

From d3ec3a0d72e8dd71b01fd035fde7f8cf039d4563 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:44:13 +0100
Subject: [PATCH 04/48] Increase mysql timeouts

Signed-off-by: jorturfer
---
 tests/scalers/mysql.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/scalers/mysql.test.ts b/tests/scalers/mysql.test.ts
index 14f36d6df16..2c41448adf2 100644
--- a/tests/scalers/mysql.test.ts
+++ b/tests/scalers/mysql.test.ts
@@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 (the max) then back to 0`, t => {
 
   const maxReplicaCount = '5'
 
-  for (let i = 0; i < 30 && replicaCount !== maxReplicaCount; i++) {
+  for (let i = 0; i < 60 && replicaCount !== maxReplicaCount; i++) {
     replicaCount = sh.exec(
       `kubectl get deployment.apps/${deploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
@@ -85,7 +85,7 @@ test.serial(`Deployment should scale to 5 (the max) then back to 0`, t => {
     }
   }
 
-  t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 60 seconds`)
+  t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 120 seconds`)
 
   for (let i = 0; i < 36 && replicaCount !== '0'; i++) {
     replicaCount = sh.exec(
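Patches 01-04 above all tune the same mechanism: each scale assertion sits behind a poll loop whose wall-clock budget is the iteration count multiplied by the sleep interval, which is why doubling the mysql loop from 30 to 60 iterations moves the asserted deadline from 60 to 120 seconds. A minimal TypeScript sketch of that recurring loop; the function name is illustrative, since the actual tests inline this pattern rather than sharing a helper:

import * as sh from 'shelljs'

// Sketch of the polling pattern the commits above keep retuning.
// `waitForReplicas` is an illustrative name, not a helper from the repo.
function waitForReplicas(deployment: string, namespace: string, target: string,
                         iterations: number, sleepSeconds: number): string {
    let replicaCount = ''
    for (let i = 0; i < iterations && replicaCount !== target; i++) {
        replicaCount = sh.exec(
            `kubectl get deployment.apps/${deployment} --namespace ${namespace} -o jsonpath="{.spec.replicas}"`
        ).stdout
        if (replicaCount !== target) {
            // effective timeout = iterations * sleepSeconds seconds
            sh.exec(`sleep ${sleepSeconds}s`)
        }
    }
    return replicaCount
}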
From 93409a2fdbfedb229b49c1d8185b7b3a3d7e2894 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:45:51 +0100
Subject: [PATCH 05/48] Increase predictkube timeouts

Signed-off-by: jorturfer
---
 tests/scalers/predictkube.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/scalers/predictkube.test.ts b/tests/scalers/predictkube.test.ts
index 5db9437fb63..165e7a6b013 100644
--- a/tests/scalers/predictkube.test.ts
+++ b/tests/scalers/predictkube.test.ts
@@ -68,7 +68,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding
   // keda based deployment should start scaling up with http requests issued
   let replicaCount = '0'
   for (let i = 0; i < 60 && replicaCount !== '5'; i++) {
-    t.log(`Waited ${5 * i} seconds for predictkube-based deployments to scale up`)
+    t.log(`Waited ${10 * i} seconds for predictkube-based deployments to scale up`)
     const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout
     t.log(`Logs from the generate requests: ${jobLogs}`)
 
@@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding
       `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
     if (replicaCount !== '5') {
-      sh.exec('sleep 5s')
+      sh.exec('sleep 10s')
     }
   }

From 2f2c96581957d54ee8bf9ad12f21559d56876d4f Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:48:55 +0100
Subject: [PATCH 06/48] Increase selenium timeouts

Signed-off-by: jorturfer
---
 tests/scalers/selenium-grid.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts
index 03cfbcf1fc4..71f4f4641f1 100644
--- a/tests/scalers/selenium-grid.test.ts
+++ b/tests/scalers/selenium-grid.test.ts
@@ -151,7 +151,7 @@ test.serial('should create two chrome and one firefox nodes', t => {
             break;
         }
         console.log('Waiting for chrome 91 to scale down to 0 pods')
-        sh.exec('sleep 5s')
+        sh.exec('sleep 10s')
     }
 
     const seleniumGridTestDeployTmpFile = tmp.fileSync();

From 0327f0f29c9ba764adac13fb91f44a614538fba3 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 29 Jan 2022 22:52:02 +0100
Subject: [PATCH 07/48] Increase azure-queue timeouts

Signed-off-by: jorturfer
---
 tests/scalers/azure-queue.test.ts | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts
index 8d5e64765f7..67deb1f2cd3 100644
--- a/tests/scalers/azure-queue.test.ts
+++ b/tests/scalers/azure-queue.test.ts
@@ -46,7 +46,7 @@ test.serial.cb(
     (n, cb) => queueSvc.createMessage('queue-name', `test ${n}`, cb),
     () => {
       let replicaCount = '0'
-      for (let i = 0; i < 30 && replicaCount !== '4'; i++) {
+      for (let i = 0; i < 120 && replicaCount !== '4'; i++) {
         replicaCount = sh.exec(
           `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
         ).stdout
         if (replicaCount !== '4') {
           sh.exec('sleep 1s')
         }
       }
 
-      t.is('4', replicaCount, 'Replica count should be 4 after 30 seconds')
+      t.is('4', replicaCount, 'Replica count should be 4 after 120 seconds')
 
-      for (let i = 0; i < 50 && replicaCount !== '0'; i++) {
+      for (let i = 0; i < 60 && replicaCount !== '0'; i++) {
         replicaCount = sh.exec(
           `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
         ).stdout
         if (replicaCount !== '0') {
-          sh.exec('sleep 5s')
+          sh.exec('sleep 10s')
         }
       }
 
-      t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes')
+      t.is('0', replicaCount, 'Replica count should be 0 after 6 minutes')
       t.end()
     }
   )
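The azure-queue test above seeds the queue through callback-style APIs before entering its poll loop; the `(n, cb) => queueSvc.createMessage(...)` iteratee and the trailing completion callback match the async library's mapLimit signature. A rough sketch of that seeding step, in which the connection-string variable name and the counts are assumptions for illustration rather than values taken from the test:

import * as async from 'async'
import * as azure from 'azure-storage'

// Illustrative seeding step: push numbered messages onto the queue that the
// ScaledObject watches, then run the polling assertions in the final callback.
// The env var name and the message/concurrency counts are assumptions.
const connectionString = process.env['STORAGE_CONNECTION_STRING'] || ''
const queueSvc = azure.createQueueService(connectionString)

async.mapLimit(
    Array.from({ length: 1000 }, (_, n) => n),
    20,
    (n, cb) => queueSvc.createMessage('queue-name', `test ${n}`, cb),
    () => {
        // the replica polling loop from the patch above runs here
    }
)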
From 8502c3a83c5956dc87cc4b2aabaac2b43ac06d0c Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 12:25:08 +0100
Subject: [PATCH 08/48] Increase redis-streams timeouts

Signed-off-by: jorturfer
---
 tests/scalers/redis-streams.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts
index 72fa5032ce6..2bf0f8701fa 100644
--- a/tests/scalers/redis-streams.test.ts
+++ b/tests/scalers/redis-streams.test.ts
@@ -66,7 +66,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back
   }
   // with messages published, the consumer deployment should start receiving the messages
   let replicaCount = '0'
-  for (let i = 0; i < 20 && replicaCount !== '5'; i++) {
+  for (let i = 0; i < 60 && replicaCount !== '5'; i++) {
     replicaCount = sh.exec(
       `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
@@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back
     }
   }
 
-  t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds')
+  t.is('5', replicaCount, 'Replica count should be 5 within 180 seconds')
 
   for (let i = 0; i < 60 && replicaCount !== '1'; i++) {
     replicaCount = sh.exec(

From ccddc6570324471a7b01fad07091e0ff5689fdb6 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 12:25:21 +0100
Subject: [PATCH 09/48] Increase open-stack timeouts

Signed-off-by: jorturfer
---
 tests/scalers/openstack-swift.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/scalers/openstack-swift.test.ts b/tests/scalers/openstack-swift.test.ts
index fb267bb8e16..ef871e4bacf 100644
--- a/tests/scalers/openstack-swift.test.ts
+++ b/tests/scalers/openstack-swift.test.ts
@@ -184,13 +184,13 @@ test.serial('Deployment should be scaled to 5 after deleting 5 objects in contai
   await swiftClient.deleteObject(swiftContainerName, '2/hello-world.txt')
   await swiftClient.deleteObject(swiftContainerName, '3/')
 
-  for (let i = 0; i < 110 && replicaCount !== '5'; i++) {
+  for (let i = 0; i < 60 && replicaCount !== '5'; i++) {
     replicaCount = sh.exec(
       `kubectl get deployment.apps/${deploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
 
     if (replicaCount !== '5') {
-      sh.exec('sleep 3s')
+      sh.exec('sleep 10s')
     }
   }

From 21184cac9fc96baf41a9eb18a4920ebb5c21f236 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 13:42:32 +0100
Subject: [PATCH 10/48] Remove unnecessary import

Signed-off-by: jorturfer
---
 tests/scalers/mongodb.test.ts | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/scalers/mongodb.test.ts b/tests/scalers/mongodb.test.ts
index b314b525500..3501aacce3a 100644
--- a/tests/scalers/mongodb.test.ts
+++ b/tests/scalers/mongodb.test.ts
@@ -1,4 +1,3 @@
-import * as async from 'async'
 import * as fs from 'fs'
 import * as sh from 'shelljs'
 import * as tmp from 'tmp'

From 638bb5412d314444ec2d002e920a597658c195eb Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 15:49:10 +0100
Subject: [PATCH 11/48] modify az pipeline e2e test

Signed-off-by: jorturfer
---
 tests/scalers/azure-pipelines.test.ts | 61 +++++++++++++++------------
 1 file changed, 33 insertions(+), 28 deletions(-)

diff --git a/tests/scalers/azure-pipelines.test.ts b/tests/scalers/azure-pipelines.test.ts
index c5700e207d9..1abb94be73f 100644
--- a/tests/scalers/azure-pipelines.test.ts
+++ b/tests/scalers/azure-pipelines.test.ts
@@ -15,6 +15,8 @@ const projectName = process.env['AZURE_DEVOPS_PROJECT']
 const buildDefinitionID = process.env['AZURE_DEVOPS_BUILD_DEFINITON_ID']
 const poolName = process.env['AZURE_DEVOPS_POOL_NAME']
 
+let poolID: number
+
 test.before(async t => {
     if (!organizationURL || !personalAccessToken || !projectName || !buildDefinitionID || !poolName) {
         t.fail('AZURE_DEVOPS_ORGANIZATION_URL, AZURE_DEVOPS_PAT, AZURE_DEVOPS_PROJECT, AZURE_DEVOPS_BUILD_DEFINITON_ID and AZURE_DEVOPS_POOL_NAME environment variables are required for azure pipelines tests')
@@ -25,7 +27,7 @@ test.before(async t => {
   let taskAgent: ta.ITaskAgentApiBase = await connection.getTaskAgentApi();
   let agentPool: ti.TaskAgentPool[] = await taskAgent.getAgentPools(poolName)
 
-  let poolID: number = agentPool[0].id
+  poolID = agentPool[0].id
 
   if(!poolID) {
     t.fail("failed to convert poolName to poolID")
@@ -39,39 +41,44 @@ test.before(async t => {
     .replace('{{AZP_POOL}}', poolName)
     .replace('{{AZP_URL}}', organizationURL))
   sh.exec(`kubectl create namespace ${defaultNamespace}`)
-  t.is(0, sh.exec(`kubectl apply -f ${deployFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.')
+  t.is(0, sh.exec(`kubectl apply -f ${deployFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.')  
+})
+
+test.serial('Deployment should have 1 replicas on start', async t => {
+  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should start out as 1')
+})
+
+
+test.serial('Deployment should have 0 replicas after scale', async t => {
+  // wait for the first agent to be registered in the agent pool
+  await sleep(20 * 1000)
+
   const scaledObjectFile = tmp.fileSync()
   fs.writeFileSync(scaledObjectFile.name, poolIdScaledObject
     .replace('{{AZP_POOL_ID}}', poolID.toString()))
   t.is(0, sh.exec(`kubectl apply -f ${scaledObjectFile.name} --namespace ${defaultNamespace}`).code, 'creating ScaledObject with poolId should work.')
-})
 
-test.serial('Deployment should have 1 replicas on start', async t => {
-  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should start out as 1')
+  t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 0 if no pending jobs')
 })
 
-test.serial('PoolID: Deployment should scale to 3 replicas after queueing 3 jobs', async t => {
+
+test.serial('PoolID: Deployment should scale to 1 replica after queueing job', async t => {
   let authHandler = azdev.getPersonalAccessTokenHandler(personalAccessToken);
   let connection = new azdev.WebApi(organizationURL, authHandler);
   let build: ba.IBuildApi = await connection.getBuildApi();
   var definitionID = parseInt(buildDefinitionID)
 
-  // wait for the first agent to be registered in the agent pool
-  await sleep(20 * 1000)
+  await build.queueBuild(null, projectName, null, null, null, definitionID)
 
-  for(let i = 0; i < 3; i++) {
-    await build.queueBuild(null, projectName, null, null, null, definitionID)
-  }
-
-  t.true(await waitForDeploymentReplicaCount(3, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 3 after starting 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 1 after starting a job')
 })
 
-test.serial('PoolID: Deployment should scale to 1 replica after finishing 3 jobs', async t => {
+test.serial('PoolID: Deployment should scale to 0 replicas after finishing job', async t => {
   // wait 10 minutes for the jobs to finish and scale down
-  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 1 after finishing 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 0 after finishing')
 })
 
-test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jobs', async t => {
+test.serial('PoolName: Deployment should scale to 1 replica after queueing job', async t => {
   const poolNameScaledObjectFile = tmp.fileSync()
   fs.writeFileSync(poolNameScaledObjectFile.name, poolNameScaledObject
     .replace('{{AZP_POOL}}', poolName))
@@ -82,16 +89,14 @@ test.serial('PoolName: Deployment should scale to 3 replicas after queueing 3 jo
   let build: ba.IBuildApi = await connection.getBuildApi();
   var definitionID = parseInt(buildDefinitionID)
 
-  for(let i = 0; i < 3; i++) {
-    await build.queueBuild(null, projectName, null, null, null, definitionID)
-  }
+  await build.queueBuild(null, projectName, null, null, null, definitionID)
 
-  t.true(await waitForDeploymentReplicaCount(3, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 3 after starting 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 30, 5000), 'replica count should be 1 after starting a job')
 })
 
-test.serial('PoolName: should scale to 1 replica after finishing 3 jobs', async t => {
+test.serial('PoolName: should scale to 0 replicas after finishing job', async t => {
   // wait 10 minutes for the jobs to finish and scale down
-  t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 1 after finishing 3 jobs')
+  t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 10000), 'replica count should be 0 after finishing')
 })
 
 test.after.always('clean up azure-pipelines deployment', t => {
@@ -157,9 +162,9 @@ metadata:
 spec:
   scaleTargetRef:
     name: test-deployment
-  minReplicaCount: 1
-  maxReplicaCount: 3
-  pollingInterval: 50
+  minReplicaCount: 0
+  maxReplicaCount: 1
+  pollingInterval: 30
   cooldownPeriod: 60
   advanced:
     horizontalPodAutoscalerConfig:
@@ -179,9 +184,9 @@ metadata:
 spec:
   scaleTargetRef:
     name: test-deployment
-  minReplicaCount: 1
-  maxReplicaCount: 3
-  pollingInterval: 50
+  minReplicaCount: 0
+  maxReplicaCount: 1
+  pollingInterval: 30
   cooldownPeriod: 60
   advanced:
     horizontalPodAutoscalerConfig:
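Patch 11 rewrites the scale-out trigger to queue exactly one Azure DevOps build instead of three, pairing it with a ScaledObject that scales 0 to 1. For reference, the azure-devops-node-api calls it relies on compose as below; this is a condensed sketch of the test's own flow, and the import paths are assumed from the package's usual layout:

import * as azdev from 'azure-devops-node-api'
import * as ba from 'azure-devops-node-api/BuildApi'

// Condensed from the test above: authenticate with a PAT, open a Build API
// client, and queue one run of the pipeline definition to drive the
// 0 -> 1 scale-out that the assertions then poll for.
async function queueOneBuild(orgUrl: string, pat: string,
                             project: string, definitionID: number): Promise<void> {
    const authHandler = azdev.getPersonalAccessTokenHandler(pat)
    const connection = new azdev.WebApi(orgUrl, authHandler)
    const build: ba.IBuildApi = await connection.getBuildApi()
    await build.queueBuild(null, project, null, null, null, definitionID)
}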
From a7e4542eb52c64a14ea6d60391686b94563736ed Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 15:55:34 +0100
Subject: [PATCH 12/48] update changelog

Signed-off-by: jorturfer
---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 24f3586df98..fa37d120de4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,7 +34,7 @@
 
 ### Improvements
 
-- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX))
+- Improve e2e tests reliability ([#2580](https://github.com/kedacore/keda/issues/2580))
 
 ### Breaking Changes

From f04baa499c2797e8c3fbbb0539fac467185de85e Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 16:54:19 +0100
Subject: [PATCH 13/48] Increase redis-streams timeouts

Signed-off-by: jorturfer
---
 tests/scalers/redis-streams.test.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts
index 2bf0f8701fa..8b7df291aed 100644
--- a/tests/scalers/redis-streams.test.ts
+++ b/tests/scalers/redis-streams.test.ts
@@ -66,17 +66,17 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back
   }
   // with messages published, the consumer deployment should start receiving the messages
   let replicaCount = '0'
-  for (let i = 0; i < 60 && replicaCount !== '5'; i++) {
+  for (let i = 0; i < 30 && replicaCount !== '5'; i++) {
     replicaCount = sh.exec(
       `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
     ).stdout
     t.log('(scale up) replica count is:' + replicaCount)
     if (replicaCount !== '5') {
-      sh.exec('sleep 3s')
+      sh.exec('sleep 10s')
     }
   }
 
-  t.is('5', replicaCount, 'Replica count should be 5 within 180 seconds')
+  t.is('5', replicaCount, 'Replica count should be 5 within 300 seconds')
 
   for (let i = 0; i < 60 && replicaCount !== '1'; i++) {
     replicaCount = sh.exec(
From 36d4ed3a1b75cdc16f19f775d7f0f9b3cdf656cf Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 17:38:05 +0100
Subject: [PATCH 14/48] fix style

Signed-off-by: jorturfer
---
 tests/scalers/azure-pipelines.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/scalers/azure-pipelines.test.ts b/tests/scalers/azure-pipelines.test.ts
index 1abb94be73f..c876328745b 100644
--- a/tests/scalers/azure-pipelines.test.ts
+++ b/tests/scalers/azure-pipelines.test.ts
@@ -41,7 +41,7 @@ test.before(async t => {
     .replace('{{AZP_POOL}}', poolName)
     .replace('{{AZP_URL}}', organizationURL))
   sh.exec(`kubectl create namespace ${defaultNamespace}`)
-  t.is(0, sh.exec(`kubectl apply -f ${deployFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.')  
+  t.is(0, sh.exec(`kubectl apply -f ${deployFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.')
 })

From 0455578d2eb27ca9b8314e02a56953c120d6f49e Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sun, 30 Jan 2022 18:01:29 +0100
Subject: [PATCH 15/48] Increase azure-queue timeout

Signed-off-by: jorturfer
---
 tests/scalers/azure-queue.test.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts
index 67deb1f2cd3..437f6aeab0a 100644
--- a/tests/scalers/azure-queue.test.ts
+++ b/tests/scalers/azure-queue.test.ts
@@ -46,16 +46,16 @@ test.serial.cb(
     (n, cb) => queueSvc.createMessage('queue-name', `test ${n}`, cb),
     () => {
       let replicaCount = '0'
-      for (let i = 0; i < 120 && replicaCount !== '4'; i++) {
+      for (let i = 0; i < 60 && replicaCount !== '4'; i++) {
         replicaCount = sh.exec(
           `kubectl get deployment.apps/test-deployment --namespace 
${defaultNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '4') { - sh.exec('sleep 1s') + sh.exec('sleep 5s') } } - t.is('4', replicaCount, 'Replica count should be 4 after 120 seconds') + t.is('4', replicaCount, 'Replica count should be 4 after 300 seconds') for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( From dc7e0b45597dba07e597def58c20e8dff017454e Mon Sep 17 00:00:00 2001 From: jorturfer Date: Sun, 30 Jan 2022 18:02:39 +0100 Subject: [PATCH 16/48] Reduce concurrency on tests Signed-off-by: jorturfer --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index 89aa1af851f..380818efdd9 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=8 +concurrent_tests_limit=3 pids=() lookup=() failed_count=0 From 4ccc689f5d23bbc4347c538562d2284e85776a06 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Sun, 30 Jan 2022 21:36:22 +0100 Subject: [PATCH 17/48] Increase time for integration test and concurrency on e2e Signed-off-by: jorturfer --- config/crd/bases/keda.sh_scaledjobs.yaml | 621 ++++++++++++------ .../keda/scaledobject_controller_test.go | 1 + tests/run-all.sh | 2 +- 3 files changed, 433 insertions(+), 191 deletions(-) diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 19edd6ca0d6..d9ef90afbf7 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -1404,9 +1404,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -1477,10 +1476,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -1505,21 +1506,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -1590,10 +1589,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -1619,9 +1620,7 @@ spec: Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -1645,6 +1644,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -1712,10 +1731,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -1823,9 +1840,7 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -1849,6 +1864,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -1916,10 +1951,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. 
TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -2006,13 +2039,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -2033,7 +2069,8 @@ spec: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -2041,11 +2078,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -2053,7 +2093,8 @@ spec: if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -2075,6 +2116,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -2084,7 +2127,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -2107,7 +2151,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. properties: localhostProfile: description: localhostProfile indicates @@ -2136,7 +2182,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -2190,9 +2237,7 @@ spec: cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -2216,6 +2261,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -2283,10 +2348,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -2516,22 +2579,21 @@ spec: it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field - is alpha-level and is only honored by servers that enable - the EphemeralContainers feature. + is beta-level and available on clusters that haven't + disabled the EphemeralContainers feature gate. items: - description: An EphemeralContainer is a container that - may be added temporarily to an existing pod for user-initiated + description: "An EphemeralContainer is a temporary container + that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they - will not be restarted when they exit or when a pod - is removed or restarted. If an ephemeral container - causes a pod to exceed its resource allocation, the - pod may be evicted. Ephemeral containers may not be - added by directly updating the pod spec. They must - be added via the pod's ephemeralcontainers subresource, - and they will appear in the pod spec once added. This - is an alpha feature enabled by the EphemeralContainers - feature flag. + will not be restarted when they exit or when a Pod + is removed or restarted. The kubelet may evict a Pod + if an ephemeral container causes the Pod to exceed + its resource allocation. \n To add an ephemeral container, + use the ephemeralcontainers subresource of an existing + Pod. Ephemeral containers may not be removed or restarted. + \n This is a beta feature available on clusters that + haven't disabled the EphemeralContainers feature gate." properties: args: description: 'Arguments to the entrypoint. The docker @@ -2757,9 +2819,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. 
properties: command: description: Command is the command @@ -2830,10 +2891,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -2858,21 +2921,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -2943,10 +3004,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -2971,9 +3034,7 @@ spec: containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -2997,6 +3058,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. 
@@ -3064,10 +3145,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3159,14 +3238,16 @@ spec: - containerPort type: object type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map readinessProbe: description: Probes are not allowed for ephemeral containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -3190,6 +3271,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -3257,10 +3358,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3348,13 +3447,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -3375,7 +3477,8 @@ spec: description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -3383,11 +3486,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -3395,7 +3501,8 @@ spec: if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -3417,6 +3524,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -3426,7 +3535,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -3449,7 +3559,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. properties: localhostProfile: description: localhostProfile indicates @@ -3478,7 +3590,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -3524,9 +3637,7 @@ spec: containers. properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -3550,6 +3661,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -3617,10 +3748,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -3686,13 +3815,15 @@ spec: stdin will never receive an EOF. Default is false type: boolean targetContainerName: - description: If set, the name of the container from - PodSpec that this ephemeral container targets. + description: "If set, the name of the container + from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set - then the ephemeral container is run in whatever - namespaces are shared for the pod. Note that the - container runtime must support this feature. 
+ then the ephemeral container uses the namespaces + configured in the Pod spec. \n The container runtime + must implement support for this feature. If the + runtime does not support namespace targeting then + the result of setting this field is undefined." type: string terminationMessagePath: description: 'Optional: Path at which the file to @@ -3744,7 +3875,8 @@ spec: type: array volumeMounts: description: Pod volumes to mount into the container's - filesystem. Cannot be updated. + filesystem. Subpath mounts are not allowed for + ephemeral containers. Cannot be updated. items: description: VolumeMount describes a mounting of a Volume within a container. @@ -4106,9 +4238,8 @@ spec: More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -4179,10 +4310,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -4207,21 +4340,19 @@ spec: API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s termination - grace period countdown begins before the PreStop - hooked is executed. Regardless of the outcome + crashes or exits. The Pod''s termination grace + period countdown begins before the PreStop + hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace - period. Other management of the container - blocks until the hook completes or until the - termination grace period is reached. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + period (unless delayed by finalizers). Other + management of the container blocks until the + hook completes or until the termination grace + period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the - action to take. + description: Exec specifies the action to + take. properties: command: description: Command is the command @@ -4292,10 +4423,12 @@ spec: - port type: object tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet - supported TODO: implement a realistic - TCP lifecycle hook' + description: Deprecated. TCPSocket is NOT + supported as a LifecycleHandler and kept + for the backward compatibility. There + are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler + is specified. properties: host: description: 'Optional: Host name to @@ -4321,9 +4454,7 @@ spec: Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4347,6 +4478,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4414,10 +4565,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -4525,9 +4674,7 @@ spec: More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4551,6 +4698,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." + type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4618,10 +4785,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -4708,13 +4873,16 @@ spec: controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run - as Privileged 2) has CAP_SYS_ADMIN' + as Privileged 2) has CAP_SYS_ADMIN Note that + this field cannot be set when spec.os.name + is windows.' type: boolean capabilities: description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container - runtime. + runtime. Note that this field cannot be set + when spec.os.name is windows. properties: add: description: Added capabilities @@ -4735,7 +4903,8 @@ spec: description: Run container in privileged mode. 
Processes in privileged containers are essentially equivalent to root on the host. Defaults to - false. + false. Note that this field cannot be set + when spec.os.name is windows. type: boolean procMount: description: procMount denotes the type of proc @@ -4743,11 +4912,14 @@ spec: is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature - flag to be enabled. + flag to be enabled. Note that this field cannot + be set when spec.os.name is windows. type: string readOnlyRootFilesystem: description: Whether this container has a read-only - root filesystem. Default is false. + root filesystem. Default is false. Note that + this field cannot be set when spec.os.name + is windows. type: boolean runAsGroup: description: The GID to run the entrypoint of @@ -4755,7 +4927,8 @@ spec: if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -4777,6 +4950,8 @@ spec: be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is windows. format: int64 type: integer seLinuxOptions: @@ -4786,7 +4961,8 @@ spec: for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is windows. properties: level: description: Level is SELinux level label @@ -4809,7 +4985,9 @@ spec: description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container - options override the pod options. + options override the pod options. Note that + this field cannot be set when spec.os.name + is windows. properties: localhostProfile: description: localhostProfile indicates @@ -4838,7 +5016,8 @@ spec: from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes - precedence. + precedence. Note that this field cannot be + set when spec.os.name is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where @@ -4892,9 +5071,7 @@ spec: cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: - description: One and only one of the following - should be specified. Exec specifies the action - to take. + description: Exec specifies the action to take. properties: command: description: Command is the command line @@ -4918,6 +5095,26 @@ spec: 1. format: int32 type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. This is an alpha field and requires + enabling GRPCContainerProbe feature gate. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the + service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default + behavior is defined by gRPC." 
+ type: string + required: + - port + type: object httpGet: description: HTTPGet specifies the http request to perform. @@ -4985,10 +5182,8 @@ spec: format: int32 type: integer tcpSocket: - description: 'TCPSocket specifies an action - involving a TCP port. TCP hooks not yet supported - TODO: implement a realistic TCP lifecycle - hook' + description: TCPSocket specifies an action involving + a TCP port. properties: host: description: 'Optional: Host name to connect @@ -5173,6 +5368,38 @@ spec: that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' type: object x-kubernetes-map-type: atomic + os: + description: "Specifies the OS of the containers in the + pod. Some pod and container fields are restricted if + this is set. \n If the OS field is set to linux, the + following fields must be unset: -securityContext.windowsOptions + \n If the OS field is set to windows, following fields + must be unset: - spec.hostPID - spec.hostIPC - spec.securityContext.seLinuxOptions + - spec.securityContext.seccompProfile - spec.securityContext.fsGroup + - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls + - spec.shareProcessNamespace - spec.securityContext.runAsUser + - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups + - spec.containers[*].securityContext.seLinuxOptions + - spec.containers[*].securityContext.seccompProfile + - spec.containers[*].securityContext.capabilities - + spec.containers[*].securityContext.readOnlyRootFilesystem + - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation + - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser + - spec.containers[*].securityContext.runAsGroup This + is an alpha field and requires the IdentifyPodOS feature" + properties: + name: + description: 'Name is the name of the operating system. + The currently supported values are linux and windows. + Additional value may be defined in future and can + be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration + Clients should expect to handle additional values + and treat unrecognized values in this field as os: + null' + type: string + required: + - name + type: object overhead: additionalProperties: anyOf: @@ -5273,7 +5500,8 @@ spec: created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership - and permissions of any volume." + and permissions of any volume. Note that this field + cannot be set when spec.os.name is windows." format: int64 type: integer fsGroupChangePolicy: @@ -5284,7 +5512,9 @@ spec: based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" - and "Always". If not specified, "Always" is used.' + and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name + is windows.' type: string runAsGroup: description: The GID to run the entrypoint of the @@ -5292,7 +5522,8 @@ spec: May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for - that container. + that container. Note that this field cannot be set + when spec.os.name is windows. format: int64 type: integer runAsNonRoot: @@ -5311,7 +5542,8 @@ spec: image metadata if unspecified. 
May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. + takes precedence for that container. Note that this + field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: @@ -5321,7 +5553,8 @@ spec: container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. + for that container. Note that this field cannot + be set when spec.os.name is windows. properties: level: description: Level is SELinux level label that @@ -5342,7 +5575,8 @@ spec: type: object seccompProfile: description: The seccomp options to use by the containers - in this pod. + in this pod. Note that this field cannot be set + when spec.os.name is windows. properties: localhostProfile: description: localhostProfile indicates a profile @@ -5367,7 +5601,8 @@ spec: description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups - will be added to any container. + will be added to any container. Note that this field + cannot be set when spec.os.name is windows. items: format: int64 type: integer @@ -5376,6 +5611,8 @@ spec: description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name + is windows. items: description: Sysctl defines a kernel parameter to be set @@ -5397,6 +5634,8 @@ spec: a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name + is linux. properties: gmsaCredentialSpec: description: GMSACredentialSpec is where the GMSA @@ -5624,7 +5863,7 @@ spec: location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming - pod if and only if every possible node assigment + pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread @@ -6104,9 +6343,7 @@ spec: to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent - volumes at the same time. \n This is a beta feature - and only available when the GenericEphemeralVolume - feature gate is enabled." + volumes at the same time." properties: volumeClaimTemplate: description: "Will be used to create a stand-alone @@ -6246,7 +6483,13 @@ spec: resources: description: 'Resources represents the minimum resources the volume should - have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed + to specify resource requirements that + are lower than previous value but + must still be higher than capacity + recorded in the status field of the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' properties: limits: additionalProperties: @@ -7340,9 +7583,7 @@ spec: Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. 
If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible - to be deleted immediately after it finishes. This field is alpha-level - and is only honored by servers that enable the TTLAfterFinished - feature. + to be deleted immediately after it finishes. format: int32 type: integer required: diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index bf40980a5d3..e8c836fe1e0 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -407,6 +407,7 @@ var _ = Describe("ScaledObjectController", func() { Ω(err).ToNot(HaveOccurred()) // Get and confirm the HPA + time.Sleep(30 * time.Second) hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{} Eventually(func() error { return k8sClient.Get(context.Background(), types.NamespacedName{Name: "keda-hpa-" + soName, Namespace: "default"}, hpa) diff --git a/tests/run-all.sh b/tests/run-all.sh index 380818efdd9..c9193391df5 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=3 +concurrent_tests_limit=4 pids=() lookup=() failed_count=0 From c29efc1348f02d86252905e4bd6d8094fceaebe2 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Sun, 30 Jan 2022 22:53:08 +0100 Subject: [PATCH 18/48] reduce e2e concurrency from 4 to 3 Signed-off-by: jorturfer --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index c9193391df5..380818efdd9 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=4 +concurrent_tests_limit=3 pids=() lookup=() failed_count=0 From 3cef8b88bd9322d914447e748bd446ab5e1c8032 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Mon, 31 Jan 2022 08:39:17 +0100 Subject: [PATCH 19/48] Increase concurrency to 6 and azure-queue timeouts Signed-off-by: Jorge Turrado --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index 380818efdd9..d032395ef4d 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=3 +concurrent_tests_limit=6 pids=() lookup=() failed_count=0 From 747a9f8261965af4b0880f1d8d292c83ce3b316a Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Mon, 31 Jan 2022 09:54:12 +0100 Subject: [PATCH 20/48] concurrency to 4 & increase selenium timeouts Signed-off-by: Jorge Turrado --- tests/run-all.sh | 2 +- tests/scalers/selenium-grid.test.ts | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index d032395ef4d..c9193391df5 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=6 +concurrent_tests_limit=4 pids=() lookup=() failed_count=0 diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 71f4f4641f1..4ea0ffd6629 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -19,13 +19,13 @@ test.before(t => { let seleniumHubReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 60; i++) { seleniumHubReplicaCount = sh.exec(`kubectl get deploy/selenium-hub -n 
${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout if (seleniumHubReplicaCount == '1') { break; } console.log('Waiting for selenium hub to be ready'); - sh.exec('sleep 2s') + sh.exec('sleep 5s') } t.is('1', seleniumHubReplicaCount, 'Selenium Hub is not in a ready state') }); @@ -33,14 +33,14 @@ test.before(t => { test.serial('should have one node for chrome and firefox each at start', t => { let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 60; i++) { seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout if (seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { break; } console.log('Waiting for chrome and firefox node to be ready'); - sh.exec('sleep 2s') + sh.exec('sleep 5s') } t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pods') @@ -84,7 +84,7 @@ test.serial('should create one chrome and firefox node', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); // wait for selenium grid tests to start running - for (let i = 0; i < 20; i++) { + for (let i = 0; i < 60; i++) { const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout if (running == '1') { break; @@ -94,14 +94,14 @@ test.serial('should create one chrome and firefox node', t => { let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 60; i++) { seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? 
sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; if (seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { break; } console.log('Waiting for chrome to scale up 1 pod and firefox to 1 pod'); - sh.exec('sleep 2s') + sh.exec('sleep 5s') } t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pod') @@ -114,7 +114,7 @@ test.serial('should create one chrome and firefox node', t => { if (succeeded == '1') { break; } - sh.exec('sleep 1s') + sh.exec('sleep 5s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) @@ -124,14 +124,14 @@ test.serial('should scale down chrome and firefox nodes to 0', t => { let seleniumChromeNodeReplicaCount = '1'; let seleniumFireFoxReplicaCount = '1'; - for (let i = 0; i < 65; i++) { + for (let i = 0; i < 60; i++) { seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout; seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout; if (seleniumChromeNodeReplicaCount == '0' && seleniumFireFoxReplicaCount == '0') { break; } console.log('Waiting for chrome and firefox to scale down to 0 pod'); - sh.exec('sleep 5s') + sh.exec('sleep 10s') } t.is('0', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale down to 0 pod') @@ -179,7 +179,7 @@ test.serial('should create two chrome and one firefox nodes', t => { let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = '0'; seleniumChrome91NodeReplicaCount = '0'; - for (let i = 0; i < 30; i++) { + for (let i = 0; i < 60; i++) { seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; seleniumChrome91NodeReplicaCount = seleniumChrome91NodeReplicaCount != '1' ? 
sh.exec(`kubectl get deploy/selenium-chrome-node-91 -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChrome91NodeReplicaCount; @@ -187,7 +187,7 @@ test.serial('should create two chrome and one firefox nodes', t => { break; } console.log('Waiting for chrome to scale up 2 pods and firefox to 1 pod'); - sh.exec('sleep 2s') + sh.exec('sleep 5s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) From 9843bad737e02e21611ffd4a4529989c940d6b24 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Mon, 31 Jan 2022 23:17:15 +0100 Subject: [PATCH 21/48] Increase some timeouts Signed-off-by: jorturfer --- tests/scalers/argo-rollouts.test.ts | 4 ++-- tests/scalers/graphite.test.ts | 4 ++-- tests/scalers/new-relic.test.ts | 4 ++-- tests/scalers/predictkube.test.ts | 2 +- tests/scalers/prometheus.test.ts | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/scalers/argo-rollouts.test.ts b/tests/scalers/argo-rollouts.test.ts index 6cd8b9c6a8f..15f1d48e1d1 100644 --- a/tests/scalers/argo-rollouts.test.ts +++ b/tests/scalers/argo-rollouts.test.ts @@ -83,7 +83,7 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i `kubectl get rollouts.argoproj.io/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - await sleep(5000) + await sleep(10000) } } @@ -94,7 +94,7 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i `kubectl get rollouts.argoproj.io/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - await sleep(5000) + await sleep(10000) } } diff --git a/tests/scalers/graphite.test.ts b/tests/scalers/graphite.test.ts index 1ee7b13a9d6..65061701a47 100644 --- a/tests/scalers/graphite.test.ts +++ b/tests/scalers/graphite.test.ts @@ -62,7 +62,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment php-apache-graphite --namespace ${graphiteNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -73,7 +73,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment php-apache-graphite --namespace ${graphiteNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/new-relic.test.ts b/tests/scalers/new-relic.test.ts index 6a2ecb7d245..ff468d16340 100644 --- a/tests/scalers/new-relic.test.ts +++ b/tests/scalers/new-relic.test.ts @@ -132,7 +132,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -143,7 +143,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/predictkube.test.ts b/tests/scalers/predictkube.test.ts index 165e7a6b013..151718692e6 100644 --- a/tests/scalers/predictkube.test.ts +++ b/tests/scalers/predictkube.test.ts @@ -88,7 +88,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests 
exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } diff --git a/tests/scalers/prometheus.test.ts b/tests/scalers/prometheus.test.ts index 1eaa9df9cfe..04e79d81b52 100644 --- a/tests/scalers/prometheus.test.ts +++ b/tests/scalers/prometheus.test.ts @@ -73,7 +73,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '5') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } @@ -84,7 +84,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } From 577975afc12ea28af5864295a325b20d04485140 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Mon, 31 Jan 2022 23:17:47 +0100 Subject: [PATCH 22/48] 8 concurrent tests Signed-off-by: jorturfer --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index c9193391df5..89aa1af851f 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=4 +concurrent_tests_limit=8 pids=() lookup=() failed_count=0 From d33f82e1064bfc2acf01b7eb3c1cc78e65694cb3 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Mon, 31 Jan 2022 23:31:52 +0100 Subject: [PATCH 23/48] Change timeout during integration tests Signed-off-by: jorturfer --- controllers/keda/scaledobject_controller_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index e8c836fe1e0..5cc1efa2a28 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -407,7 +407,6 @@ var _ = Describe("ScaledObjectController", func() { Ω(err).ToNot(HaveOccurred()) // Get and confirm the HPA - time.Sleep(30 * time.Second) hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{} Eventually(func() error { return k8sClient.Get(context.Background(), types.NamespacedName{Name: "keda-hpa-" + soName, Namespace: "default"}, hpa) @@ -463,7 +462,7 @@ var _ = Describe("ScaledObjectController", func() { err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) Ω(err).ToNot(HaveOccurred()) return so.Status.Conditions.GetReadyCondition().Status - }, 20*time.Second).Should(Equal(metav1.ConditionFalse)) + }, 60*time.Second).Should(Equal(metav1.ConditionFalse)) }) It("doesn't allow IdleReplicaCount > MinReplicaCount", func() { From 66a3d134732a07e8c5624def6bd5046365aeaba0 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Tue, 1 Feb 2022 08:20:45 +0100 Subject: [PATCH 24/48] Update timeouts Signed-off-by: Jorge Turrado --- tests/run-all.sh | 2 +- tests/scalers/redis-lists.test.ts | 2 +- tests/scalers/redis-sentinel-lists.test.ts | 2 +- tests/scalers/redis-streams.test.ts | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index 89aa1af851f..d032395ef4d 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR 
-concurrent_tests_limit=8 +concurrent_tests_limit=6 pids=() lookup=() failed_count=0 diff --git a/tests/scalers/redis-lists.test.ts b/tests/scalers/redis-lists.test.ts index cf37bef07f7..6db8f4cd1df 100644 --- a/tests/scalers/redis-lists.test.ts +++ b/tests/scalers/redis-lists.test.ts @@ -33,7 +33,7 @@ test.before(t => { t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${redisDeployTmpFile.name}`).code, 'creating a Redis deployment should work.') // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') sh.exec(`kubectl create namespace ${testNamespace}`) diff --git a/tests/scalers/redis-sentinel-lists.test.ts b/tests/scalers/redis-sentinel-lists.test.ts index 1d0171b1ba0..66bb7fc432e 100644 --- a/tests/scalers/redis-sentinel-lists.test.ts +++ b/tests/scalers/redis-sentinel-lists.test.ts @@ -38,7 +38,7 @@ test.before(t => { ) // Wait for Redis sentinel to be ready. - t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600), 'Redis is not in a ready state') // Get Redis sentinel address. redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts index 8b7df291aed..289d55bae26 100644 --- a/tests/scalers/redis-streams.test.ts +++ b/tests/scalers/redis-streams.test.ts @@ -21,7 +21,7 @@ test.before(t => { t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${tmpFile1.name}`).code, 'creating a Redis deployment should work.') // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 300), 'Redis is not in a ready state') + t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') sh.exec(`kubectl create namespace ${testNamespace}`) From e158bb8e104965ded1a0a7b3e0e1d9dc5cb12e41 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Tue, 1 Feb 2022 12:59:45 +0100 Subject: [PATCH 25/48] test with 2 concurrent tests Signed-off-by: Jorge Turrado --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index d032395ef4d..8eee40f8676 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=6 +concurrent_tests_limit=2 pids=() lookup=() failed_count=0 From 9449f157fe4988168ffbc47561e12db3d97a95cd Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Tue, 1 Feb 2022 13:06:10 +0100 Subject: [PATCH 26/48] Increase timeout in an integration test Signed-off-by: Jorge Turrado --- controllers/keda/scaledobject_controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index 5cc1efa2a28..56a8820475e 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -419,7 +419,7 @@ var _ = Describe("ScaledObjectController", func() { err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) Ω(err).ToNot(HaveOccurred()) return 
so.Status.Conditions.GetReadyCondition().Status - }, 20*time.Second).Should(Equal(metav1.ConditionTrue)) + }, 60*time.Second).Should(Equal(metav1.ConditionTrue)) }) It("doesn't allow MinReplicaCount > MaxReplicaCount", func() { From 5821ce9bebf1dd58ce3e5e03810090e27847641a Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Tue, 1 Feb 2022 15:25:09 +0100 Subject: [PATCH 27/48] set concurrency to 6 Signed-off-by: Jorge Turrado --- tests/run-all.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index 8eee40f8676..d032395ef4d 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -6,7 +6,7 @@ E2E_REGEX=${E2E_TEST_REGEX:-*.test.ts} DIR=$(dirname "$0") cd $DIR -concurrent_tests_limit=2 +concurrent_tests_limit=6 pids=() lookup=() failed_count=0 From a7fa17298708dbf2bc5bf37894e9375d16d96aba Mon Sep 17 00:00:00 2001 From: jorturfer Date: Tue, 1 Feb 2022 22:09:25 +0100 Subject: [PATCH 28/48] Remove resources from e2e pods Signed-off-by: jorturfer --- tests/scalers/activemq.test.ts | 6 ------ tests/scalers/artemis-helpers.ts | 3 --- tests/scalers/elasticsearch.test.ts | 5 ----- tests/scalers/selenium-grid.test.ts | 18 ------------------ 4 files changed, 32 deletions(-) diff --git a/tests/scalers/activemq.test.ts b/tests/scalers/activemq.test.ts index a0a4391ec1a..1adb3dc48b9 100644 --- a/tests/scalers/activemq.test.ts +++ b/tests/scalers/activemq.test.ts @@ -135,12 +135,6 @@ spec: name: mqtt protocol: TCP resources: - requests: - memory: 500Mi - cpu: 200m - limits: - memory: 1000Mi - cpu: 400m volumeMounts: - name: activemq-config mountPath: /opt/apache-activemq-5.16.3/webapps/api/WEB-INF/classes/jolokia-access.xml diff --git a/tests/scalers/artemis-helpers.ts b/tests/scalers/artemis-helpers.ts index e5235576632..51fe2f18433 100644 --- a/tests/scalers/artemis-helpers.ts +++ b/tests/scalers/artemis-helpers.ts @@ -159,9 +159,6 @@ spec: image: docker.io/vromero/activemq-artemis:2.6.2 imagePullPolicy: resources: - requests: - cpu: 100m - memory: 256Mi env: - name: ARTEMIS_PASSWORD valueFrom: diff --git a/tests/scalers/elasticsearch.test.ts b/tests/scalers/elasticsearch.test.ts index 3b04013d3c5..e7901338346 100644 --- a/tests/scalers/elasticsearch.test.ts +++ b/tests/scalers/elasticsearch.test.ts @@ -247,11 +247,6 @@ spec: name: transport protocol: TCP resources: - requests: - cpu: 100m - memory: 1Gi - limits: - memory: 1Gi readinessProbe: exec: command: diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 4ea0ffd6629..b76cede86f2 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -353,12 +353,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: @@ -404,12 +398,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: @@ -508,12 +496,6 @@ spec: - name: dshm mountPath: /dev/shm resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi volumes: - name: dshm emptyDir: From d4b0c660f1d4ee5c3bbea1d098e1bf588dec4515 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Tue, 1 Feb 2022 22:44:57 +0100 Subject: [PATCH 29/48] Increase cassandra timeouts Signed-off-by: jorturfer --- tests/scalers/cassandra.test.ts | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/scalers/cassandra.test.ts 
b/tests/scalers/cassandra.test.ts index e4d38f5696f..144229e4248 100644 --- a/tests/scalers/cassandra.test.ts +++ b/tests/scalers/cassandra.test.ts @@ -22,10 +22,10 @@ test.before(t => { // wait for cassandra to load console.log("wait for cassandra to load") let cassandraReadyReplicaCount = '0' - for (let i = 0; i < 50; i++) { + for (let i = 0; i < 30; i++) { cassandraReadyReplicaCount = sh.exec(`kubectl get deploy/cassandra -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (cassandraReadyReplicaCount != '1') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('1', cassandraReadyReplicaCount, 'Cassandra is not in a ready state') @@ -36,7 +36,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { cassandraReady = sh.exec(`kubectl exec -n ${cassandraNamespace} ${cassandraPod} -- nodetool status | grep -w -o UN`) if (cassandraReady != "UN\n") { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } else { break @@ -53,10 +53,10 @@ test.before(t => { // wait for cassandra-client to load console.log("wait for cassandra-client to load") let cassandraClientReadyReplicaCount = '0' - for (let i = 0; i < 50; i++) { + for (let i = 0; i < 30; i++) { cassandraClientReadyReplicaCount = sh.exec(`kubectl get deploy/cassandra-client -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (cassandraClientReadyReplicaCount != '1') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('1', cassandraClientReadyReplicaCount, 'Cassandra client is not in a ready state') @@ -67,7 +67,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { cassandraClientReady = sh.exec(`kubectl exec -n ${cassandraNamespace} ${cassandraClientPod} -- nodetool status | grep -w -o UN`) if (cassandraClientReady != "UN\n") { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } else { break @@ -94,7 +94,7 @@ test.before(t => { for (let i = 0; i < 30; i++) { nginxReadyReplicaCount = sh.exec(`kubectl get deploy/${nginxDeploymentName} -n ${cassandraNamespace} -o jsonpath='{.status.readyReplicas}'`).stdout if (nginxReadyReplicaCount != '') { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } t.is('', nginxReadyReplicaCount, 'creating an Nginx deployment should work') @@ -136,11 +136,11 @@ test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { replicaCount = sh.exec( `kubectl get deploy/${nginxDeploymentName} --namespace ${cassandraNamespace} -o jsonpath="{.spec.replicas}"`).stdout if (replicaCount !== maxReplicaCount) { - sh.exec('sleep 2s') + sh.exec('sleep 10s') } } - t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 60 seconds`) + t.is(maxReplicaCount, replicaCount, `Replica count should be ${maxReplicaCount} after 300 seconds`) sh.exec('sleep 30s') // delete all data from cassandra @@ -157,11 +157,11 @@ test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { replicaCount = sh.exec( `kubectl get deploy/${nginxDeploymentName} --namespace ${cassandraNamespace} -o jsonpath="{.spec.replicas}"`).stdout if (replicaCount !== '0') { - sh.exec('sleep 5s') + sh.exec('sleep 10s') } } - t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') + t.is('0', replicaCount, 'Replica count should be 0 after 5 minutes') }) From 4a69ff6ca849b2e9940de34fd55da855a22afcbf Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 00:05:05 +0100 Subject: [PATCH 30/48] Increase selenium timeouts Signed-off-by: jorturfer --- tests/scalers/selenium-grid.test.ts | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git 
a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index b76cede86f2..1b30b200d58 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -40,7 +40,7 @@ test.serial('should have one node for chrome and firefox each at start', t => { break; } console.log('Waiting for chrome and firefox node to be ready'); - sh.exec('sleep 5s') + sh.exec('sleep 10s') } t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pods') @@ -62,7 +62,7 @@ test.serial('should scale down browser nodes to 0', t => { break; } console.log('Waiting for chrome and firefox to scale down to 0 pods') - sh.exec('sleep 5s') + sh.exec('sleep 10s') } t.is('0', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale down to 0 pods') @@ -101,7 +101,7 @@ test.serial('should create one chrome and firefox node', t => { break; } console.log('Waiting for chrome to scale up 1 pod and firefox to 1 pod'); - sh.exec('sleep 5s') + sh.exec('sleep 10s') } t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pod') @@ -114,7 +114,7 @@ test.serial('should create one chrome and firefox node', t => { if (succeeded == '1') { break; } - sh.exec('sleep 5s') + sh.exec('sleep 10s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) @@ -168,7 +168,7 @@ test.serial('should create two chrome and one firefox nodes', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); // wait for selenium grid tests to start running - for (let i = 0; i < 20; i++) { + for (let i = 0; i < 60; i++) { const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout if (running == '1') { break; @@ -187,7 +187,7 @@ test.serial('should create two chrome and one firefox nodes', t => { break; } console.log('Waiting for chrome to scale up 2 pods and firefox to 1 pod'); - sh.exec('sleep 5s') + sh.exec('sleep 10s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) From 68b1d982afe909b8db8a0448b09af1ba74fbfef8 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 00:13:39 +0100 Subject: [PATCH 31/48] Increase redis timeouts Signed-off-by: jorturfer --- tests/scalers/redis-cluster-lists.test.ts | 34 ++++++++++---------- tests/scalers/redis-cluster-streams.test.ts | 8 ++--- tests/scalers/redis-lists.test.ts | 30 ++++++++--------- tests/scalers/redis-sentinel-lists.test.ts | 32 +++++++++--------- tests/scalers/redis-sentinel-streams.test.ts | 8 ++--- tests/scalers/redis-streams.test.ts | 6 ++-- 6 files changed, 59 insertions(+), 59 deletions(-) diff --git a/tests/scalers/redis-cluster-lists.test.ts b/tests/scalers/redis-cluster-lists.test.ts index a78ca1d5bdb..c6ad09afdd6 100644 --- a/tests/scalers/redis-cluster-lists.test.ts +++ b/tests/scalers/redis-cluster-lists.test.ts @@ -30,14 +30,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let clusterStatus = sh.exec(`helm install --timeout 600s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code + let clusterStatus = sh.exec(`helm install --timeout 900s ${redisClusterName} --namespace ${redisNamespace} --set 
"global.redis.password=${redisPassword}" bitnami/redis-cluster`).code t.is(0, clusterStatus, 'creating a Redis cluster should work.' ) // Wait for Redis cluster to be ready. - t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300)) + t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600)) // Get Redis cluster address. redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) @@ -133,19 +133,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -155,7 +155,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -173,19 +173,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -195,7 +195,7 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -213,19 +213,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o 
jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -235,7 +235,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-cluster-streams.test.ts b/tests/scalers/redis-cluster-streams.test.ts index 3faed339a7f..86e0ca85800 100644 --- a/tests/scalers/redis-cluster-streams.test.ts +++ b/tests/scalers/redis-cluster-streams.test.ts @@ -18,14 +18,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let clusterStatus = sh.exec(`helm install --timeout 600s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code + let clusterStatus = sh.exec(`helm install --timeout 900s ${redisClusterName} --namespace ${redisNamespace} --set "global.redis.password=${redisPassword}" bitnami/redis-cluster`).code t.is(0, clusterStatus, 'creating a Redis cluster should work.' ) // Wait for Redis cluster to be ready. - let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300) + let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600) t.is(0, exitCode, 'expected rollout status for redis to finish successfully') // Get Redis cluster address. @@ -66,7 +66,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // Wait for producer job to finish. - for (let i = 0; i < 40; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -74,7 +74,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back sh.exec('sleep 1s') } // With messages published, the consumer deployment should start receiving the messages. 
- t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 3000), 'Replica count should be 5 within 60 seconds') + t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 5 within 10 minutes') t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') }) diff --git a/tests/scalers/redis-lists.test.ts b/tests/scalers/redis-lists.test.ts index 6db8f4cd1df..b526bee09aa 100644 --- a/tests/scalers/redis-lists.test.ts +++ b/tests/scalers/redis-lists.test.ts @@ -124,19 +124,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -146,7 +146,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -164,19 +164,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -186,7 +186,7 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -204,19 +204,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 20 && replicaCount !== '5'; i++) { + for (let i = 0; i < 
60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -226,7 +226,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-sentinel-lists.test.ts b/tests/scalers/redis-sentinel-lists.test.ts index 66bb7fc432e..0208df8bf73 100644 --- a/tests/scalers/redis-sentinel-lists.test.ts +++ b/tests/scalers/redis-sentinel-lists.test.ts @@ -31,7 +31,7 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let sentinelStatus = sh.exec(`helm install --timeout 600s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code + let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code t.is(0, sentinelStatus, 'creating a Redis sentinel setup should work.' 
@@ -142,19 +142,19 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -164,7 +164,7 @@ test.serial(`Deployment using redis host port env vars should max and scale to 5 } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) test.serial('Deployment for redis address env var should have 0 replica on start', t => { @@ -182,19 +182,19 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -204,7 +204,7 @@ test.serial(`Deployment using redis address env var should max and scale to 5 wi } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) @@ -222,19 +222,19 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout t.log('(scale up) replica count is:' + replicaCount) if (replicaCount !== '5') { - sh.exec('sleep 3s') + sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be 5 within 60 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - for (let i = 0; i < 12 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get 
deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -244,7 +244,7 @@ test.serial(`Deployment using redis host port in triggerAuth should max and scal } } - t.is('0', replicaCount, 'Replica count should be 0 within 2 minutes') + t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') }) diff --git a/tests/scalers/redis-sentinel-streams.test.ts b/tests/scalers/redis-sentinel-streams.test.ts index dd0e257148d..dd79c4a4d23 100644 --- a/tests/scalers/redis-sentinel-streams.test.ts +++ b/tests/scalers/redis-sentinel-streams.test.ts @@ -19,14 +19,14 @@ test.before(t => { sh.exec(`kubectl create namespace ${redisNamespace}`) sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - let sentinelStatus = sh.exec(`helm install --timeout 600s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code + let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "global.redis.password=${redisPassword}" bitnami/redis`).code t.is(0, sentinelStatus, 'creating a Redis Sentinel setup should work.' ) // Wait for Redis Sentinel to be ready. - let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 300) + let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600) t.is(0, exitCode, 'expected rollout status for redis to finish successfully') // Get Redis Sentinel address. @@ -68,7 +68,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // Wait for producer job to finish. - for (let i = 0; i < 40; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back sh.exec('sleep 1s') } // With messages published, the consumer deployment should start receiving the messages. 
- t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 3000), 'Replica count should be 5 within 60 seconds') + t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 10000), 'Replica count should be 5 within 5 minutes') t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') }) diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts index 289d55bae26..6b17ecdc41a 100644 --- a/tests/scalers/redis-streams.test.ts +++ b/tests/scalers/redis-streams.test.ts @@ -57,7 +57,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back ) // wait for the producer job to complete - for (let i = 0; i < 20; i++) { + for (let i = 0; i < 60; i++) { const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout if (succeeded == '1') { break @@ -66,7 +66,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back } // with messages published, the consumer deployment should start receiving the messages let replicaCount = '0' - for (let i = 0; i < 30 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '5'; i++) { replicaCount = sh.exec( `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -76,7 +76,7 @@ test.serial(`Deployment should scale to 5 with ${numMessages} messages and back } } - t.is('5', replicaCount, 'Replica count should be 5 within 300 seconds') + t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') for (let i = 0; i < 60 && replicaCount !== '1'; i++) { replicaCount = sh.exec( From 26eec994146a2086bad721b8a120d11c0974851d Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Wed, 2 Feb 2022 09:46:49 +0100 Subject: [PATCH 32/48] Increase ava timeouts Signed-off-by: Jorge Turrado --- tests/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/package.json b/tests/package.json index 9bcf6b10f4f..b9adfd7fd8e 100644 --- a/tests/package.json +++ b/tests/package.json @@ -9,7 +9,7 @@ "require": [ "ts-node/register" ], - "timeout": "10m" + "timeout": "30m" }, "scripts": { "test": "ava" From 3cbfe2af78f932763a72518034c7666324e64a9c Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Wed, 2 Feb 2022 11:36:57 +0100 Subject: [PATCH 33/48] Reduce waiting time selenium Signed-off-by: Jorge Turrado --- tests/scalers/selenium-grid.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 1b30b200d58..7748fe20651 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -84,7 +84,7 @@ test.serial('should create one chrome and firefox node', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); // wait for selenium grid tests to start running - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 20; i++) { const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout if (running == '1') { break; @@ -168,7 +168,7 @@ test.serial('should create two chrome and one firefox nodes', t => { t.is(0, sh.exec(`kubectl apply --namespace 
${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); // wait for selenium grid tests to start running - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 20; i++) { const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout if (running == '1') { break; From ebd144738ae105420ae97f787bc5ef7ec7e5cc1a Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Wed, 2 Feb 2022 13:05:33 +0100 Subject: [PATCH 34/48] Update new relic test Signed-off-by: Jorge Turrado --- tests/scalers/new-relic.test.ts | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/tests/scalers/new-relic.test.ts b/tests/scalers/new-relic.test.ts index ff468d16340..45593ee1e96 100644 --- a/tests/scalers/new-relic.test.ts +++ b/tests/scalers/new-relic.test.ts @@ -102,14 +102,14 @@ test.serial('Deployment should have 1 replicas on start', t => { t.is(replicaCount, '1', 'replica count should start out as 0') }) -test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding in the rate then back to 0`, t => { +test.serial(`Deployment should scale to 3 (the max) with HTTP Requests exceeding in the rate then back to 0`, t => { // generate a large number of HTTP requests (using Apache Bench) that will take some time // so prometheus has some time to scrape it - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, generateRequestsYaml.replace('{{NAMESPACE}}', testNamespace)) + const loadGeneratorFile = tmp.fileSync() + fs.writeFileSync(loadGeneratorFile.name, generateRequestsYaml.replace('{{NAMESPACE}}', testNamespace)) t.is( 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, + sh.exec(`kubectl apply -f ${loadGeneratorFile.name} --namespace ${testNamespace}`).code, 'creating job should work.' ) @@ -123,7 +123,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding // keda based deployment should start scaling up with http requests issued let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { + for (let i = 0; i < 60 && replicaCount !== '3'; i++) { t.log(`Waited ${5 * i} seconds for new-relic-based deployments to scale up`) const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout t.log(`Logs from the generate requests: ${jobLogs}`) @@ -131,14 +131,20 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding replicaCount = sh.exec( `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout - if (replicaCount !== '5') { + if (replicaCount !== '3') { sh.exec('sleep 10s') } } - t.is('5', replicaCount, 'Replica count should be maxed at 5') + t.is('3', replicaCount, 'Replica count should be maxed at 3') + + t.is( + 0, + sh.exec(`kubectl delete -f ${loadGeneratorFile.name} --namespace ${testNamespace}`).code, + 'deleting job should work.' 
+ ) - for (let i = 0; i < 50 && replicaCount !== '0'; i++) { + for (let i = 0; i < 60 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get deployment.apps/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -147,7 +153,7 @@ test.serial(`Deployment should scale to 5 (the max) with HTTP Requests exceeding } } - t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') + t.is('0', replicaCount, 'Replica count should be 0 after 6 minutes') sh.exec('sleep 10s') }) @@ -171,9 +177,9 @@ spec: - image: jordi/ab name: test command: ["/bin/sh"] - args: ["-c", "for i in $(seq 1 60);do echo $i;ab -c 5 -n 1000 -v 2 http://test-app/;sleep 1;done"] + args: ["-c", "for i in $(seq 1 60);do echo $i;ab -c 5 -n 10000 -v 2 http://test-app/;sleep 1;done"] restartPolicy: Never - activeDeadlineSeconds: 120 + activeDeadlineSeconds: 600 backoffLimit: 2` const deployYaml = `apiVersion: apps/v1 @@ -263,7 +269,7 @@ spec: scaleTargetRef: name: keda-test-app minReplicaCount: 0 - maxReplicaCount: 5 + maxReplicaCount: 3 pollingInterval: 5 cooldownPeriod: 10 triggers: @@ -271,7 +277,7 @@ spec: metadata: account: '{{NEWRELIC_ACCOUNT_ID}}' region: '{{NEWRELIC_REGION}}' - threshold: '100' + threshold: '10' nrql: SELECT average(\`http_requests_total\`) FROM Metric where serviceName='test-app' and namespaceName='new-relic-test' since 60 seconds ago authenticationRef: name: newrelic-trigger From 86db63cc1f281f9332300683164a63e3ae5c6e3e Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 23:06:11 +0100 Subject: [PATCH 35/48] set max instances for cassandra test to 2 Signed-off-by: jorturfer --- tests/scalers/cassandra.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/scalers/cassandra.test.ts b/tests/scalers/cassandra.test.ts index 144229e4248..d04bcb7caa2 100644 --- a/tests/scalers/cassandra.test.ts +++ b/tests/scalers/cassandra.test.ts @@ -108,7 +108,7 @@ test.serial('Should start off deployment with 0 replicas', t => { }) -test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { +test.serial(`Replicas should scale to 2 (the max) then back to 0`, t => { // insert data to cassandra console.log("insert data to cassandra") const insertData = `BEGIN BATCH @@ -130,7 +130,7 @@ test.serial(`Replicas should scale to 4 (the max) then back to 0`, t => { ) let replicaCount = '0' - const maxReplicaCount = '4' + const maxReplicaCount = '2' for (let i = 0; i < 30 && replicaCount !== maxReplicaCount; i++) { replicaCount = sh.exec( @@ -279,7 +279,7 @@ metadata: name: cassandra-scaledobject spec: minReplicaCount: 0 - maxReplicaCount: 4 + maxReplicaCount: 2 pollingInterval: 1 # Optional. Default: 30 seconds cooldownPeriod: 1 # Optional. 
Default: 300 seconds scaleTargetRef: From 7333e2abeb16cf61318212481f2d87e9f9d40700 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 23:36:30 +0100 Subject: [PATCH 36/48] Increase argo timeouts Signed-off-by: jorturfer --- tests/scalers/argo-rollouts.test.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/scalers/argo-rollouts.test.ts b/tests/scalers/argo-rollouts.test.ts index 15f1d48e1d1..f42a8db2287 100644 --- a/tests/scalers/argo-rollouts.test.ts +++ b/tests/scalers/argo-rollouts.test.ts @@ -74,8 +74,8 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i // keda based rollout should start scaling up with http requests issued let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - t.log(`Waited ${5 * i} seconds for prometheus-based rollout to scale up`) + for (let i = 0; i < 120 && replicaCount !== '5'; i++) { + t.log(`Waited ${10 * i} seconds for prometheus-based rollout to scale up`) const jobLogs = sh.exec(`kubectl logs -l job-name=generate-requests -n ${testNamespace}`).stdout t.log(`Logs from the generate requests: ${jobLogs}`) @@ -89,7 +89,7 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i t.is('5', replicaCount, 'Replica count should be maxed at 5') - for (let i = 0; i < 50 && replicaCount !== '0'; i++) { + for (let i = 0; i < 90 && replicaCount !== '0'; i++) { replicaCount = sh.exec( `kubectl get rollouts.argoproj.io/keda-test-app --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` ).stdout @@ -98,7 +98,7 @@ test.serial(`Rollouts should scale to 5 (the max) with HTTP Requests exceeding i } } - t.is('0', replicaCount, 'Replica count should be 0 after 3 minutes') + t.is('0', replicaCount, 'Replica count should be 0 after 15 minutes') }) test.after.always.cb('clean up argo-rollouts testing deployment', t => { From ddcbb9e4df0797d1f7fb3acc608a44518aa62e79 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 23:47:30 +0100 Subject: [PATCH 37/48] Add some configs to selenium Signed-off-by: jorturfer --- tests/scalers/selenium-grid.test.ts | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 7748fe20651..108b4ba6ad4 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -534,7 +534,9 @@ metadata: labels: deploymentName: selenium-chrome-node-91 spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-chrome-node-91 triggers: @@ -555,7 +557,9 @@ metadata: labels: deploymentName: selenium-chrome-node spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-chrome-node triggers: @@ -573,7 +577,9 @@ metadata: labels: deploymentName: selenium-firefox-node spec: - maxReplicaCount: 8 + maxReplicaCount: 1 + pollingInterval: 5 + cooldownPeriod: 5 scaleTargetRef: name: selenium-firefox-node triggers: From 899d0b64b3d5b49725b0964f09e885361d5be31d Mon Sep 17 00:00:00 2001 From: jorturfer Date: Wed, 2 Feb 2022 23:56:57 +0100 Subject: [PATCH 38/48] undo change in integration test Signed-off-by: jorturfer --- controllers/keda/scaledobject_controller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index 56a8820475e..bf40980a5d3 100644 --- 
a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -419,7 +419,7 @@ var _ = Describe("ScaledObjectController", func() { err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) Ω(err).ToNot(HaveOccurred()) return so.Status.Conditions.GetReadyCondition().Status - }, 60*time.Second).Should(Equal(metav1.ConditionTrue)) + }, 20*time.Second).Should(Equal(metav1.ConditionTrue)) }) It("doesn't allow MinReplicaCount > MaxReplicaCount", func() { @@ -462,7 +462,7 @@ var _ = Describe("ScaledObjectController", func() { err = k8sClient.Get(context.Background(), types.NamespacedName{Name: soName, Namespace: "default"}, so) Ω(err).ToNot(HaveOccurred()) return so.Status.Conditions.GetReadyCondition().Status - }, 60*time.Second).Should(Equal(metav1.ConditionFalse)) + }, 20*time.Second).Should(Equal(metav1.ConditionFalse)) }) It("doesn't allow IdleReplicaCount > MinReplicaCount", func() { From 4e3a488585381bac9e427a89e6e422ad457c2d15 Mon Sep 17 00:00:00 2001 From: jorturfer Date: Thu, 3 Feb 2022 23:45:46 +0100 Subject: [PATCH 39/48] Update selenium test Signed-off-by: jorturfer --- tests/scalers/selenium-grid.test.ts | 75 ++++++++--------------------- 1 file changed, 20 insertions(+), 55 deletions(-) diff --git a/tests/scalers/selenium-grid.test.ts b/tests/scalers/selenium-grid.test.ts index 108b4ba6ad4..71611af8c3c 100644 --- a/tests/scalers/selenium-grid.test.ts +++ b/tests/scalers/selenium-grid.test.ts @@ -30,24 +30,7 @@ test.before(t => { t.is('1', seleniumHubReplicaCount, 'Selenium Hub is not in a ready state') }); -test.serial('should have one node for chrome and firefox each at start', t => { - let seleniumChromeNodeReplicaCount = '0'; - let seleniumFireFoxReplicaCount = '0'; - for (let i = 0; i < 60; i++) { - seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout - seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout - if (seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { - break; - } - console.log('Waiting for chrome and firefox node to be ready'); - sh.exec('sleep 10s') - } - - t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pods') - t.is('1', seleniumFireFoxReplicaCount, 'Selenium Firefox Node did not scale up to 1 pods') -}); - -test.serial('should scale down browser nodes to 0', t => { +test.serial('should have 0 nodes at start', t => { const scaledObjectDeployTmpFile = tmp.fileSync(); fs.writeFileSync(scaledObjectDeployTmpFile.name, scaledObjectYaml.replace(/{{NAMESPACE}}/g, seleniumGridNamespace).replace(/{{SELENIUM_GRID_GRAPHQL_URL}}/g, seleniumGridGraphQLUrl)); @@ -83,55 +66,46 @@ test.serial('should create one chrome and firefox node', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); - // wait for selenium grid tests to start running - for (let i = 0; i < 20; i++) { - const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout - if (running == '1') { - break; - } - sh.exec('sleep 1s') - } - - let seleniumChromeNodeReplicaCount = '0'; + let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = 
'0'; - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 120; i++) { seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; if (seleniumChromeNodeReplicaCount == '1' && seleniumFireFoxReplicaCount == '1') { break; } console.log('Waiting for chrome to scale up 1 pod and firefox to 1 pod'); - sh.exec('sleep 10s') + sh.exec('sleep 2s') } t.is('1', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale up to 1 pod') t.is('1', seleniumFireFoxReplicaCount, 'Selenium Firefox Node did not scale up to 1 pod') // wait for selenium grid tests to complete - let succeeded = '0'; - for (let i = 0; i < 60; i++) { - succeeded = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { + for (let i = 0; i < 120; i++) { + seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '0' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; + seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '0' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; + if (seleniumChromeNodeReplicaCount == '0' && seleniumFireFoxReplicaCount == '0') { break; } - sh.exec('sleep 10s') + console.log('Waiting for chrome to scale up 0 pod and firefox to 0 pod'); + sh.exec('sleep 2s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) }); test.serial('should scale down chrome and firefox nodes to 0', t => { - let seleniumChromeNodeReplicaCount = '1'; let seleniumFireFoxReplicaCount = '1'; - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 120; i++) { seleniumChromeNodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout; seleniumFireFoxReplicaCount = sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout; if (seleniumChromeNodeReplicaCount == '0' && seleniumFireFoxReplicaCount == '0') { break; } console.log('Waiting for chrome and firefox to scale down to 0 pod'); - sh.exec('sleep 10s') + sh.exec('sleep 2s') } t.is('0', seleniumChromeNodeReplicaCount, 'Selenium Chrome Node did not scale down to 0 pod') @@ -145,13 +119,13 @@ test.serial('should create two chrome and one firefox nodes', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${chrome91DeployTmpFile.name}`).code, 'creating Chrome 91 node should work.') let seleniumChrome91NodeReplicaCount = '1'; - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 120; i++) { seleniumChrome91NodeReplicaCount = sh.exec(`kubectl get deploy/selenium-chrome-node-91 -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout if (seleniumChrome91NodeReplicaCount == '0') { break; } console.log('Waiting for chrome 91 to scale down to 0 pods') - sh.exec('sleep 10s') + sh.exec('sleep 2s') } const seleniumGridTestDeployTmpFile = tmp.fileSync(); @@ -167,19 +141,10 @@ 
test.serial('should create two chrome and one firefox nodes', t => { t.is(0, sh.exec(`kubectl apply --namespace ${seleniumGridNamespace} -f ${seleniumGridTestDeployTmpFile.name}`).code, 'creating a Selenium Grid Tests deployment should work.'); - // wait for selenium grid tests to start running - for (let i = 0; i < 20; i++) { - const running = sh.exec(`kubectl get job ${seleniumGridTestName} --namespace ${seleniumGridNamespace} -o jsonpath='{.items[0].status.running}'`).stdout - if (running == '1') { - break; - } - sh.exec('sleep 1s') - } - let seleniumChromeNodeReplicaCount = '0'; let seleniumFireFoxReplicaCount = '0'; seleniumChrome91NodeReplicaCount = '0'; - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 120; i++) { seleniumChromeNodeReplicaCount = seleniumChromeNodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChromeNodeReplicaCount; seleniumFireFoxReplicaCount = seleniumFireFoxReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-firefox-node -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumFireFoxReplicaCount; seleniumChrome91NodeReplicaCount = seleniumChrome91NodeReplicaCount != '1' ? sh.exec(`kubectl get deploy/selenium-chrome-node-91 -n ${seleniumGridNamespace} -o jsonpath='{.spec.replicas}'`).stdout : seleniumChrome91NodeReplicaCount; @@ -187,7 +152,7 @@ test.serial('should create two chrome and one firefox nodes', t => { break; } console.log('Waiting for chrome to scale up 2 pods and firefox to 1 pod'); - sh.exec('sleep 10s') + sh.exec('sleep 2s') } sh.exec(`kubectl delete job/${seleniumGridTestName} --namespace ${seleniumGridNamespace}`) @@ -329,7 +294,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-chrome-node @@ -374,7 +339,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-firefox-node @@ -472,7 +437,7 @@ metadata: app.kubernetes.io/component: selenium-grid-4.0.0-beta-1-prerelease-20210114 helm.sh/chart: selenium-grid-0.2.0 spec: - replicas: 1 + replicas: 0 selector: matchLabels: app: selenium-chrome-node-91 @@ -602,7 +567,7 @@ spec: spec: containers: - name: {{CONTAINER_NAME}} - image: prashanth0007/selenium-random-tests:v1.0.2 + image: ghcr.io/kedacore/tests-selenium-grid imagePullPolicy: Always env: - name: HOST_NAME From 7a18cdb26cd315757b2ebb8761d8679da52ce4e1 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Fri, 4 Feb 2022 09:04:27 +0100 Subject: [PATCH 40/48] Add retry for failing e2e tests Signed-off-by: Jorge Turrado --- tests/run-all.sh | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index d032395ef4d..7a848df3565 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -23,7 +23,7 @@ function run_tests { for test_case in $(find scalers -name "$E2E_REGEX" | shuf) do counter=$((counter+1)) - ./node_modules/.bin/ava $test_case > "${test_case}.log" 2>&1 & + ./node_modules/.bin/ava $test_case > "${test_case}.1.log" 2>&1 & pid=$! 
echo "Running $test_case with pid: $pid" pids+=($pid) @@ -35,6 +35,33 @@ function run_tests { pids=() fi done + + printf "\n\n##############################################\n" + printf "##############################################\n\n" + printf "FINISHED FIRST EXECUTION, RETRYING FAILING TESTS" + printf "\n\n##############################################\n" + printf "##############################################\n\n" + + retry_lookup=failed_lookup + failed_count=0 + failed_lookup=() + + #Retry failing tests + for test_case in $(retry_lookup | shuf) + do + counter=$((counter+1)) + ./node_modules/.bin/ava $test_case > "${test_case}.2.log" 2>&1 & + pid=$! + echo "Rerunning $test_case with pid: $pid" + pids+=($pid) + lookup[$pid]=$test_case + # limit concurrent runs + if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then + wait_for_jobs + counter=0 + pids=() + fi + done } function mark_failed { From 95338dba6bfd66f2dcb0dde841777de4f7b73e44 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Fri, 4 Feb 2022 11:07:05 +0100 Subject: [PATCH 41/48] Update retry system Signed-off-by: Jorge Turrado --- tests/run-all.sh | 51 +++++++++++++++++++++++++----------------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/tests/run-all.sh b/tests/run-all.sh index 7a848df3565..3416309a2e9 100755 --- a/tests/run-all.sh +++ b/tests/run-all.sh @@ -36,32 +36,35 @@ function run_tests { fi done - printf "\n\n##############################################\n" - printf "##############################################\n\n" - printf "FINISHED FIRST EXECUTION, RETRYING FAILING TESTS" - printf "\n\n##############################################\n" - printf "##############################################\n\n" + # Retry failing tests + if [ ${#failed_lookup[@]} -ne 0 ]; then - retry_lookup=failed_lookup - failed_count=0 - failed_lookup=() + printf "\n\n##############################################\n" + printf "##############################################\n\n" + printf "FINISHED FIRST EXECUTION, RETRYING FAILING TESTS" + printf "\n\n##############################################\n" + printf "##############################################\n\n" - #Retry failing tests - for test_case in $(retry_lookup | shuf) - do - counter=$((counter+1)) - ./node_modules/.bin/ava $test_case > "${test_case}.2.log" 2>&1 & - pid=$! - echo "Rerunning $test_case with pid: $pid" - pids+=($pid) - lookup[$pid]=$test_case - # limit concurrent runs - if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then - wait_for_jobs - counter=0 - pids=() - fi - done + retry_lookup=("${failed_lookup[@]}") + failed_count=0 + failed_lookup=() + + for test_case in "${retry_lookup[@]}" + do + counter=$((counter+1)) + ./node_modules/.bin/ava $test_case > "${test_case}.2.log" 2>&1 & + pid=$! 
+            echo "Rerunning $test_case with pid: $pid"
+            pids+=($pid)
+            lookup[$pid]=$test_case
+            # limit concurrent runs
+            if [[ "$counter" -ge "$concurrent_tests_limit" ]]; then
+                wait_for_jobs
+                counter=0
+                pids=()
+            fi
+        done
+    fi
 }
 
 function mark_failed {
From 3ce106ae2df09022102126841a029321ce8e5eca Mon Sep 17 00:00:00 2001
From: Jorge Turrado
Date: Fri, 4 Feb 2022 12:20:49 +0100
Subject: [PATCH 42/48] fix an error waiting

Signed-off-by: Jorge Turrado
---
 tests/run-all.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/run-all.sh b/tests/run-all.sh
index 3416309a2e9..2403ca5783c 100755
--- a/tests/run-all.sh
+++ b/tests/run-all.sh
@@ -36,6 +36,8 @@ function run_tests {
         fi
     done
 
+    wait_for_jobs
+
     # Retry failing tests
     if [ ${#failed_lookup[@]} -ne 0 ]; then
 
From 1455c79d261dc68c2b420511d3a43c41f8e69cd4 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 5 Feb 2022 09:46:37 +0100
Subject: [PATCH 43/48] FORCE TEST FAILURE

Signed-off-by: jorturfer
---
 tests/scalers/kubernetes-workload.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/scalers/kubernetes-workload.test.ts b/tests/scalers/kubernetes-workload.test.ts
index 5cb77c876d1..0f9db89ac02 100644
--- a/tests/scalers/kubernetes-workload.test.ts
+++ b/tests/scalers/kubernetes-workload.test.ts
@@ -31,7 +31,7 @@ test.serial('Deployment should have 0 replicas on start', t => {
   const replicaCount = sh.exec(
     `kubectl get deployment.apps/sut-deployment --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
   ).stdout
-  t.is(replicaCount, '0', 'replica count should start out as 0')
+  t.is(replicaCount, '10', 'replica count should start out as 0')
 })
 
 test.serial(`Deployment should scale to fit the amount of pods which match the selector`, async t => {
From cf24adf932e5f0b90c940ae82640e32e7f33e629 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Sat, 5 Feb 2022 09:47:32 +0100
Subject: [PATCH 44/48] Undo test failure probe

Signed-off-by: jorturfer
---
 tests/scalers/kubernetes-workload.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/scalers/kubernetes-workload.test.ts b/tests/scalers/kubernetes-workload.test.ts
index 0f9db89ac02..5cb77c876d1 100644
--- a/tests/scalers/kubernetes-workload.test.ts
+++ b/tests/scalers/kubernetes-workload.test.ts
@@ -31,7 +31,7 @@ test.serial('Deployment should have 0 replicas on start', t => {
   const replicaCount = sh.exec(
     `kubectl get deployment.apps/sut-deployment --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"`
   ).stdout
-  t.is(replicaCount, '10', 'replica count should start out as 0')
+  t.is(replicaCount, '0', 'replica count should start out as 0')
 })
 
 test.serial(`Deployment should scale to fit the amount of pods which match the selector`, async t => {
From 162e09a0eef1f1cc12f2cb1417f35c36834fd603 Mon Sep 17 00:00:00 2001
From: jorturfer
Date: Fri, 11 Feb 2022 00:28:12 +0100
Subject: [PATCH 45/48] Fix azure-queue-restore-original-replicas.test.ts

Signed-off-by: jorturfer
---
 ...re-queue-restore-original-replicas.test.ts | 48 ++++++++-----------
 1 file changed, 20 insertions(+), 28 deletions(-)

diff --git a/tests/scalers/azure-queue-restore-original-replicas.test.ts b/tests/scalers/azure-queue-restore-original-replicas.test.ts
index e565ab44b79..676f21fa1a0 100644
--- a/tests/scalers/azure-queue-restore-original-replicas.test.ts
+++ 
b/tests/scalers/azure-queue-restore-original-replicas.test.ts
@@ -1,9 +1,12 @@
+import * as azure from 'azure-storage'
 import * as fs from 'fs'
 import * as sh from 'shelljs'
 import * as tmp from 'tmp'
 import test from 'ava'
+import {waitForDeploymentReplicaCount} from "./helpers";
 
 const defaultNamespace = 'azure-queue-restore-original-replicas-test'
+const queueName = 'queue-name-restore'
 const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING']
 
 test.before(t => {
@@ -11,6 +14,10 @@ test.before(t => {
         t.fail('TEST_STORAGE_CONNECTION_STRING environment variable is required for queue tests')
     }
 
+    const queueSvc = azure.createQueueService(connectionString)
+    queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder()
+    queueSvc.createQueueIfNotExists(queueName, _ => {})
+
     sh.config.silent = true
     const base64ConStr = Buffer.from(connectionString).toString('base64')
     const tmpFile = tmp.fileSync()
@@ -23,11 +30,8 @@ test.before(t => {
     )
 })
 
-test.serial('Deployment should have 2 replicas on start', t => {
-    const replicaCount = sh.exec(
-        `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
-    ).stdout
-    t.is(replicaCount, '2', 'replica count should start out as 2')
+test.serial('Deployment should have 2 replicas on start', async t => {
+    t.true(await waitForDeploymentReplicaCount(2, 'test-deployment', defaultNamespace, 15, 1000), 'replica count should be 2 after 15 seconds')
 })
 
 test.serial('Creating ScaledObject should work', t => {
@@ -44,18 +48,8 @@ test.serial('Creating ScaledObject should work', t => {
 
 test.serial(
     'Deployment should scale to 0 and then should be back to 2 after deletion of ScaledObject',
-    t => {
-        let replicaCount = '100'
-        for (let i = 0; i < 50 && replicaCount !== '0'; i++) {
-            replicaCount = sh.exec(
-                `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"`
-            ).stdout
-            if (replicaCount !== '0') {
-                sh.exec('sleep 5s')
-            }
-        }
-        t.is('0', replicaCount, 'Replica count should be 0')
-
+    async t => {
+        t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 0 after 2 minutes')
         t.is(
             0,
@@ -63,15 +57,7 @@ test.serial(
             'deletion of ScaledObject should work.' 
) - for (let i = 0; i < 50 && replicaCount !== '2'; i++) { - replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - if (replicaCount !== '2') { - sh.exec('sleep 5s') - } - } - t.is('2', replicaCount, 'Replica count should be back at orignal 2') + t.true(await waitForDeploymentReplicaCount(2, 'test-deployment', defaultNamespace, 120, 1000), 'replica count should be 2 after 2 minutes') } ) @@ -86,7 +72,13 @@ test.after.always.cb('clean up azure-queue deployment', t => { sh.exec(`kubectl delete ${resource} --namespace ${defaultNamespace}`) } sh.exec(`kubectl delete namespace ${defaultNamespace}`) - t.end() + + // delete test queue + const queueSvc = azure.createQueueService(connectionString) + queueSvc.deleteQueueIfExists(queueName, err => { + t.falsy(err, 'should delete test queue successfully') + t.end() + }) }) const deployYaml = `apiVersion: v1 @@ -145,5 +137,5 @@ spec: triggers: - type: azure-queue metadata: - queueName: queue-name + queueName: ${queueName} connectionFromEnv: AzureWebJobsStorage` From 1621292201e0d7e112d84233f2f11646c96886fc Mon Sep 17 00:00:00 2001 From: jorturfer Date: Fri, 11 Feb 2022 00:36:16 +0100 Subject: [PATCH 46/48] Update azure queue tests to avoid condition races Signed-off-by: jorturfer --- .../scalers/azure-queue-trigger-auth.test.ts | 5 +- tests/scalers/azure-queue.test.ts | 61 ++++++------------- 2 files changed, 24 insertions(+), 42 deletions(-) diff --git a/tests/scalers/azure-queue-trigger-auth.test.ts b/tests/scalers/azure-queue-trigger-auth.test.ts index c341d2b867a..3f40dfbdfd7 100644 --- a/tests/scalers/azure-queue-trigger-auth.test.ts +++ b/tests/scalers/azure-queue-trigger-auth.test.ts @@ -7,7 +7,7 @@ import test from 'ava' import {waitForDeploymentReplicaCount} from "./helpers"; const testNamespace = 'azure-queue-auth-test' -const queueName = 'queue-name' +const queueName = 'queue-name-trigger' const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING'] test.before(async t => { @@ -45,6 +45,9 @@ test.serial( // Scaling out when messages available t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 3 after 1 minute') + + queueSvc.clearMessages(queueName, _ => {}) + // Scaling in when no available messages t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', testNamespace, 300, 1000), 'replica count should be 0 after 5 minute') } diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts index 437f6aeab0a..c172622c40e 100644 --- a/tests/scalers/azure-queue.test.ts +++ b/tests/scalers/azure-queue.test.ts @@ -4,15 +4,21 @@ import * as fs from 'fs' import * as sh from 'shelljs' import * as tmp from 'tmp' import test from 'ava' +import {waitForDeploymentReplicaCount} from "./helpers"; const defaultNamespace = 'azure-queue-test' const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING'] +const queueName = 'queue-name' -test.before(t => { +test.before(async t => { if (!connectionString) { t.fail('TEST_STORAGE_CONNECTION_STRING environment variable is required for queue tests') } + const queueSvc = azure.createQueueService(connectionString) + queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder() + queueSvc.createQueueIfNotExists(queueName, _ => {}) + sh.config.silent = true const base64ConStr = Buffer.from(connectionString).toString('base64') const tmpFile = tmp.fileSync() @@ -23,54 +29,27 @@ test.before(t => 
{ sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${defaultNamespace}`).code, 'creating a deployment should work.' ) + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 60, 1000), 'replica count should be 0 after 1 minute') }) -test.serial('Deployment should have 0 replicas on start', t => { - const replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - -test.serial.cb( +test.serial( 'Deployment should scale to 4 with 10,000 messages on the queue then back to 0', - t => { + async t => { // add 10,000 messages const queueSvc = azure.createQueueService(connectionString) queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder() - queueSvc.createQueueIfNotExists('queue-name', err => { - t.falsy(err, 'unable to create queue') - async.mapLimit( - Array(10000).keys(), - 200, - (n, cb) => queueSvc.createMessage('queue-name', `test ${n}`, cb), - () => { - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '4'; i++) { - replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - if (replicaCount !== '4') { - sh.exec('sleep 5s') - } - } + await async.mapLimit( + Array(10000).keys(), + 20, + (n, cb) => queueSvc.createMessage(queueName, `test ${n}`, cb) + ) - t.is('4', replicaCount, 'Replica count should be 4 after 300 seconds') + // Scaling out when messages available + t.true(await waitForDeploymentReplicaCount(4, 'test-deployment', defaultNamespace, 300, 1000), 'replica count should be 4 after 5 minutes') - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment.apps/test-deployment --namespace ${defaultNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - t.is('0', replicaCount, 'Replica count should be 0 after 6 minutes') - t.end() - } - ) - }) + // Scaling in when no available messages + t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 360, 1000), 'replica count should be 0 after 6 minute') } ) @@ -88,7 +67,7 @@ test.after.always.cb('clean up azure-queue deployment', t => { // delete test queue const queueSvc = azure.createQueueService(connectionString) - queueSvc.deleteQueueIfExists('queue-name', err => { + queueSvc.deleteQueueIfExists(queueName, err => { t.falsy(err, 'should delete test queue successfully') t.end() }) From 16ba72d828f92f73300e81e981d5c16200068822 Mon Sep 17 00:00:00 2001 From: Jorge Turrado Date: Fri, 11 Feb 2022 09:12:25 +0100 Subject: [PATCH 47/48] Fix some styles Signed-off-by: Jorge Turrado --- tests/scalers/azure-queue-restore-original-replicas.test.ts | 2 +- tests/scalers/azure-queue-trigger-auth.test.ts | 4 ++-- tests/scalers/azure-queue.test.ts | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/scalers/azure-queue-restore-original-replicas.test.ts b/tests/scalers/azure-queue-restore-original-replicas.test.ts index 676f21fa1a0..1f93a84d0f0 100644 --- a/tests/scalers/azure-queue-restore-original-replicas.test.ts +++ b/tests/scalers/azure-queue-restore-original-replicas.test.ts @@ -72,7 +72,7 @@ test.after.always.cb('clean up azure-queue deployment', t => { sh.exec(`kubectl delete ${resource} --namespace ${defaultNamespace}`) } sh.exec(`kubectl delete namespace 
${defaultNamespace}`)
-    
+
     // delete test queue
     const queueSvc = azure.createQueueService(connectionString)
     queueSvc.deleteQueueIfExists(queueName, err => {
diff --git a/tests/scalers/azure-queue-trigger-auth.test.ts b/tests/scalers/azure-queue-trigger-auth.test.ts
index 3f40dfbdfd7..29a1aa27e2e 100644
--- a/tests/scalers/azure-queue-trigger-auth.test.ts
+++ b/tests/scalers/azure-queue-trigger-auth.test.ts
@@ -45,9 +45,9 @@ test.serial(
 
         // Scaling out when messages available
         t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 3 after 1 minute')
-        
+
         queueSvc.clearMessages(queueName, _ => {})
-        
+
         // Scaling in when no available messages
         t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', testNamespace, 300, 1000), 'replica count should be 0 after 5 minute')
     }
diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts
index c172622c40e..77987aa24eb 100644
--- a/tests/scalers/azure-queue.test.ts
+++ b/tests/scalers/azure-queue.test.ts
@@ -126,5 +126,5 @@ spec:
   triggers:
   - type: azure-queue
     metadata:
-      queueName: queue-name
+      queueName: ${queueName}
       connectionFromEnv: AzureWebJobsStorage`
From 149cada75f5ba5e4a2ec49cf847e2eaae7962469 Mon Sep 17 00:00:00 2001
From: Jorge Turrado
Date: Fri, 11 Feb 2022 12:03:41 +0100
Subject: [PATCH 48/48] Improve azure queues

Signed-off-by: Jorge Turrado
---
 tests/scalers/azure-queue-trigger-auth.test.ts |  2 +-
 tests/scalers/azure-queue.test.ts              | 13 +++++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/tests/scalers/azure-queue-trigger-auth.test.ts b/tests/scalers/azure-queue-trigger-auth.test.ts
index 29a1aa27e2e..633688fafec 100644
--- a/tests/scalers/azure-queue-trigger-auth.test.ts
+++ b/tests/scalers/azure-queue-trigger-auth.test.ts
@@ -44,7 +44,7 @@ test.serial(
     )
 
         // Scaling out when messages available
-        t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 3 after 1 minute')
+        t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', testNamespace, 60, 1000), 'replica count should be 1 after 1 minute')
 
         queueSvc.clearMessages(queueName, _ => {})
 
diff --git a/tests/scalers/azure-queue.test.ts b/tests/scalers/azure-queue.test.ts
index 77987aa24eb..a13b19f9069 100644
--- a/tests/scalers/azure-queue.test.ts
+++ b/tests/scalers/azure-queue.test.ts
@@ -8,7 +8,7 @@ import {waitForDeploymentReplicaCount} from "./helpers";
 
 const defaultNamespace = 'azure-queue-test'
 const connectionString = process.env['TEST_STORAGE_CONNECTION_STRING']
-const queueName = 'queue-name'
+const queueName = 'queue-single-name'
 
 test.before(async t => {
     if (!connectionString) {
@@ -35,21 +35,21 @@ test.serial(
-    'Deployment should scale to 4 with 10,000 messages on the queue then back to 0',
+    'Deployment should scale to 1 with 1,000 messages on the queue then back to 0',
     async t => {
-        // add 10,000 messages
         const queueSvc = azure.createQueueService(connectionString)
         queueSvc.messageEncoder = new azure.QueueMessageEncoder.TextBase64QueueMessageEncoder()
 
         await async.mapLimit(
-            Array(10000).keys(),
+            Array(1000).keys(),
             20,
             (n, cb) => queueSvc.createMessage(queueName, `test ${n}`, cb)
         )
 
         // Scaling out when messages available
-        t.true(await waitForDeploymentReplicaCount(4, 'test-deployment', defaultNamespace, 300, 1000), 'replica count should be 4 after 5 minutes')
+        t.true(await waitForDeploymentReplicaCount(1, 'test-deployment', defaultNamespace, 60, 1000), 'replica count should be 1 after 1 minute')
+        queueSvc.clearMessages(queueName, _ => {})
 
         // Scaling 
in when no available messages
-        t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 360, 1000), 'replica count should be 0 after 6 minute')
+        t.true(await waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 300, 1000), 'replica count should be 0 after 5 minutes')
     }
 )
 
@@ -121,7 +121,8 @@ spec:
   scaleTargetRef:
     name: test-deployment
   pollingInterval: 5
-  maxReplicaCount: 4
+  minReplicaCount: 0
+  maxReplicaCount: 1
   cooldownPeriod: 10
   triggers:
   - type: azure-queue
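
---
Note on the helper these patches lean on: the tests migrated above replace hand-rolled kubectl polling loops with waitForDeploymentReplicaCount(target, name, namespace, iterations, intervalMs) imported from tests/scalers/helpers.ts. That helper's body is not part of this series; the sketch below is a minimal reconstruction of the behavior the call sites assume, and both the shelljs-based approach and the polled field (.spec.replicas) are assumptions rather than the actual helpers.ts code.

import * as sh from 'shelljs'

// Minimal sketch, not the actual tests/scalers/helpers.ts implementation.
// Assumes kubectl is on PATH and that .spec.replicas is the field to poll,
// matching the loops these patches replace.
export async function waitForDeploymentReplicaCount(
    target: number,
    name: string,
    namespace: string,
    iterations: number,
    intervalMs: number
): Promise<boolean> {
    for (let i = 0; i < iterations; i++) {
        // read the deployment's current replica count
        const replicaCount = sh.exec(
            `kubectl get deployment.apps/${name} --namespace ${namespace} -o jsonpath="{.spec.replicas}"`
        ).stdout
        if (replicaCount === target.toString()) {
            return true
        }
        // wait intervalMs between polls (the call sites pass 1000, i.e. one second)
        await new Promise(resolve => setTimeout(resolve, intervalMs))
    }
    return false
}

Under these assumptions, a call such as waitForDeploymentReplicaCount(0, 'test-deployment', defaultNamespace, 300, 1000) polls once per second and gives up after 5 minutes, which is why the assertion messages in the patches quote wall-clock times.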