diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go
index 312bea606f9..cbb578beceb 100644
--- a/pkg/scalers/redis_scaler.go
+++ b/pkg/scalers/redis_scaler.go
@@ -17,9 +17,10 @@ import (
 )
 
 const (
-	defaultTargetListLength = 5
-	defaultDBIdx            = 0
-	defaultEnableTLS        = false
+	defaultListLength           = 5
+	defaultActivationListLength = 0
+	defaultDBIdx                = 0
+	defaultEnableTLS            = false
 )
 
 type redisAddressParser func(metadata, resolvedEnv, authParams map[string]string) (redisConnectionInfo, error)
@@ -45,11 +46,12 @@ type redisConnectionInfo struct {
 }
 
 type redisMetadata struct {
-	targetListLength int64
-	listName         string
-	databaseIndex    int
-	connectionInfo   redisConnectionInfo
-	scalerIndex      int
+	listLength           int64
+	activationListLength int64
+	listName             string
+	databaseIndex        int
+	connectionInfo       redisConnectionInfo
+	scalerIndex          int
 }
 
 // NewRedisScaler creates a new redisScaler
@@ -180,14 +182,23 @@ func parseRedisMetadata(config *ScalerConfig, parserFn redisAddressParser) (*red
 	meta := redisMetadata{
 		connectionInfo: connInfo,
 	}
 
-	meta.targetListLength = defaultTargetListLength
+	meta.listLength = defaultListLength
 	if val, ok := config.TriggerMetadata["listLength"]; ok {
 		listLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			return nil, fmt.Errorf("list length parsing error %s", err.Error())
 		}
-		meta.targetListLength = listLength
+		meta.listLength = listLength
+	}
+
+	meta.activationListLength = defaultActivationListLength
+	if val, ok := config.TriggerMetadata["activationListLength"]; ok {
+		activationListLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("activationListLength parsing error %s", err.Error())
+		}
+		meta.activationListLength = activationListLength
 	}
 
 	if val, ok := config.TriggerMetadata["listName"]; ok {
@@ -217,7 +228,7 @@ func (s *redisScaler) IsActive(ctx context.Context) (bool, error) {
 		return false, err
 	}
 
-	return length > 0, nil
+	return length > s.metadata.activationListLength, nil
 }
 
 func (s *redisScaler) Close(context.Context) error {
@@ -231,7 +242,7 @@ func (s *redisScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricS
 		Metric: v2beta2.MetricIdentifier{
 			Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
 		},
-		Target: GetMetricTarget(s.metricType, s.metadata.targetListLength),
+		Target: GetMetricTarget(s.metricType, s.metadata.listLength),
 	}
 	metricSpec := v2beta2.MetricSpec{
 		External: externalMetric, Type: externalMetricType,
diff --git a/pkg/scalers/redis_scaler_test.go b/pkg/scalers/redis_scaler_test.go
index ac639883dc2..1760c958ac7 100644
--- a/pkg/scalers/redis_scaler_test.go
+++ b/pkg/scalers/redis_scaler_test.go
@@ -46,6 +46,8 @@ var testRedisMetadata = []parseRedisMetadataTestData{
 	{map[string]string{"listName": "mylist", "listLength": "10", "address": "", "password": ""}, true, map[string]string{}},
 	// improperly formed listLength
 	{map[string]string{"listName": "mylist", "listLength": "AA", "addressFromEnv": "REDIS_HOST", "password": ""}, true, map[string]string{}},
+	// improperly formed activationListLength
+	{map[string]string{"listName": "mylist", "listLength": "1", "activationListLength": "AA", "addressFromEnv": "REDIS_HOST", "password": ""}, true, map[string]string{}},
 	// address does not resolve
 	{map[string]string{"listName": "mylist", "listLength": "0", "addressFromEnv": "REDIS_WRONG", "password": ""}, true, map[string]string{}},
 	// password is defined in the authParams
@@ -153,8 +155,8 @@ func TestParseRedisClusterMetadata(t *testing.T) {
 			"addresses": ":7001, 
:7002", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{":7001", ":7002"}, }, @@ -171,8 +173,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { "ports": "1, 2, 3", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -192,8 +194,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { "username": "username", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -213,8 +215,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { }, authParams: map[string]string{}, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -235,8 +237,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -257,8 +259,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { "password": "password", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -279,8 +281,8 @@ func TestParseRedisClusterMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -364,8 +366,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "addresses": ":7001, :7002", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{":7001", ":7002"}, }, @@ -382,8 +384,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "ports": "1, 2, 3", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -402,8 +404,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "ports": "1, 2, 3", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -423,8 +425,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "username": "username", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -444,8 +446,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { }, authParams: map[string]string{}, 
wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -466,8 +468,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -488,8 +490,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "password": "password", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -510,8 +512,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -532,8 +534,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "sentinelUsername": "sentinelUsername", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -553,8 +555,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { }, authParams: map[string]string{}, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -575,8 +577,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -597,8 +599,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "sentinelPassword": "sentinelPassword", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -619,8 +621,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -641,8 +643,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { "sentinelMaster": "sentinelMaster", }, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -662,8 +664,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { }, authParams: map[string]string{}, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: 
redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, @@ -684,8 +686,8 @@ func TestParseRedisSentinelMetadata(t *testing.T) { authParams: map[string]string{}, resolvedEnv: testRedisResolvedEnv, wantMeta: &redisMetadata{ - targetListLength: 5, - listName: "mylist", + listLength: 5, + listName: "mylist", connectionInfo: redisConnectionInfo{ addresses: []string{"a:1", "b:2", "c:3"}, hosts: []string{"a", "b", "c"}, diff --git a/tests/scalers/redis-cluster-lists.test.ts b/tests/scalers/redis-cluster-lists.test.ts deleted file mode 100644 index a1386bbe7ba..00000000000 --- a/tests/scalers/redis-cluster-lists.test.ts +++ /dev/null @@ -1,531 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import {createNamespace} from "./helpers"; -import { RedisClusterHelper } from './redis-cluster-helper'; - -const redisNamespace = 'redis-cluster-lists' -const testNamespace = 'redis-cluster-lists-test' -const redisPassword = 'my-password' -let redisHost = '' -const redisPort = 6379 -let redisAddress = '' -const listNameForHostPortRef = 'my-test-list-host-port-ref' -const listNameForAddressRef = 'my-test-list-address-ref' -const listNameForHostPortTriggerAuth = 'my-test-list-host-port-trigger' -const redisWorkerHostPortRefDeploymentName = 'redis-worker-test-hostport' -const redisWorkerAddressRefDeploymentName = 'redis-worker-test-address' -const redisWorkerHostPortRefTriggerAuthDeploymentName = 'redis-worker-test-hostport-triggerauth' -const itemsToWrite = 200 -const deploymentContainerImage = 'goku321/redis-cluster-list:v1.7' -const writeJobNameForHostPortRef = 'redis-writer-host-port-ref' -const writeJobNameForAddressRef = 'redis-writer-address-ref' -const writeJobNameForHostPortInTriggerAuth = 'redis-writer-host-port-trigger-auth' - -test.before(t => { - // Deploy Redis cluster. - const base64Password = Buffer.from(redisPassword).toString('base64') - RedisClusterHelper.install(t,base64Password, redisNamespace) - - // Get Redis cluster address. - redisHost = sh.exec(`kubectl get svc redis-cluster -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) - redisAddress = `${redisHost}:${redisPort}` - - // Create test namespace. - createNamespace(testNamespace) - - const triggerAuthTmpFile = tmp.fileSync() - fs.writeFileSync(triggerAuthTmpFile.name, scaledObjectTriggerAuthYaml.replace('{{REDIS_PASSWORD}}', base64Password)) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth should work..' - ) - - const triggerAuthHostPortTmpFile = tmp.fileSync() - - fs.writeFileSync(triggerAuthHostPortTmpFile.name, - scaledObjectTriggerAuthHostPortYaml.replace('{{REDIS_PASSWORD}}', base64Password) - .replace('{{REDIS_HOSTS}}', Buffer.from(redisHost).toString('base64')) - .replace('{{REDIS_PORTS}}', Buffer.from(redisPort.toString()).toString('base64')) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthHostPortTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth with host port should work..' - ) - - // Create a deployment with host and port. 
- const deploymentHostPortRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTmpFile.name, redisListDeployHostPortYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_HOSTS}}/g, redisHost) - .replace(/{{REDIS_PORTS}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host and port envs should work..' - ) - - const deploymentAddressRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentAddressRefTmpFile.name, redisListDeployAddressYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_ADDRESSES}}/g, redisAddress) - .replace(/{{LIST_NAME}}/g, listNameForAddressRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerAddressRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentAddressRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis address var should work..' - ) - - - const deploymentHostPortRefTriggerAuthTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTriggerAuthTmpFile.name, redisListDeployHostPortInTriggerAuhYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_HOSTS}}/g, redisHost) - .replace(/{{REDIS_PORTS}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortTriggerAuth) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefTriggerAuthDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTriggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host port in trigger auth should work..' 
- ) -}) - -test.serial('Deployment for redis host and port env vars should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port env vars should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - -test.serial('Deployment for redis address env var should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - - -test.serial(`Deployment using redis address env var should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.serial('Deployment for redis host and port in the trigger auth should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port in triggerAuth should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = 
sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - `job/${writeJobNameForHostPortRef}`, - `job/${writeJobNameForAddressRef}`, - `job/${writeJobNameForHostPortInTriggerAuth}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerAddressRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - 'triggerauthentication.keda.sh/keda-redis-cluster-list-triggerauth', - 'triggerauthentication.keda.sh/keda-redis-cluster-list-triggerauth-host-port', - `deployment/${redisWorkerAddressRefDeploymentName}`, - `deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - `deployment/${redisWorkerHostPortRefDeploymentName}`, - 'secret/redis-password', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - RedisClusterHelper.uninstall(redisNamespace) - t.end() -}) - -function runWriteJob(t, jobName, listName) { - // write to list - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, writeJobYaml.replace('{{REDIS_ADDRESSES}}', redisAddress).replace('{{REDIS_PASSWORD}}', redisPassword) - .replace('{{LIST_NAME}}', listName) - .replace('{{NUMBER_OF_ITEMS_TO_WRITE}}', itemsToWrite.toString()) - .replace('{{CONTAINER_IMAGE}}', deploymentContainerImage) - .replace('{{JOB_NAME}}', jobName) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'list writer job should apply.' 
- ) - - // wait for the write job to complete - for (let i = 0; i < 20; i++) { - const succeeded = sh.exec(`kubectl get job ${writeJobNameForHostPortRef} --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } -} - -const redisListDeployHostPortYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "{{REDIS_PORTS}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - advanced: - horizontalPodAutoscalerConfig: - behavior: - scaleDown: - stabilizationWindowSeconds: 15 - triggers: - - type: redis-cluster - metadata: - hostsFromEnv: REDIS_HOSTS - portsFromEnv: REDIS_PORTS - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-cluster-list-triggerauth -` - - -const redisListDeployAddressYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_ADDRESSES - value: {{REDIS_ADDRESSES}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - advanced: - horizontalPodAutoscalerConfig: - behavior: - scaleDown: - stabilizationWindowSeconds: 15 - triggers: - - type: redis-cluster - metadata: - addressesFromEnv: REDIS_ADDRESSES - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-cluster-list-triggerauth -` - -const redisListDeployHostPortInTriggerAuhYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "{{REDIS_PORTS}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - 
cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - advanced: - horizontalPodAutoscalerConfig: - behavior: - scaleDown: - stabilizationWindowSeconds: 15 - triggers: - - type: redis-cluster - metadata: - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-cluster-list-triggerauth-host-port -` - -const scaledObjectTriggerAuthHostPortYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-config -type: Opaque -data: - password: {{REDIS_PASSWORD}} - redisHost: {{REDIS_HOSTS}} - redisPort: {{REDIS_PORTS}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-cluster-list-triggerauth-host-port -spec: - secretTargetRef: - - parameter: password - name: redis-config - key: password - - parameter: hosts - name: redis-config - key: redisHost - - parameter: ports - name: redis-config - key: redisPort -` - -const scaledObjectTriggerAuthYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-cluster-list-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password -` - - -const writeJobYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: {{JOB_NAME}} -spec: - template: - spec: - containers: - - name: redis - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - env: - - name: REDIS_ADDRESSES - value: {{REDIS_ADDRESSES}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: NO_LIST_ITEMS_TO_WRITE - value: "{{NUMBER_OF_ITEMS_TO_WRITE}}" - args: ["write"] - restartPolicy: Never - backoffLimit: 4 -` diff --git a/tests/scalers/redis-cluster-streams.test.ts b/tests/scalers/redis-cluster-streams.test.ts deleted file mode 100644 index e3b86a49b13..00000000000 --- a/tests/scalers/redis-cluster-streams.test.ts +++ /dev/null @@ -1,198 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import {createNamespace, waitForDeploymentReplicaCount} from "./helpers"; -import { RedisClusterHelper } from './redis-cluster-helper'; - -const redisNamespace = 'redis-cluster-streams' -const testNamespace = 'redis-cluster-streams-test' -const redisPassword = 'foobared' -let redisHost = '' -const numMessages = 100 - -test.before(t => { - // Deploy Redis cluster. - const base64Password = Buffer.from(redisPassword).toString('base64') - RedisClusterHelper.install(t,base64Password, redisNamespace) - - // Get Redis cluster address. - redisHost = sh.exec(`kubectl get svc redis-cluster -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) - - // Create test namespace. - createNamespace(testNamespace) - - // Deploy streams consumer app, scaled object etc. - const tmpFile = tmp.fileSync() - - - fs.writeFileSync(tmpFile.name, redisStreamsDeployYaml.replace('{{REDIS_PASSWORD}}', base64Password).replace('{{REDIS_HOSTS}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment should work..' 
- ) -}) - -test.serial('Deployment should have 1 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '1', 'replica count should start out as 1') -}) - -test.serial(`Deployment should scale to 5 with ${numMessages} messages and back to 1`, async t => { - // Publish messages to redis streams. - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, producerDeployYaml.replace('{{NUM_MESSAGES}}', numMessages.toString()) - .replace('{{REDIS_HOSTS}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'producer job should apply.' - ) - - // Wait for producer job to finish. - for (let i = 0; i < 60; i++) { - const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } - // With messages published, the consumer deployment should start receiving the messages. - t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 5 within 10 minutes') - t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') -}) - - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - 'scaledobject.keda.sh/redis-streams-scaledobject', - 'triggerauthentications.keda.sh/keda-redis-stream-triggerauth', - 'secret/redis-password', - 'deployment/redis-streams-consumer', - 'job/redis-streams-producer', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - RedisClusterHelper.uninstall(redisNamespace) - t.end() -}) - -const redisStreamsDeployYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-stream-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-streams-consumer -spec: - replicas: 1 - selector: - matchLabels: - app: redis-streams-consumer - template: - metadata: - labels: - app: redis-streams-consumer - spec: - containers: - - name: redis-streams-consumer - image: goku321/redis-cluster-streams:v2.5 - command: ["./main"] - args: ["consumer"] - imagePullPolicy: Always - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "6379" - - name: REDIS_STREAM_NAME - value: my-stream - - name: REDIS_STREAM_CONSUMER_GROUP_NAME - value: consumer-group-1 - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: redis-streams-scaledobject -spec: - scaleTargetRef: - name: redis-streams-consumer - pollingInterval: 5 - cooldownPeriod: 10 - minReplicaCount: 1 - maxReplicaCount: 5 - advanced: - horizontalPodAutoscalerConfig: - behavior: - scaleDown: - stabilizationWindowSeconds: 15 - triggers: - - type: redis-cluster-streams - metadata: - hostsFromEnv: REDIS_HOSTS - portsFromEnv: REDIS_PORTS - stream: my-stream - consumerGroup: consumer-group-1 - pendingEntriesCount: "10" - authenticationRef: - name: 
keda-redis-stream-triggerauth -` - -const producerDeployYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: redis-streams-producer -spec: - template: - spec: - containers: - - name: producer - image: goku321/redis-cluster-streams:v2.5 - command: ["./main"] - args: ["producer"] - imagePullPolicy: Always - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "6379" - - name: REDIS_STREAM_NAME - value: my-stream - - name: NUM_MESSAGES - value: "{{NUM_MESSAGES}}" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password - restartPolicy: Never -` diff --git a/tests/scalers/redis-lists.test.ts b/tests/scalers/redis-lists.test.ts deleted file mode 100644 index 79770a69da6..00000000000 --- a/tests/scalers/redis-lists.test.ts +++ /dev/null @@ -1,552 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import { createNamespace, waitForRollout } from './helpers' - -const redisNamespace = 'redis' -const testNamespace = 'redis-lists-test' -const redisDeploymentName = 'redis' -const redisPassword = 'my-password' -const redisHost = `redis.${redisNamespace}.svc.cluster.local` -const redisPort = 6379 -const redisAddress = `${redisHost}:${redisPort}` -const listNameForHostPortRef = 'my-test-list-host-port-ref' -const listNameForAddressRef = 'my-test-list-address-ref' -const listNameForHostPortTriggerAuth = 'my-test-list-host-port-trigger' -const redisWorkerHostPortRefDeploymentName = 'redis-worker-test-hostport' -const redisWorkerAddressRefDeploymentName = 'redis-worker-test-address' -const redisWorkerHostPortRefTriggerAuthDeploymentName = 'redis-worker-test-hostport-triggerauth' -const itemsToWrite = 200 -const deploymentContainerImage = 'ghcr.io/kedacore/tests-redis-lists' -const writeJobNameForHostPortRef = 'redis-writer-host-port-ref' -const writeJobNameForAddressRef = 'redis-writer-address-ref' -const writeJobNameForHostPortInTriggerAuth = 'redis-writer-host-port-trigger-auth' - -test.before(t => { - // setup Redis - createNamespace(redisNamespace) - - const redisDeployTmpFile = tmp.fileSync() - fs.writeFileSync(redisDeployTmpFile.name, redisDeployYaml.replace('{{REDIS_PASSWORD}}', redisPassword)) - - t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${redisDeployTmpFile.name}`).code, 'creating a Redis deployment should work.') - - // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') - - createNamespace(testNamespace) - - const triggerAuthTmpFile = tmp.fileSync() - const base64Password = Buffer.from(redisPassword).toString('base64') - fs.writeFileSync(triggerAuthTmpFile.name, scaledObjectTriggerAuthYaml.replace('{{REDIS_PASSWORD}}', base64Password)) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth should work..' - ) - - const triggerAuthHostPortTmpFile = tmp.fileSync() - - fs.writeFileSync(triggerAuthHostPortTmpFile.name, - scaledObjectTriggerAuthHostPortYaml.replace('{{REDIS_PASSWORD}}', base64Password) - .replace('{{REDIS_HOST}}', Buffer.from(redisHost).toString('base64')) - .replace('{{REDIS_PORT}}', Buffer.from(redisPort.toString()).toString('base64')) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthHostPortTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth with host port should work..' 
- ) - - const deploymentHostPortRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTmpFile.name, redisRedisListDeployHostPortYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_HOST}}/g, redisHost) - .replace(/{{REDIS_PORT}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host and port envs should work..' - ) - - const deploymentAddressRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentAddressRefTmpFile.name, redisListDeployAddressYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_ADDRESS}}/g, redisAddress) - .replace(/{{LIST_NAME}}/g, listNameForAddressRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerAddressRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentAddressRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis address var should work..' - ) - - - const deploymentHostPortRefTriggerAuthTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTriggerAuthTmpFile.name, redisListDeployHostPortInTriggerAuhYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_HOST}}/g, redisHost) - .replace(/{{REDIS_PORT}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortTriggerAuth) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefTriggerAuthDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTriggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host port in trigger auth should work..' 
- ) -}) - -test.serial('Deployment for redis host and port env vars should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port env vars should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - -test.serial('Deployment for redis address env var should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - - -test.serial(`Deployment using redis address env var should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForAddressRef, listNameForAddressRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.serial('Deployment for redis host and port in the trigger auth should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port in triggerAuth should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = 
sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - `job/${writeJobNameForHostPortRef}`, - `job/${writeJobNameForAddressRef}`, - `job/${writeJobNameForHostPortInTriggerAuth}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerAddressRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - 'triggerauthentication.keda.sh/keda-redis-list-triggerauth', - 'triggerauthentication.keda.sh/keda-redis-list-triggerauth-host-port', - `deployment/${redisWorkerAddressRefDeploymentName}`, - `deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - `deployment/${redisWorkerHostPortRefDeploymentName}`, - 'secret/redis-password', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - sh.exec(`kubectl delete namespace ${redisNamespace}`) - t.end() -}) - -function runWriteJob(t, jobName, listName) { - // write to list - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, writeJobYaml.replace('{{REDIS_ADDRESS}}', redisAddress).replace('{{REDIS_PASSWORD}}', redisPassword) - .replace('{{LIST_NAME}}', listName) - .replace('{{NUMBER_OF_ITEMS_TO_WRITE}}', itemsToWrite.toString()) - .replace('{{CONTAINER_IMAGE}}', deploymentContainerImage) - .replace('{{JOB_NAME}}', jobName) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'list writer job should apply.' 
- ) - - // wait for the write job to complete - for (let i = 0; i < 20; i++) { - const succeeded = sh.exec(`kubectl get job ${writeJobNameForHostPortRef} --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } -} - -const redisDeployYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis - namespace: redis -spec: - selector: - matchLabels: - app: redis - replicas: 1 - template: - metadata: - labels: - app: redis - spec: - containers: - - name: master - image: redis:6.0.6 - command: ["redis-server", "--requirepass", {{REDIS_PASSWORD}}] - ports: - - containerPort: 6379 ---- -apiVersion: v1 -kind: Service -metadata: - name: redis - namespace: redis - labels: - app: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis -` - -const redisRedisListDeployHostPortYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - args: ["read"] - env: - - name: REDIS_HOST - value: {{REDIS_HOST}} - - name: REDIS_PORT - value: "{{REDIS_PORT}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "200" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis - metadata: - hostFromEnv: REDIS_HOST - portFromEnv: REDIS_PORT - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-list-triggerauth -` - - -const redisListDeployAddressYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - args: ["read"] - env: - - name: REDIS_ADDRESS - value: {{REDIS_ADDRESS}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis - metadata: - addressFromEnv: REDIS_ADDRESS - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-list-triggerauth -` - -const redisListDeployHostPortInTriggerAuhYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - args: ["read"] - env: - - name: REDIS_HOST - value: {{REDIS_HOST}} - - name: REDIS_PORT - value: "{{REDIS_PORT}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - 
value: {{REDIS_PASSWORD}} - - name: READ_PROCESS_TIME - value: "200" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis - metadata: - listName: {{LIST_NAME}} - listLength: "5" - authenticationRef: - name: keda-redis-list-triggerauth-host-port -` - -const scaledObjectTriggerAuthHostPortYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-config -type: Opaque -data: - password: {{REDIS_PASSWORD}} - redisHost: {{REDIS_HOST}} - redisPort: {{REDIS_PORT}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-list-triggerauth-host-port -spec: - secretTargetRef: - - parameter: password - name: redis-config - key: password - - parameter: host - name: redis-config - key: redisHost - - parameter: port - name: redis-config - key: redisPort -` - -const scaledObjectTriggerAuthYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-list-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password -` - - -const writeJobYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: {{JOB_NAME}} -spec: - template: - spec: - containers: - - name: redis - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - env: - - name: REDIS_ADDRESS - value: {{REDIS_ADDRESS}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: NO_LIST_ITEMS_TO_WRITE - value: "{{NUMBER_OF_ITEMS_TO_WRITE}}" - args: ["write"] - restartPolicy: Never - backoffLimit: 4 -` diff --git a/tests/scalers/redis-sentinel-lists.test.ts b/tests/scalers/redis-sentinel-lists.test.ts deleted file mode 100644 index a3d581fdb5e..00000000000 --- a/tests/scalers/redis-sentinel-lists.test.ts +++ /dev/null @@ -1,567 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import {createNamespace, waitForRollout} from "./helpers"; - -const redisNamespace = 'redis-sentinel' -const redisService = 'redis-sentinel' -const testNamespace = 'redis-sentinel-lists-test' -const redisStatefulSetName = 'redis-sentinel-node' -const redisSentinelName = 'redis-sentinel' -const redisSentinelMasterName = 'mymaster' -const redisPassword = 'my-password' -let redisHost = '' -const redisPort = 26379 -let redisAddress = '' -const listNameForHostPortRef = 'my-test-list-host-port-ref' -const listNameForAddressRef = 'my-test-list-address-ref' -const listNameForHostPortTriggerAuth = 'my-test-list-host-port-trigger' -const redisWorkerHostPortRefDeploymentName = 'redis-worker-test-hostport' -const redisWorkerAddressRefDeploymentName = 'redis-worker-test-address' -const redisWorkerHostPortRefTriggerAuthDeploymentName = 'redis-worker-test-hostport-triggerauth' -const itemsToWrite = 200 -const deploymentContainerImage = 'ghcr.io/kedacore/tests-redis-sentinel-lists' -const writeJobNameForHostPortRef = 'redis-writer-host-port-ref' -const writeJobNameForAddressRef = 'redis-writer-address-ref' -const writeJobNameForHostPortInTriggerAuth = 'redis-writer-host-port-trigger-auth' - -test.before(t => { - // Deploy Redis sentinel. 
- createNamespace(redisNamespace) - sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - - let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "master.persistence.enabled=false" --set "replica.persistence.enabled=false" --set "global.redis.password=${redisPassword}" bitnami/redis`).code - t.is(0, - sentinelStatus, - 'creating a Redis sentinel setup should work.' - ) - - // Wait for Redis sentinel to be ready. - t.is(0, waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600), 'Redis is not in a ready state') - - // Get Redis sentinel address. - redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) - redisAddress = `${redisHost}:${redisPort}` - - // Create test namespace. - createNamespace(testNamespace) - - const triggerAuthTmpFile = tmp.fileSync() - const base64Password = Buffer.from(redisPassword).toString('base64') - fs.writeFileSync(triggerAuthTmpFile.name, scaledObjectTriggerAuthYaml.replace('{{REDIS_PASSWORD}}', base64Password).replace('{{REDIS_SENTINEL_PASSWORD}}', base64Password)) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth should work..' - ) - - const triggerAuthHostPortTmpFile = tmp.fileSync() - - fs.writeFileSync(triggerAuthHostPortTmpFile.name, - scaledObjectTriggerAuthHostPortYaml.replace('{{REDIS_PASSWORD}}', base64Password) - .replace('{{REDIS_SENTINEL_PASSWORD}}', base64Password) - .replace('{{REDIS_SENTINEL_MASTER}}', Buffer.from(redisSentinelMasterName).toString('base64')) - .replace('{{REDIS_HOSTS}}', Buffer.from(redisHost).toString('base64')) - .replace('{{REDIS_PORTS}}', Buffer.from(redisPort.toString()).toString('base64')) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${triggerAuthHostPortTmpFile.name} --namespace ${testNamespace}`).code, - 'creating trigger auth with host port should work..' - ) - - // Create a deployment with host and port. - const deploymentHostPortRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTmpFile.name, redisListDeployHostPortYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_MASTER}}/g, redisSentinelMasterName) - .replace(/{{REDIS_HOSTS}}/g, redisHost) - .replace(/{{REDIS_PORTS}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host and port envs should work..' 
- ) - - const deploymentAddressRefTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentAddressRefTmpFile.name, redisListDeployAddressYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_MASTER}}/g, redisSentinelMasterName) - .replace(/{{REDIS_ADDRESSES}}/g, redisAddress) - .replace(/{{LIST_NAME}}/g, listNameForAddressRef) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerAddressRefDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentAddressRefTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis address var should work..' - ) - - - const deploymentHostPortRefTriggerAuthTmpFile = tmp.fileSync() - - fs.writeFileSync(deploymentHostPortRefTriggerAuthTmpFile.name, redisListDeployHostPortInTriggerAuhYaml.replace(/{{REDIS_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_PASSWORD}}/g, redisPassword) - .replace(/{{REDIS_SENTINEL_MASTER}}/g, redisSentinelMasterName) - .replace(/{{REDIS_HOSTS}}/g, redisHost) - .replace(/{{REDIS_PORTS}}/g, redisPort.toString()) - .replace(/{{LIST_NAME}}/g, listNameForHostPortTriggerAuth) - .replace(/{{DEPLOYMENT_NAME}}/g, redisWorkerHostPortRefTriggerAuthDeploymentName) - .replace(/{{CONTAINER_IMAGE}}/g, deploymentContainerImage) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${deploymentHostPortRefTriggerAuthTmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment using redis host port in trigger auth should work..' - ) -}) - -test.serial('Deployment for redis host and port env vars should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port env vars should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - runWriteJob(t, writeJobNameForHostPortRef, listNameForHostPortRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - -test.serial('Deployment for redis address env var should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - - -test.serial(`Deployment using redis address env var should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForAddressRef, 
listNameForAddressRef) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerAddressRefDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.serial('Deployment for redis host and port in the trigger auth should have 0 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '0', 'replica count should start out as 0') -}) - - -test.serial(`Deployment using redis host port in triggerAuth should max and scale to 5 with ${itemsToWrite} items written to list and back to 0`, t => { - - runWriteJob(t, writeJobNameForHostPortInTriggerAuth, listNameForHostPortTriggerAuth) - - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '0'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName} --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '0') { - sh.exec('sleep 10s') - } - } - - t.is('0', replicaCount, 'Replica count should be 0 within 10 minutes') -}) - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - `job/${writeJobNameForHostPortRef}`, - `job/${writeJobNameForAddressRef}`, - `job/${writeJobNameForHostPortInTriggerAuth}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerAddressRefDeploymentName}`, - `scaledobject.keda.sh/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - 'triggerauthentication.keda.sh/keda-redis-sentinel-list-triggerauth', - 'triggerauthentication.keda.sh/keda-redis-sentinel-list-triggerauth-host-port', - `deployment/${redisWorkerAddressRefDeploymentName}`, - `deployment/${redisWorkerHostPortRefTriggerAuthDeploymentName}`, - `deployment/${redisWorkerHostPortRefDeploymentName}`, - 'secret/redis-password', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - sh.exec(`helm delete ${redisSentinelName} --namespace ${redisNamespace}`) - sh.exec(`kubectl delete namespace ${redisNamespace}`) - t.end() -}) - -function runWriteJob(t, jobName, listName) { - // write to list - const tmpFile = 
tmp.fileSync() - fs.writeFileSync(tmpFile.name, writeJobYaml.replace('{{REDIS_ADDRESSES}}', redisAddress).replace('{{REDIS_PASSWORD}}', redisPassword) - .replace('{{REDIS_SENTINEL_PASSWORD}}', redisPassword) - .replace('{{REDIS_SENTINEL_MASTER}}', redisSentinelMasterName) - .replace('{{LIST_NAME}}', listName) - .replace('{{NUMBER_OF_ITEMS_TO_WRITE}}', itemsToWrite.toString()) - .replace('{{CONTAINER_IMAGE}}', deploymentContainerImage) - .replace('{{JOB_NAME}}', jobName) - ) - - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'list writer job should apply.' - ) - - // wait for the write job to complete - for (let i = 0; i < 20; i++) { - const succeeded = sh.exec(`kubectl get job ${writeJobNameForHostPortRef} --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } -} - -const redisListDeployHostPortYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "{{REDIS_PORTS}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: REDIS_SENTINEL_PASSWORD - value: {{REDIS_SENTINEL_PASSWORD}} - - name: REDIS_SENTINEL_MASTER - value: {{REDIS_SENTINEL_MASTER}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis-sentinel - metadata: - hostsFromEnv: REDIS_HOSTS - portsFromEnv: REDIS_PORTS - listName: {{LIST_NAME}} - listLength: "5" - sentinelMaster: {{REDIS_SENTINEL_MASTER}} - authenticationRef: - name: keda-redis-sentinel-list-triggerauth -` - - -const redisListDeployAddressYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_ADDRESSES - value: {{REDIS_ADDRESSES}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: REDIS_SENTINEL_PASSWORD - value: {{REDIS_SENTINEL_PASSWORD}} - - name: REDIS_SENTINEL_MASTER - value: {{REDIS_SENTINEL_MASTER}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis-sentinel - metadata: - addressesFromEnv: REDIS_ADDRESSES - listName: {{LIST_NAME}} - listLength: "5" - sentinelMaster: {{REDIS_SENTINEL_MASTER}} - authenticationRef: - name: keda-redis-sentinel-list-triggerauth -` - -const redisListDeployHostPortInTriggerAuhYaml = `apiVersion: apps/v1 -kind: 
Deployment -metadata: - name: {{DEPLOYMENT_NAME}} - labels: - app: {{DEPLOYMENT_NAME}} -spec: - replicas: 0 - selector: - matchLabels: - app: {{DEPLOYMENT_NAME}} - template: - metadata: - labels: - app: {{DEPLOYMENT_NAME}} - spec: - containers: - - name: redis-worker - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - args: ["read"] - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "{{REDIS_PORTS}}" - - name: LIST_NAME - value: {{LIST_NAME}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: REDIS_SENTINEL_PASSWORD - value: {{REDIS_SENTINEL_PASSWORD}} - - name: REDIS_SENTINEL_MASTER - value: {{REDIS_SENTINEL_MASTER}} - - name: READ_PROCESS_TIME - value: "500" ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: {{DEPLOYMENT_NAME}} -spec: - scaleTargetRef: - name: {{DEPLOYMENT_NAME}} - pollingInterval: 5 - cooldownPeriod: 30 - minReplicaCount: 0 - maxReplicaCount: 5 - triggers: - - type: redis-sentinel - metadata: - listName: {{LIST_NAME}} - listLength: "5" - sentinelMaster: {{REDIS_SENTINEL_MASTER}} - authenticationRef: - name: keda-redis-sentinel-list-triggerauth-host-port -` - -const scaledObjectTriggerAuthHostPortYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-config -type: Opaque -data: - password: {{REDIS_PASSWORD}} - sentinelPassword: {{REDIS_SENTINEL_PASSWORD}} - redisHost: {{REDIS_HOSTS}} - redisPort: {{REDIS_PORTS}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-sentinel-list-triggerauth-host-port -spec: - secretTargetRef: - - parameter: password - name: redis-config - key: password - - parameter: sentinelPassword - name: redis-config - key: sentinelPassword - - parameter: hosts - name: redis-config - key: redisHost - - parameter: ports - name: redis-config - key: redisPort -` - -const scaledObjectTriggerAuthYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} - sentinelPassword: {{REDIS_SENTINEL_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-sentinel-list-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password - - parameter: sentinelPassword - name: redis-password - key: sentinelPassword -` - - -const writeJobYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: {{JOB_NAME}} -spec: - template: - spec: - containers: - - name: redis - image: {{CONTAINER_IMAGE}} - imagePullPolicy: IfNotPresent - command: ["./main"] - env: - - name: REDIS_ADDRESSES - value: {{REDIS_ADDRESSES}} - - name: REDIS_PASSWORD - value: {{REDIS_PASSWORD}} - - name: REDIS_SENTINEL_PASSWORD - value: {{REDIS_SENTINEL_PASSWORD}} - - name: REDIS_SENTINEL_MASTER - value: {{REDIS_SENTINEL_MASTER}} - - name: LIST_NAME - value: {{LIST_NAME}} - - name: NO_LIST_ITEMS_TO_WRITE - value: "{{NUMBER_OF_ITEMS_TO_WRITE}}" - args: ["write"] - restartPolicy: Never - backoffLimit: 4 -` diff --git a/tests/scalers/redis-sentinel-streams.test.ts b/tests/scalers/redis-sentinel-streams.test.ts deleted file mode 100644 index 72c99dcb353..00000000000 --- a/tests/scalers/redis-sentinel-streams.test.ts +++ /dev/null @@ -1,227 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import {createNamespace, waitForDeploymentReplicaCount, waitForRollout} from "./helpers"; - -const redisNamespace = 'redis-sentinel-streams' -const redisSentinelName = 
'redis-sentinel-streams' -const redisSentinelMasterName = 'mymaster' -const redisStatefulSetName = 'redis-sentinel-streams-node' -const redisService = 'redis-sentinel-streams' -const testNamespace = 'redis-sentinel-streams-test' -const redisPassword = 'foobared' -let redisHost = '' -const numMessages = 100 - -test.before(t => { - // Deploy Redis Sentinel. - createNamespace(redisNamespace) - sh.exec(`helm repo add bitnami https://charts.bitnami.com/bitnami`) - - let sentinelStatus = sh.exec(`helm install --timeout 900s ${redisSentinelName} --namespace ${redisNamespace} --set "sentinel.enabled=true" --set "master.persistence.enabled=false" --set "replica.persistence.enabled=false" --set "global.redis.password=${redisPassword}" bitnami/redis`).code - t.is(0, - sentinelStatus, - 'creating a Redis Sentinel setup should work.' - ) - - // Wait for Redis Sentinel to be ready. - let exitCode = waitForRollout('statefulset', redisStatefulSetName, redisNamespace, 600) - t.is(0, exitCode, 'expected rollout status for redis to finish successfully') - - // Get Redis Sentinel address. - redisHost = sh.exec(`kubectl get svc ${redisService} -n ${redisNamespace} -o jsonpath='{.spec.clusterIP}'`) - - // Create test namespace. - createNamespace(testNamespace) - - // Deploy streams consumer app, scaled object etc. - const tmpFile = tmp.fileSync() - const base64Password = Buffer.from(redisPassword).toString('base64') - - fs.writeFileSync(tmpFile.name, redisStreamsDeployYaml.replace('{{REDIS_PASSWORD}}', base64Password).replace('{{REDIS_SENTINEL_PASSWORD}}', base64Password).replace('{{REDIS_SENTINEL_MASTER}}', redisSentinelMasterName).replace('{{REDIS_HOSTS}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment should work..' - ) -}) - -test.serial('Deployment should have 1 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '1', 'replica count should start out as 1') -}) - -test.serial(`Deployment should scale to 5 with ${numMessages} messages and back to 1`, async t => { - // Publish messages to redis streams. - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, producerDeployYaml.replace('{{NUM_MESSAGES}}', numMessages.toString()) - .replace('{{REDIS_SENTINEL_MASTER}}', redisSentinelMasterName) - .replace('{{REDIS_HOSTS}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'producer job should apply.' - ) - - // Wait for producer job to finish. - for (let i = 0; i < 60; i++) { - const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } - // With messages published, the consumer deployment should start receiving the messages. 
- t.true(await waitForDeploymentReplicaCount(5, 'redis-streams-consumer', testNamespace, 30, 10000), 'Replica count should be 5 within 5 minutes') - t.true(await waitForDeploymentReplicaCount(1, 'redis-streams-consumer', testNamespace, 60, 10000), 'Replica count should be 1 within 10 minutes') -}) - - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - 'scaledobject.keda.sh/redis-streams-scaledobject', - 'triggerauthentications.keda.sh/keda-redis-stream-triggerauth', - 'secret/redis-password', - 'deployment/redis-streams-consumer', - 'job/redis-streams-producer', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - sh.exec(`helm delete ${redisSentinelName} --namespace ${redisNamespace}`) - sh.exec(`kubectl delete namespace ${redisNamespace}`) - t.end() -}) - -const redisStreamsDeployYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} - sentinelPassword: {{REDIS_SENTINEL_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-stream-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password - - parameter: sentinelPassword - name: redis-password - key: sentinelPassword ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-streams-consumer -spec: - replicas: 1 - selector: - matchLabels: - app: redis-streams-consumer - template: - metadata: - labels: - app: redis-streams-consumer - spec: - containers: - - name: redis-streams-consumer - image: ghcr.io/kedacore/tests-redis-sentinel-streams - command: ["./main"] - args: ["consumer"] - imagePullPolicy: Always - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "26379" - - name: REDIS_STREAM_NAME - value: my-stream - - name: REDIS_STREAM_CONSUMER_GROUP_NAME - value: consumer-group-1 - - name: REDIS_SENTINEL_MASTER - value: {{REDIS_SENTINEL_MASTER}} - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password - - name: REDIS_SENTINEL_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: sentinelPassword ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: redis-streams-scaledobject -spec: - scaleTargetRef: - name: redis-streams-consumer - pollingInterval: 5 - cooldownPeriod: 10 - minReplicaCount: 1 - maxReplicaCount: 5 - triggers: - - type: redis-sentinel-streams - metadata: - hostsFromEnv: REDIS_HOSTS - portsFromEnv: REDIS_PORTS - sentinelMasterFromEnv: REDIS_SENTINEL_MASTER - stream: my-stream - consumerGroup: consumer-group-1 - pendingEntriesCount: "10" - authenticationRef: - name: keda-redis-stream-triggerauth -` - -const producerDeployYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: redis-streams-producer -spec: - template: - spec: - containers: - - name: producer - image: ghcr.io/kedacore/tests-redis-sentinel-streams - command: ["./main"] - args: ["producer"] - imagePullPolicy: Always - env: - - name: REDIS_HOSTS - value: {{REDIS_HOSTS}} - - name: REDIS_PORTS - value: "26379" - - name: REDIS_STREAM_NAME - value: my-stream - - name: NUM_MESSAGES - value: "{{NUM_MESSAGES}}" - - name: REDIS_SENTINEL_MASTER - value: "{{REDIS_SENTINEL_MASTER}}" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password - - name: REDIS_SENTINEL_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password 
- key: sentinelPassword - restartPolicy: Never -` diff --git a/tests/scalers/redis-streams.test.ts b/tests/scalers/redis-streams.test.ts deleted file mode 100644 index 8a86c7a821d..00000000000 --- a/tests/scalers/redis-streams.test.ts +++ /dev/null @@ -1,243 +0,0 @@ -import test from 'ava' -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import { createNamespace, waitForRollout } from './helpers' - -const redisNamespace = 'redis-ns' -const testNamespace = 'redis-streams-ns' -const redisDeploymentName = 'redis' -const redisPassword = 'foobared' -const redisHost = `redis-service.${redisNamespace}.svc.cluster.local:6379` -const numMessages = 100 - -test.before(t => { - // setup Redis - createNamespace(redisNamespace) - - const tmpFile1 = tmp.fileSync() - fs.writeFileSync(tmpFile1.name, redisDeployYaml.replace('{{REDIS_PASSWORD}}', redisPassword)) - - t.is(0, sh.exec(`kubectl apply --namespace ${redisNamespace} -f ${tmpFile1.name}`).code, 'creating a Redis deployment should work.') - - // wait for redis to be ready - t.is(0, waitForRollout('deployment', redisDeploymentName, redisNamespace, 600), 'Redis is not in a ready state') - - createNamespace(testNamespace) - - // deploy streams consumer app, scaled object etc. - const tmpFile = tmp.fileSync() - const base64Password = Buffer.from(redisPassword).toString('base64') - - fs.writeFileSync(tmpFile.name, redisStreamsDeployYaml.replace('{{REDIS_PASSWORD}}', base64Password).replace('{{REDIS_HOST}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'creating a deployment should work..' - ) -}) - -test.serial('Deployment should have 1 replica on start', t => { - - const replicaCount = sh.exec( - `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.is(replicaCount, '1', 'replica count should start out as 1') -}) - -test.serial(`Deployment should scale to 5 with ${numMessages} messages and back to 1`, t => { - // publish messages - const tmpFile = tmp.fileSync() - fs.writeFileSync(tmpFile.name, producerDeployYaml.replace('{{NUM_MESSAGES}}', numMessages.toString()) - .replace('{{REDIS_HOST}}', redisHost)) - t.is( - 0, - sh.exec(`kubectl apply -f ${tmpFile.name} --namespace ${testNamespace}`).code, - 'producer job should apply.' 
- ) - - // wait for the producer job to complete - for (let i = 0; i < 60; i++) { - const succeeded = sh.exec(`kubectl get job --namespace ${testNamespace} -o jsonpath='{.items[0].status.succeeded}'`).stdout - if (succeeded == '1') { - break - } - sh.exec('sleep 1s') - } - // with messages published, the consumer deployment should start receiving the messages - let replicaCount = '0' - for (let i = 0; i < 60 && replicaCount !== '5'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale up) replica count is:' + replicaCount) - if (replicaCount !== '5') { - sh.exec('sleep 10s') - } - } - - t.is('5', replicaCount, 'Replica count should be 5 within 10 minutes') - - for (let i = 0; i < 60 && replicaCount !== '1'; i++) { - replicaCount = sh.exec( - `kubectl get deployment/redis-streams-consumer --namespace ${testNamespace} -o jsonpath="{.spec.replicas}"` - ).stdout - t.log('(scale down) replica count is:' + replicaCount) - if (replicaCount !== '1') { - sh.exec('sleep 10s') - } - } - - t.is('1', replicaCount, 'Replica count should be 1 within 10 minutes') -}) - - - -test.after.always.cb('clean up deployment', t => { - const resources = [ - 'scaledobject.keda.sh/redis-streams-scaledobject', - 'triggerauthentications.keda.sh/keda-redis-stream-triggerauth', - 'secret/redis-password', - 'deployment/redis-streams-consumer', - ] - - for (const resource of resources) { - sh.exec(`kubectl delete ${resource} --namespace ${testNamespace}`) - } - sh.exec(`kubectl delete namespace ${testNamespace}`) - - sh.exec(`kubectl delete namespace ${redisNamespace}`) - t.end() -}) - -const redisDeployYaml = `apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis -spec: - selector: - matchLabels: - app: redis - replicas: 1 - template: - metadata: - labels: - app: redis - spec: - containers: - - name: master - image: redis - command: ["redis-server", "--requirepass", "{{REDIS_PASSWORD}}"] - ports: - - containerPort: 6379 ---- -apiVersion: v1 -kind: Service -metadata: - name: redis-service - labels: - app: redis -spec: - ports: - - port: 6379 - targetPort: 6379 - selector: - app: redis -` - -const redisStreamsDeployYaml = `apiVersion: v1 -kind: Secret -metadata: - name: redis-password -type: Opaque -data: - password: {{REDIS_PASSWORD}} ---- -apiVersion: keda.sh/v1alpha1 -kind: TriggerAuthentication -metadata: - name: keda-redis-stream-triggerauth -spec: - secretTargetRef: - - parameter: password - name: redis-password - key: password ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: redis-streams-consumer -spec: - replicas: 1 - selector: - matchLabels: - app: redis-streams-consumer - template: - metadata: - labels: - app: redis-streams-consumer - spec: - containers: - - name: redis-streams-consumer - image: abhirockzz/redis-streams-consumer - imagePullPolicy: Always - env: - - name: REDIS_HOST - value: {{REDIS_HOST}} - - name: REDIS_STREAM_NAME - value: my-stream - - name: REDIS_STREAM_CONSUMER_GROUP_NAME - value: consumer-group-1 - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password ---- -apiVersion: keda.sh/v1alpha1 -kind: ScaledObject -metadata: - name: redis-streams-scaledobject -spec: - scaleTargetRef: - name: redis-streams-consumer - pollingInterval: 5 - cooldownPeriod: 10 - minReplicaCount: 1 - maxReplicaCount: 5 - triggers: - - type: redis-streams - metadata: - addressFromEnv: REDIS_HOST - stream: my-stream - consumerGroup: consumer-group-1 
- pendingEntriesCount: "10" - authenticationRef: - name: keda-redis-stream-triggerauth -` - -const producerDeployYaml = `apiVersion: batch/v1 -kind: Job -metadata: - name: redis-streams-producer -spec: - template: - spec: - containers: - - name: producer - image: abhirockzz/redis-streams-producer - imagePullPolicy: Always - env: - - name: REDIS_HOST - value: {{REDIS_HOST}} - - name: REDIS_STREAM_NAME - value: my-stream - - name: NUM_MESSAGES - value: "{{NUM_MESSAGES}}" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-password - key: password - restartPolicy: Never -` diff --git a/tests/scalers/redis-cluster-helper.ts b/tests/scalers_go/redis/helper/helper.go similarity index 60% rename from tests/scalers/redis-cluster-helper.ts rename to tests/scalers_go/redis/helper/helper.go index 792e2fa0960..ff846d37f80 100644 --- a/tests/scalers/redis-cluster-helper.ts +++ b/tests/scalers_go/redis/helper/helper.go @@ -1,48 +1,95 @@ -import * as sh from 'shelljs' -import * as tmp from 'tmp' -import * as fs from 'fs' -import {createNamespace, waitForRollout} from "./helpers"; +//go:build e2e +// +build e2e -export class RedisClusterHelper { +package helper - static install(t, password: string, namespace: string) { - const clusterTmpFile = tmp.fileSync() - fs.writeFileSync(clusterTmpFile.name, redisClusterYaml - .replace('{{PASSWORD}}', password) - .replace('{{NAMESPACE}}', namespace)) - createNamespace(namespace) - t.is(0, sh.exec(`kubectl apply -f ${clusterTmpFile.name} --namespace ${namespace}`).code, 'creating a redis cluster should work.') - // wait for rabbitmq to load - t.is(0, waitForRollout('statefulset', 'redis-cluster', namespace)) - } +import ( + "fmt" + "testing" - static uninstall(namespace: string) { - sh.exec(`kubectl delete namespace ${namespace}`) - } + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + "github.com/kedacore/keda/v2/tests/helper" +) + +type templateValues map[string]string + +type templateData struct { + Namespace string + RedisName string + RedisPassword string } -const redisClusterYaml = `apiVersion: v1 +var ( + redisStandaloneTemplates = templateValues{ + "standaloneRedisTemplate": standaloneRedisTemplate, + "standaloneRedisServiceTemplate": standaloneRedisServiceTemplate, + } + + redisClusterTemplates = templateValues{ + "clusterRedisSecretTemplate": clusterRedisSecretTemplate, + "clusterRedisConfig1Template": clusterRedisConfig1Template, + "clusterRedisConfig2Template": clusterRedisConfig2Template, + "clusterRedisHeadlessServiceTemplate": clusterRedisHeadlessServiceTemplate, + "clusterRedisServiceTemplate": clusterRedisServiceTemplate, + "clusterRedisStatefulSetTemplate": clusterRedisStatefulSetTemplate, + } +) + +const ( + standaloneRedisTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.RedisName}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: {{.RedisName}} + replicas: 1 + template: + metadata: + labels: + app: {{.RedisName}} + spec: + containers: + - name: master + image: redis:6.0.6 + command: ["redis-server", "--requirepass", {{.RedisPassword}}] + ports: + - containerPort: 6379` + + standaloneRedisServiceTemplate = ` +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: {{.Namespace}} + labels: + app: {{.RedisName}} +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: {{.RedisName}}` + + clusterRedisSecretTemplate = `apiVersion: v1 kind: Secret metadata: name: redis-cluster - labels: - app.kubernetes.io/name: redis-cluster - helm.sh/chart: 
redis-cluster-7.5.1 - app.kubernetes.io/instance: redis - app.kubernetes.io/managed-by: Helm + namespace: {{.Namespace}} type: Opaque -data: - redis-password: "{{PASSWORD}}" ---- +stringData: + redis-password: "{{.RedisPassword}}"` + clusterRedisConfig1Template = ` apiVersion: v1 kind: ConfigMap metadata: name: redis-cluster-default - labels: - app.kubernetes.io/name: redis-cluster - helm.sh/chart: redis-cluster-7.5.1 - app.kubernetes.io/instance: redis - app.kubernetes.io/managed-by: Helm + namespace: {{.Namespace}} data: redis-default.conf: |- bind 127.0.0.1 @@ -113,17 +160,13 @@ data: dynamic-hz yes aof-rewrite-incremental-fsync yes rdb-save-incremental-fsync yes - jemalloc-bg-thread yes ---- + jemalloc-bg-thread yes` + clusterRedisConfig2Template = ` apiVersion: v1 kind: ConfigMap metadata: name: redis-cluster-scripts - labels: - app.kubernetes.io/name: redis-cluster - helm.sh/chart: redis-cluster-7.5.1 - app.kubernetes.io/instance: redis - app.kubernetes.io/managed-by: Helm + namespace: {{.Namespace}} data: ping_readiness_local.sh: |- #!/bin/sh @@ -131,10 +174,10 @@ data: REDIS_STATUS_FILE=/tmp/.redis_cluster_check if [ ! -z "$REDIS_PASSWORD" ]; then export REDISCLI_AUTH=$REDIS_PASSWORD; fi; response=$( - timeout -s 3 $1 \\ - redis-cli \\ - -h localhost \\ - -p $REDIS_PORT \\ + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ ping ) if [ "$?" -eq "124" ]; then @@ -147,10 +190,10 @@ data: fi if [ ! -f "$REDIS_STATUS_FILE" ]; then response=$( - timeout -s 3 $1 \\ - redis-cli \\ - -h localhost \\ - -p $REDIS_PORT \\ + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ CLUSTER INFO | grep cluster_state | tr -d '[:space:]' ) if [ "$?" -eq "124" ]; then @@ -169,10 +212,10 @@ data: set -e if [ ! -z "$REDIS_PASSWORD" ]; then export REDISCLI_AUTH=$REDIS_PASSWORD; fi; response=$( - timeout -s 3 $1 \\ - redis-cli \\ - -h localhost \\ - -p $REDIS_PORT \\ + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ + -p $REDIS_PORT \ ping ) if [ "$?" 
-eq "124" ]; then @@ -183,17 +226,13 @@ data: if [ "$response" != "PONG" ] && [ "$responseFirstWord" != "LOADING" ] && [ "$responseFirstWord" != "MASTERDOWN" ]; then echo "$response" exit 1 - fi ---- + fi` + clusterRedisHeadlessServiceTemplate = ` apiVersion: v1 kind: Service metadata: - name: redis-cluster-headless - labels: - app.kubernetes.io/name: redis-cluster - helm.sh/chart: redis-cluster-7.5.1 - app.kubernetes.io/instance: redis - app.kubernetes.io/managed-by: Helm + name: {{.RedisName}}-headless + namespace: {{.Namespace}} spec: type: ClusterIP clusterIP: None @@ -207,17 +246,13 @@ spec: targetPort: tcp-redis-bus selector: app.kubernetes.io/name: redis-cluster - app.kubernetes.io/instance: redis ---- + app.kubernetes.io/instance: redis` + clusterRedisServiceTemplate = ` apiVersion: v1 kind: Service metadata: - name: redis-cluster - labels: - app.kubernetes.io/name: redis-cluster - helm.sh/chart: redis-cluster-7.5.1 - app.kubernetes.io/instance: redis - app.kubernetes.io/managed-by: Helm + name: {{.RedisName}} + namespace: {{.Namespace}} annotations: spec: type: ClusterIP @@ -229,12 +264,13 @@ spec: nodePort: null selector: app.kubernetes.io/name: redis-cluster - app.kubernetes.io/instance: redis ---- + app.kubernetes.io/instance: redis` + clusterRedisStatefulSetTemplate = ` apiVersion: apps/v1 kind: StatefulSet metadata: - name: redis-cluster + name: {{.RedisName}} + namespace: {{.Namespace}} labels: app.kubernetes.io/name: redis-cluster helm.sh/chart: redis-cluster-7.5.1 @@ -250,7 +286,7 @@ spec: app.kubernetes.io/name: redis-cluster app.kubernetes.io/instance: redis replicas: 6 - serviceName: redis-cluster-headless + serviceName: {{.RedisName}}-headless podManagementPolicy: Parallel template: metadata: @@ -281,7 +317,7 @@ spec: app.kubernetes.io/name: redis-cluster app.kubernetes.io/instance: redis namespaces: - - {{NAMESPACE}} + - {{.Namespace}} topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: @@ -299,12 +335,13 @@ spec: echo COPYING FILE cp /opt/bitnami/redis/etc/redis-default.conf /opt/bitnami/redis/etc/redis.conf fi - pod_index=($(echo "$POD_NAME" | tr "-" "\\n")) - pod_index="\${pod_index[-1]}" + pod_index=($(echo "$POD_NAME" | tr "-" "\n")) + pod_index="${pod_index[-1]}" if [[ "$pod_index" == "0" ]]; then export REDIS_CLUSTER_CREATOR="yes" export REDIS_CLUSTER_REPLICAS="1" fi + /opt/bitnami/scripts/redis-cluster/entrypoint.sh /opt/bitnami/scripts/redis-cluster/run.sh env: - name: POD_NAME @@ -312,7 +349,7 @@ spec: fieldRef: fieldPath: metadata.name - name: REDIS_NODES - value: "redis-cluster-0.redis-cluster-headless redis-cluster-1.redis-cluster-headless redis-cluster-2.redis-cluster-headless redis-cluster-3.redis-cluster-headless redis-cluster-4.redis-cluster-headless redis-cluster-5.redis-cluster-headless " + value: "{{.RedisName}}-0.{{.RedisName}}-headless {{.RedisName}}-1.{{.RedisName}}-headless {{.RedisName}}-2.{{.RedisName}}-headless {{.RedisName}}-3.{{.RedisName}}-headless {{.RedisName}}-4.{{.RedisName}}-headless {{.RedisName}}-5.{{.RedisName}}-headless " - name: REDISCLI_AUTH valueFrom: secretKeyRef: @@ -377,3 +414,65 @@ spec: name: redis-cluster-default - name: redis-tmp-conf emptyDir: {}` +) + +func InstallStandalone(t *testing.T, kc *kubernetes.Clientset, name, namespace, password string) { + helper.CreateNamespace(t, kc, namespace) + var data = templateData{ + Namespace: namespace, + RedisName: name, + RedisPassword: password, + } + helper.KubectlApplyMultipleWithTemplate(t, data, redisStandaloneTemplates) +} + +func RemoveStandalone(t *testing.T, kc 
*kubernetes.Clientset, name, namespace string) {
+	// Deleting the namespace is enough to tear down the standalone Redis resources
+	// created by InstallStandalone, so no templates are re-applied here.
+	helper.DeleteNamespace(t, kc, namespace)
+}
+
+func InstallSentinel(t *testing.T, kc *kubernetes.Clientset, name, namespace, password string) {
+	helper.CreateNamespace(t, kc, namespace)
+	_, err := helper.ExecuteCommand("helm repo add bitnami https://charts.bitnami.com/bitnami")
+	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	_, err = helper.ExecuteCommand("helm repo update")
+	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	_, err = helper.ExecuteCommand(fmt.Sprintf(`helm install --wait --timeout 900s %s --namespace %s --set sentinel.enabled=true --set master.persistence.enabled=false --set replica.persistence.enabled=false --set global.redis.password=%s bitnami/redis`,
+		name,
+		namespace,
+		password))
+	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+}
+
+func RemoveSentinel(t *testing.T, kc *kubernetes.Clientset, name, namespace string) {
+	_, err := helper.ExecuteCommand(fmt.Sprintf(`helm uninstall --wait --timeout 900s %s --namespace %s`,
+		name,
+		namespace))
+	assert.NoErrorf(t, err, "cannot execute command - %s", err)
+	helper.DeleteNamespace(t, kc, namespace)
+}
+
+func InstallCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace, password string) {
+	helper.CreateNamespace(t, kc, namespace)
+	var data = templateData{
+		Namespace:     namespace,
+		RedisName:     name,
+		RedisPassword: password,
+	}
+	helper.KubectlApplyMultipleWithTemplate(t, data, redisClusterTemplates)
+	assert.True(t, helper.WaitForStatefulsetReplicaReadyCount(t, kc, name, namespace, 6, 60, 3),
+		"redis-cluster should be up")
+}
+
+func RemoveCluster(t *testing.T, kc *kubernetes.Clientset, name, namespace string) {
+	// As with RemoveStandalone, deleting the namespace removes the Redis cluster
+	// resources created by InstallCluster.
+	helper.DeleteNamespace(t, kc, namespace)
+}
diff --git a/tests/scalers_go/redis/redis_cluster_lists/redis_cluster_lists_test.go b/tests/scalers_go/redis/redis_cluster_lists/redis_cluster_lists_test.go
new file mode 100644
index 00000000000..24f8f24c676
--- /dev/null
+++ b/tests/scalers_go/redis/redis_cluster_lists/redis_cluster_lists_test.go
@@ -0,0 +1,241 @@
+//go:build e2e
+// +build e2e
+
+package redis_cluster_lists_test
+
+import (
+	"encoding/base64"
+	"fmt"
+	"testing"
+
+	"github.com/joho/godotenv"
+	"github.com/stretchr/testify/assert"
+	"k8s.io/client-go/kubernetes"
+
+	. 
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-cluster-lists-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisList = "queue" + redisHost = fmt.Sprintf("%s-headless", testName) + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + RedisNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisList string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: goku321/redis-cluster-list:v1.7 + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["read"] + env: + - name: REDIS_ADDRESSES + value: {{.RedisHost}}.{{.RedisNamespace}}:6379 + - name: LIST_NAME + value: {{.RedisList}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_MASTER + value: mymaster + - name: READ_PROCESS_TIME + value: "100" +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + triggers: + - type: redis-cluster + metadata: + addressesFromEnv: REDIS_ADDRESSES + listName: {{.RedisList}} + listLength: "5" + activationListLength: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: goku321/redis-cluster-list:v1.7 + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["write"] + env: + - name: REDIS_ADDRESSES + value: {{.RedisHost}}.{{.RedisNamespace}}:6379 + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: LIST_NAME + value: {{.RedisList}} + - name: NO_LIST_ITEMS_TO_WRITE + value: "{{.ItemsToWrite}}" + restartPolicy: Never + backoffLimit: 4 +` +) + 
+func TestScaler(t *testing.T) { + // Create kubernetes resources for PostgreSQL server + kc := GetKubernetesClient(t) + + // Create Redis Cluster + redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) + + // Create kubernetes resources for testing + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + testActivation(t, kc, data) + testScaleUp(t, kc, data) + testScaleDown(t, kc) + + // cleanup + redis.RemoveCluster(t, kc, testName, redisNamespace) + DeleteKubernetesResources(t, kc, testNamespace, data, templates) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + data.ItemsToWrite = 5 + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale up ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + data.ItemsToWrite = 200 + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleDown(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale down ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +var data = templateData{ + TestNamespace: testNamespace, + RedisNamespace: redisNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, + TriggerAuthenticationName: triggerAuthenticationName, + SecretName: secretName, + JobName: jobName, + RedisPassword: redisPassword, + RedisPasswordBase64: base64.StdEncoding.EncodeToString([]byte(redisPassword)), + RedisList: redisList, + RedisHost: redisHost, + ItemsToWrite: 0, +} + +func getTemplateData() (templateData, map[string]string) { + return data, templateValues{ + "secretTemplate": secretTemplate, + "deploymentTemplate": deploymentTemplate, + "triggerAuthenticationTemplate": triggerAuthenticationTemplate, + "scaledObjectTemplate": scaledObjectTemplate, + } +} diff --git a/tests/scalers_go/redis/redis_cluster_streams/redis_cluster_streams_test.go b/tests/scalers_go/redis/redis_cluster_streams/redis_cluster_streams_test.go new file mode 100644 index 00000000000..e031b892f2f --- /dev/null +++ b/tests/scalers_go/redis/redis_cluster_streams/redis_cluster_streams_test.go @@ -0,0 +1,233 @@ +//go:build e2e +// +build e2e + +package redis_cluster_streams_test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-cluster-streams-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisHost = fmt.Sprintf("%s-headless", testName) + minReplicaCount = 1 + maxReplicaCount = 4 +) + +type templateData struct { + TestNamespace string + RedisNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: goku321/redis-cluster-streams:v2.5 + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["consumer"] + env: + - name: REDIS_HOSTS + value: {{.RedisHost}}.{{.RedisNamespace}} + - name: REDIS_PORTS + value: "6379" + - name: REDIS_STREAM_NAME + value: my-stream + - name: REDIS_STREAM_CONSUMER_GROUP_NAME + value: consumer-group-1 + - name: REDIS_PASSWORD + value: {{.RedisPassword}} +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 15 + triggers: + - type: redis-cluster-streams + metadata: + hostsFromEnv: REDIS_HOSTS + portsFromEnv: REDIS_PORTS + stream: my-stream + consumerGroup: consumer-group-1 + pendingEntriesCount: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: goku321/redis-cluster-streams:v2.5 + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["producer"] + env: + - name: REDIS_HOSTS + value: {{.RedisHost}}.{{.RedisNamespace}} + - name: REDIS_PORTS + value: "6379" + - name: REDIS_STREAM_NAME + value: my-stream + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: 
NUM_MESSAGES + value: "{{.ItemsToWrite}}" + restartPolicy: Never + backoffLimit: 4 +` +) + +func TestScaler(t *testing.T) { + // Create kubernetes resources for PostgreSQL server + kc := GetKubernetesClient(t) + + // Create Redis Cluster + redis.InstallCluster(t, kc, testName, redisNamespace, redisPassword) + + // Create kubernetes resources for testing + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + testScaleUp(t, kc, data) + testScaleDown(t, kc) + + // cleanup + redis.RemoveCluster(t, kc, testName, redisNamespace) + DeleteKubernetesResources(t, kc, testNamespace, data, templates) +} + +func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale up ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleDown(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale down ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +var data = templateData{ + TestNamespace: testNamespace, + RedisNamespace: redisNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, + TriggerAuthenticationName: triggerAuthenticationName, + SecretName: secretName, + JobName: jobName, + RedisPassword: redisPassword, + RedisPasswordBase64: base64.StdEncoding.EncodeToString([]byte(redisPassword)), + RedisHost: redisHost, + ItemsToWrite: 100, +} + +func getTemplateData() (templateData, map[string]string) { + return data, templateValues{ + "secretTemplate": secretTemplate, + "deploymentTemplate": deploymentTemplate, + "triggerAuthenticationTemplate": triggerAuthenticationTemplate, + "scaledObjectTemplate": scaledObjectTemplate, + } +} diff --git a/tests/scalers_go/redis/redis_sentinel_lists/redis_sentinel_lists_test.go b/tests/scalers_go/redis/redis_sentinel_lists/redis_sentinel_lists_test.go new file mode 100644 index 00000000000..8036ddc7e97 --- /dev/null +++ b/tests/scalers_go/redis/redis_sentinel_lists/redis_sentinel_lists_test.go @@ -0,0 +1,249 @@ +//go:build e2e +// +build e2e + +package redis_sentinel_lists_test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-sentinel-lists-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisList = "queue" + redisHost = fmt.Sprintf("%s-headless", testName) + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + RedisNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisList string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: ghcr.io/kedacore/tests-redis-sentinel-lists + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["read"] + env: + - name: REDIS_ADDRESSES + value: {{.RedisHost}}.{{.RedisNamespace}}:26379 + - name: LIST_NAME + value: {{.RedisList}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_MASTER + value: mymaster + - name: READ_PROCESS_TIME + value: "100" +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password + - parameter: sentinelPassword + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + triggers: + - type: redis-sentinel + metadata: + addressesFromEnv: REDIS_ADDRESSES + listName: {{.RedisList}} + sentinelMaster: mymaster + listLength: "5" + activationListLength: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: ghcr.io/kedacore/tests-redis-sentinel-lists + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["write"] + env: + - name: REDIS_ADDRESSES + value: {{.RedisHost}}.{{.RedisNamespace}}:26379 + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: 
REDIS_SENTINEL_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_MASTER + value: mymaster + - name: LIST_NAME + value: {{.RedisList}} + - name: NO_LIST_ITEMS_TO_WRITE + value: "{{.ItemsToWrite}}" + restartPolicy: Never + backoffLimit: 4 +` +) + +func TestScaler(t *testing.T) { + // Create kubernetes resources for PostgreSQL server + kc := GetKubernetesClient(t) + + // Create Redis Sentinel + redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) + + // Create kubernetes resources for testing + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + testActivation(t, kc, data) + testScaleUp(t, kc, data) + testScaleDown(t, kc) + + // cleanup + redis.RemoveSentinel(t, kc, testName, redisNamespace) + DeleteKubernetesResources(t, kc, testNamespace, data, templates) +} + +func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing activation ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + data.ItemsToWrite = 5 + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60) +} + +func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale up ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + data.ItemsToWrite = 200 + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleDown(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale down ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +var data = templateData{ + TestNamespace: testNamespace, + RedisNamespace: redisNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, + TriggerAuthenticationName: triggerAuthenticationName, + SecretName: secretName, + JobName: jobName, + RedisPassword: redisPassword, + RedisPasswordBase64: base64.StdEncoding.EncodeToString([]byte(redisPassword)), + RedisList: redisList, + RedisHost: redisHost, + ItemsToWrite: 0, +} + +func getTemplateData() (templateData, map[string]string) { + return data, templateValues{ + "secretTemplate": secretTemplate, + "deploymentTemplate": deploymentTemplate, + "triggerAuthenticationTemplate": triggerAuthenticationTemplate, + "scaledObjectTemplate": scaledObjectTemplate, + } +} diff --git a/tests/scalers_go/redis/redis_sentinel_streams/redis_sentinel_streams_test.go b/tests/scalers_go/redis/redis_sentinel_streams/redis_sentinel_streams_test.go new file mode 100644 index 00000000000..22ff17ec470 --- /dev/null +++ b/tests/scalers_go/redis/redis_sentinel_streams/redis_sentinel_streams_test.go @@ -0,0 +1,247 @@ +//go:build e2e +// +build e2e + +package redis_sentinel_streams_test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-sentinel-streams-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisHost = fmt.Sprintf("%s-headless", testName) + minReplicaCount = 1 + maxReplicaCount = 4 +) + +type templateData struct { + TestNamespace string + RedisNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: ghcr.io/kedacore/tests-redis-sentinel-streams + imagePullPolicy: IfNotPresent + command: ["./main"] + args: ["consumer"] + env: + - name: REDIS_HOSTS + value: {{.RedisHost}}.{{.RedisNamespace}} + - name: REDIS_PORTS + value: "26379" + - name: REDIS_STREAM_NAME + value: my-stream + - name: REDIS_STREAM_CONSUMER_GROUP_NAME + value: consumer-group-1 + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_MASTER + value: mymaster +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password + - parameter: sentinelPassword + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 15 + triggers: + - type: redis-sentinel-streams + metadata: + hostsFromEnv: REDIS_HOSTS + portsFromEnv: REDIS_PORTS + stream: my-stream + consumerGroup: consumer-group-1 + sentinelMaster: mymaster + pendingEntriesCount: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: ghcr.io/kedacore/tests-redis-sentinel-streams + imagePullPolicy: IfNotPresent + command: ["./main"] + args: 
["producer"] + env: + - name: REDIS_HOSTS + value: {{.RedisHost}}.{{.RedisNamespace}} + - name: REDIS_PORTS + value: "26379" + - name: REDIS_STREAM_NAME + value: my-stream + - name: REDIS_STREAM_CONSUMER_GROUP_NAME + value: consumer-group-1 + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_SENTINEL_MASTER + value: mymaster + - name: NUM_MESSAGES + value: "{{.ItemsToWrite}}" + restartPolicy: Never + backoffLimit: 4 +` +) + +func TestScaler(t *testing.T) { + // Create kubernetes resources for PostgreSQL server + kc := GetKubernetesClient(t) + + // Create Redis Sentinel + redis.InstallSentinel(t, kc, testName, redisNamespace, redisPassword) + + // Create kubernetes resources for testing + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + + testScaleUp(t, kc, data) + testScaleDown(t, kc) + + // cleanup + redis.RemoveSentinel(t, kc, testName, redisNamespace) + DeleteKubernetesResources(t, kc, testNamespace, data, templates) +} + +func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing scale up ---") + templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate} + KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob) + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", maxReplicaCount) +} + +func testScaleDown(t *testing.T, kc *kubernetes.Clientset) { + t.Log("--- testing scale down ---") + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3), + "replica count should be %d after 3 minutes", minReplicaCount) +} + +var data = templateData{ + TestNamespace: testNamespace, + RedisNamespace: redisNamespace, + DeploymentName: deploymentName, + ScaledObjectName: scaledObjectName, + MinReplicaCount: minReplicaCount, + MaxReplicaCount: maxReplicaCount, + TriggerAuthenticationName: triggerAuthenticationName, + SecretName: secretName, + JobName: jobName, + RedisPassword: redisPassword, + RedisPasswordBase64: base64.StdEncoding.EncodeToString([]byte(redisPassword)), + RedisHost: redisHost, + ItemsToWrite: 100, +} + +func getTemplateData() (templateData, map[string]string) { + return data, templateValues{ + "secretTemplate": secretTemplate, + "deploymentTemplate": deploymentTemplate, + "triggerAuthenticationTemplate": triggerAuthenticationTemplate, + "scaledObjectTemplate": scaledObjectTemplate, + } +} diff --git a/tests/scalers_go/redis/redis_standalone_lists/redis_standalone_lists_test.go b/tests/scalers_go/redis/redis_standalone_lists/redis_standalone_lists_test.go new file mode 100644 index 00000000000..2b6585cad3c --- /dev/null +++ b/tests/scalers_go/redis/redis_standalone_lists/redis_standalone_lists_test.go @@ -0,0 +1,236 @@ +//go:build e2e +// +build e2e + +package redis_standalone_lists_test + +import ( + "encoding/base64" + "fmt" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . 
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-standalone-lists-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisList = "queue" + redisHost = fmt.Sprintf("redis.%s.svc.cluster.local", redisNamespace) + minReplicaCount = 0 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisList string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 0 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: ghcr.io/kedacore/tests-redis-lists + imagePullPolicy: IfNotPresent + args: ["read"] + env: + - name: REDIS_HOST + value: {{.RedisHost}} + - name: REDIS_PORT + value: "6379" + - name: LIST_NAME + value: {{.RedisList}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: READ_PROCESS_TIME + value: "100" +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + triggers: + - type: redis + metadata: + hostFromEnv: REDIS_HOST + portFromEnv: REDIS_PORT + listName: {{.RedisList}} + listLength: "5" + activationListLength: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: ghcr.io/kedacore/tests-redis-lists + imagePullPolicy: IfNotPresent + env: + - name: REDIS_ADDRESS + value: {{.RedisHost}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: LIST_NAME + value: {{.RedisList}} + - name: NO_LIST_ITEMS_TO_WRITE + value: "{{.ItemsToWrite}}" + args: ["write"] + restartPolicy: Never + backoffLimit: 4 +` +) + +func TestScaler(t *testing.T) { + // Create kubernetes resources for PostgreSQL server + kc := GetKubernetesClient(t) + + // Create Redis Standalone + 
+    redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword)
+
+    // Create kubernetes resources for testing
+    data, templates := getTemplateData()
+    CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+    testActivation(t, kc, data)
+    testScaleUp(t, kc, data)
+    testScaleDown(t, kc)
+
+    // cleanup
+    redis.RemoveStandalone(t, kc, testName, redisNamespace)
+    DeleteKubernetesResources(t, kc, testNamespace, data, templates)
+}
+
+func testActivation(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+    t.Log("--- testing activation ---")
+    templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate}
+    data.ItemsToWrite = 5
+    KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob)
+
+    AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, minReplicaCount, 60)
+}
+
+func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+    t.Log("--- testing scale up ---")
+    templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate}
+    data.ItemsToWrite = 200
+    KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob)
+
+    assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+        "replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleDown(t *testing.T, kc *kubernetes.Clientset) {
+    t.Log("--- testing scale down ---")
+
+    assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+        "replica count should be %d after 3 minutes", minReplicaCount)
+}
+
+var data = templateData{
+    TestNamespace:             testNamespace,
+    DeploymentName:            deploymentName,
+    ScaledObjectName:          scaledObjectName,
+    MinReplicaCount:           minReplicaCount,
+    MaxReplicaCount:           maxReplicaCount,
+    TriggerAuthenticationName: triggerAuthenticationName,
+    SecretName:                secretName,
+    JobName:                   jobName,
+    RedisPassword:             redisPassword,
+    RedisPasswordBase64:       base64.StdEncoding.EncodeToString([]byte(redisPassword)),
+    RedisList:                 redisList,
+    RedisHost:                 redisHost,
+    ItemsToWrite:              0,
+}
+
+func getTemplateData() (templateData, map[string]string) {
+    return data, templateValues{
+        "secretTemplate":                secretTemplate,
+        "deploymentTemplate":            deploymentTemplate,
+        "triggerAuthenticationTemplate": triggerAuthenticationTemplate,
+        "scaledObjectTemplate":          scaledObjectTemplate,
+    }
+}
diff --git a/tests/scalers_go/redis/redis_standalone_streams/redis_standalone_streams_test.go b/tests/scalers_go/redis/redis_standalone_streams/redis_standalone_streams_test.go
new file mode 100644
index 00000000000..433a8039539
--- /dev/null
+++ b/tests/scalers_go/redis/redis_standalone_streams/redis_standalone_streams_test.go
@@ -0,0 +1,228 @@
+//go:build e2e
+// +build e2e
+
+package redis_standalone_streams_test
+
+import (
+    "encoding/base64"
+    "fmt"
+    "testing"
+
+    "github.com/joho/godotenv"
+    "github.com/stretchr/testify/assert"
+    "k8s.io/client-go/kubernetes"
+
+    . "github.com/kedacore/keda/v2/tests/helper"
"github.com/kedacore/keda/v2/tests/helper" + redis "github.com/kedacore/keda/v2/tests/scalers_go/redis/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "redis-standalone-streams-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + redisNamespace = fmt.Sprintf("%s-redis-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + jobName = fmt.Sprintf("%s-job", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + triggerAuthenticationName = fmt.Sprintf("%s-ta", testName) + secretName = fmt.Sprintf("%s-secret", testName) + redisPassword = "admin" + redisStreamName = "stream" + redisHost = fmt.Sprintf("redis.%s.svc.cluster.local:6379", redisNamespace) + minReplicaCount = 1 + maxReplicaCount = 2 +) + +type templateData struct { + TestNamespace string + DeploymentName string + JobName string + ScaledObjectName string + TriggerAuthenticationName string + SecretName string + MinReplicaCount int + MaxReplicaCount int + RedisPassword string + RedisPasswordBase64 string + RedisStreamName string + RedisHost string + ItemsToWrite int +} + +type templateValues map[string]string + +const ( + deploymentTemplate = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} +spec: + replicas: 1 + selector: + matchLabels: + app: {{.DeploymentName}} + template: + metadata: + labels: + app: {{.DeploymentName}} + spec: + containers: + - name: redis-worker + image: abhirockzz/redis-streams-consumer + imagePullPolicy: IfNotPresent + env: + - name: REDIS_HOST + value: {{.RedisHost}} + - name: REDIS_STREAM_NAME + value: {{.RedisStreamName}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_STREAM_CONSUMER_GROUP_NAME + value: "consumer-group-1" +` + + secretTemplate = `apiVersion: v1 +kind: Secret +metadata: + name: {{.SecretName}} + namespace: {{.TestNamespace}} +type: Opaque +data: + password: {{.RedisPasswordBase64}} +` + + triggerAuthenticationTemplate = `apiVersion: keda.sh/v1alpha1 +kind: TriggerAuthentication +metadata: + name: {{.TriggerAuthenticationName}} + namespace: {{.TestNamespace}} +spec: + secretTargetRef: + - parameter: password + name: {{.SecretName}} + key: password +` + + scaledObjectTemplate = `apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} +spec: + scaleTargetRef: + name: {{.DeploymentName}} + pollingInterval: 5 + cooldownPeriod: 10 + minReplicaCount: {{.MinReplicaCount}} + maxReplicaCount: {{.MaxReplicaCount}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleDown: + stabilizationWindowSeconds: 15 + triggers: + - type: redis-streams + metadata: + addressFromEnv: REDIS_HOST + stream: {{.RedisStreamName}} + consumerGroup: consumer-group-1 + pendingEntriesCount: "10" + authenticationRef: + name: {{.TriggerAuthenticationName}} +` + + insertJobTemplate = `apiVersion: batch/v1 +kind: Job +metadata: + name: {{.JobName}} + namespace: {{.TestNamespace}} +spec: + ttlSecondsAfterFinished: 0 + template: + spec: + containers: + - name: redis + image: abhirockzz/redis-streams-producer + imagePullPolicy: IfNotPresent + env: + - name: REDIS_HOST + value: {{.RedisHost}} + - name: REDIS_PASSWORD + value: {{.RedisPassword}} + - name: REDIS_STREAM_NAME + value: {{.RedisStreamName}} + - name: NUM_MESSAGES + value: "{{.ItemsToWrite}}" + restartPolicy: Never + backoffLimit: 4 +` +) + +func TestScaler(t *testing.T) { + // Create kubernetes resources 
+    // Get Kubernetes client
+    kc := GetKubernetesClient(t)
+
+    // Create Redis Standalone
+    redis.InstallStandalone(t, kc, testName, redisNamespace, redisPassword)
+
+    // Create kubernetes resources for testing
+    data, templates := getTemplateData()
+    CreateKubernetesResources(t, kc, testNamespace, data, templates)
+    assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 3),
+        "replica count should be %d after 3 minutes", 1)
+
+    testScaleUp(t, kc, data)
+    testScaleDown(t, kc)
+
+    // cleanup
+    redis.RemoveStandalone(t, kc, testName, redisNamespace)
+    DeleteKubernetesResources(t, kc, testNamespace, data, templates)
+}
+
+func testScaleUp(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+    t.Log("--- testing scale up ---")
+    templateTriggerJob := templateValues{"insertJobTemplate": insertJobTemplate}
+    data.ItemsToWrite = 20
+    KubectlApplyMultipleWithTemplate(t, data, templateTriggerJob)
+
+    assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicaCount, 60, 3),
+        "replica count should be %d after 3 minutes", maxReplicaCount)
+}
+
+func testScaleDown(t *testing.T, kc *kubernetes.Clientset) {
+    t.Log("--- testing scale down ---")
+
+    assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicaCount, 60, 3),
+        "replica count should be %d after 3 minutes", minReplicaCount)
+}
+
+var data = templateData{
+    TestNamespace:             testNamespace,
+    DeploymentName:            deploymentName,
+    ScaledObjectName:          scaledObjectName,
+    MinReplicaCount:           1,
+    MaxReplicaCount:           maxReplicaCount,
+    TriggerAuthenticationName: triggerAuthenticationName,
+    SecretName:                secretName,
+    JobName:                   jobName,
+    RedisPassword:             redisPassword,
+    RedisPasswordBase64:       base64.StdEncoding.EncodeToString([]byte(redisPassword)),
+    RedisStreamName:           redisStreamName,
+    RedisHost:                 redisHost,
+    ItemsToWrite:              0,
+}
+
+func getTemplateData() (templateData, map[string]string) {
+    return data, templateValues{
+        "secretTemplate":                secretTemplate,
+        "deploymentTemplate":            deploymentTemplate,
+        "triggerAuthenticationTemplate": triggerAuthenticationTemplate,
+        "scaledObjectTemplate":          scaledObjectTemplate,
+    }
+}