From 05453a1b33c9cf1071acb985b5bd79ff05ca77e6 Mon Sep 17 00:00:00 2001
From: Kun Woo Yoo
Date: Wed, 18 Sep 2024 20:07:11 +0900
Subject: [PATCH] add test for CPU/Memory trigger activation feature

Cover the new activationValue metadata in the CPU/Memory scaler unit
tests, rename the scaler's client field to kubeClient so it no longer
shadows the controller-runtime client package, and extend the CPU e2e
test to verify that the cpu trigger both activates and deactivates
scaling of the workload.

Signed-off-by: Kun Woo Yoo
---
 pkg/scalers/cpu_memory_scaler.go      |  12 +--
 pkg/scalers/cpu_memory_scaler_test.go | 149 ++++++++++++++++++++++++--
 tests/scalers/cpu/cpu_test.go         |  14 ++-
 3 files changed, 161 insertions(+), 14 deletions(-)

diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go
index 28738d03876..1f99c633c23 100644
--- a/pkg/scalers/cpu_memory_scaler.go
+++ b/pkg/scalers/cpu_memory_scaler.go
@@ -21,7 +21,7 @@ type cpuMemoryScaler struct {
 	metadata     *cpuMemoryMetadata
 	resourceName v1.ResourceName
 	logger       logr.Logger
-	client       client.Client
+	kubeClient   client.Client
 }
 
 type cpuMemoryMetadata struct {
@@ -37,7 +37,7 @@ type cpuMemoryMetadata struct {
 }
 
 // NewCPUMemoryScaler creates a new cpuMemoryScaler
-func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, client client.Client) (Scaler, error) {
+func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, kubeClient client.Client) (Scaler, error) {
 	logger := InitializeLogger(config, "cpu_memory_scaler")
 
 	meta, parseErr := parseResourceMetadata(config, logger)
@@ -49,7 +49,7 @@ func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.Scal
 		metadata:     meta,
 		resourceName: resourceName,
 		logger:       logger,
-		client:       client,
+		kubeClient:   kubeClient,
 	}, nil
 }
 
@@ -121,7 +121,7 @@ func (s *cpuMemoryScaler) Close(context.Context) error {
 func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscaler, error) {
 	if s.metadata.ScalableObjectType == "ScaledObject" {
 		scaledObject := &kedav1alpha1.ScaledObject{}
-		err := s.client.Get(ctx, types.NamespacedName{
+		err := s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      s.metadata.ScalableObjectName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, scaledObject)
@@ -131,7 +131,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
 		}
 
 		hpa := &v2.HorizontalPodAutoscaler{}
-		err = s.client.Get(ctx, types.NamespacedName{
+		err = s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      scaledObject.Status.HpaName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, hpa)
@@ -143,7 +143,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
 		return hpa, nil
 	} else if s.metadata.ScalableObjectType == "ScaledJob" {
 		scaledJob := &kedav1alpha1.ScaledJob{}
-		err := s.client.Get(ctx, types.NamespacedName{
+		err := s.kubeClient.Get(ctx, types.NamespacedName{
 			Name:      s.metadata.ScalableObjectName,
 			Namespace: s.metadata.ScalableObjectNamespace,
 		}, scaledJob)
diff --git a/pkg/scalers/cpu_memory_scaler_test.go b/pkg/scalers/cpu_memory_scaler_test.go
index 81f7ea9df9a..eb40851371a 100644
--- a/pkg/scalers/cpu_memory_scaler_test.go
+++ b/pkg/scalers/cpu_memory_scaler_test.go
@@ -2,6 +2,11 @@ package scalers
 
 import (
 	"context"
+	"fmt"
 	"testing"
 
 	"github.com/go-logr/logr"
+	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -20,8 +25,9 @@ type parseCPUMemoryMetadataTestData struct {
 // A complete valid metadata example for reference
 var validCPUMemoryMetadata = map[string]string{
-	"type":  "Utilization",
-	"value": "50",
+	"type":            "Utilization",
+	"value":           "50",
+	"activationValue": "40",
 }
 
 var validContainerCPUMemoryMetadata = map[string]string{
 	"type":          "Utilization",
@@ -37,6 +43,7 @@ var testCPUMemoryMetadata = []parseCPUMemoryMetadataTestData{
 	{v2.UtilizationMetricType, map[string]string{"value": "50"}, false},
 	{"", map[string]string{"type": "AverageValue", "value": "50"}, false},
 	{v2.AverageValueMetricType, map[string]string{"value": "50"}, false},
+	{"", map[string]string{"type": "AverageValue", "value": "50", "activationValue": "40"}, false},
 	{"", map[string]string{"type": "Value", "value": "50"}, true},
 	{v2.ValueMetricType, map[string]string{"value": "50"}, true},
 	{"", map[string]string{"type": "AverageValue"}, true},
@@ -64,7 +71,8 @@ func TestGetMetricSpecForScaling(t *testing.T) {
 	config := &scalersconfig.ScalerConfig{
 		TriggerMetadata: validCPUMemoryMetadata,
 	}
-	scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config)
+	kubeClient := fake.NewFakeClient()
+	scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
 	metricSpec := scaler.GetMetricSpecForScaling(context.Background())
 
 	assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType)
@@ -76,7 +84,7 @@ func TestGetMetricSpecForScaling(t *testing.T) {
 		TriggerMetadata: map[string]string{"value": "50"},
 		MetricType:      v2.UtilizationMetricType,
 	}
-	scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config)
+	scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
 	metricSpec = scaler.GetMetricSpecForScaling(context.Background())
 
 	assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType)
@@ -89,7 +97,8 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) {
 	config := &scalersconfig.ScalerConfig{
 		TriggerMetadata: validContainerCPUMemoryMetadata,
 	}
-	scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config)
+	kubeClient := fake.NewFakeClient()
+	scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
 	metricSpec := scaler.GetMetricSpecForScaling(context.Background())
 
 	assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType)
@@ -102,7 +111,7 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) {
 		TriggerMetadata: map[string]string{"value": "50", "containerName": "bar"},
 		MetricType:      v2.UtilizationMetricType,
 	}
-	scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config)
+	scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
 	metricSpec = scaler.GetMetricSpecForScaling(context.Background())
 
 	assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType)
@@ -110,3 +119,131 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) {
 	assert.Equal(t, metricSpec[0].ContainerResource.Target.Type, v2.UtilizationMetricType)
 	assert.Equal(t, metricSpec[0].ContainerResource.Container, "bar")
 }
+
+// createScaledObject returns a ScaledObject fixture with a cpu trigger; its
+// status already records the HPA name that getHPA resolves.
+func createScaledObject() *kedav1alpha1.ScaledObject {
+	maxReplicas := int32(3)
+	minReplicas := int32(0)
+	pollingInterval := int32(10)
+	return &kedav1alpha1.ScaledObject{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "keda.sh/v1alpha1",
+			Kind:       "ScaledObject",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-name",
+			Namespace: "test-namespace",
+		},
+		Spec: kedav1alpha1.ScaledObjectSpec{
+			MaxReplicaCount: &maxReplicas,
+			MinReplicaCount: &minReplicas,
+			PollingInterval: &pollingInterval,
+			ScaleTargetRef: &kedav1alpha1.ScaleTarget{
+				APIVersion: "apps/v1",
+				Kind:       "Deployment",
+				Name:       "test-deployment",
+			},
+			Triggers: []kedav1alpha1.ScaleTriggers{
+				{
+					Type: "cpu",
+					Metadata: map[string]string{
+						"activationValue": "500",
+						"value":           "800",
+					},
+					MetricType: v2.UtilizationMetricType,
+				},
+			},
+		},
+		Status: kedav1alpha1.ScaledObjectStatus{
"keda-hpa-test-name", + }, + } +} + +func createHPAWithAverageUtilization(averageUtilization int32) (*v2.HorizontalPodAutoscaler, error) { + minReplicas := int32(1) + averageValue, err := resource.ParseQuantity("800m") + if err != nil { + return nil, fmt.Errorf("error parsing quantity: %s", err) + } + + return &v2.HorizontalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "keda-hpa-test-name", + Namespace: "test-namespace", + }, + Spec: v2.HorizontalPodAutoscalerSpec{ + MaxReplicas: 3, + MinReplicas: &minReplicas, + Metrics: []v2.MetricSpec{ + + { + Type: v2.ResourceMetricSourceType, + Resource: &v2.ResourceMetricSource{ + Name: v1.ResourceCPU, + Target: v2.MetricTarget{ + AverageUtilization: &averageUtilization, + Type: v2.UtilizationMetricType, + }, + }, + }, + }, + }, + Status: v2.HorizontalPodAutoscalerStatus{ + CurrentMetrics: []v2.MetricStatus{ + { + Type: v2.ResourceMetricSourceType, + Resource: &v2.ResourceMetricStatus{ + Name: v1.ResourceCPU, + Current: v2.MetricValueStatus{ + AverageUtilization: &averageUtilization, + AverageValue: &averageValue, + }, + }, + }, + }, + }, + }, nil +} + +func TestGetMetricsAndActivity_IsActive(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: validCPUMemoryMetadata, + ScalableObjectType: "ScaledObject", + ScalableObjectName: "test-name", + ScalableObjectNamespace: "test-namespace", + } + + hpa, err := createHPAWithAverageUtilization(50) + if err != nil { + t.Errorf("Error creating HPA: %s", err) + } + + kubeClient := fake.NewClientBuilder().WithObjects(hpa, createScaledObject()).Build() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) + + _, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu") + assert.Equal(t, isActive, true) +} + +func TestGetMetricsAndActivity_IsNotActive(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: validCPUMemoryMetadata, + ScalableObjectType: "ScaledObject", + ScalableObjectName: "test-name", + ScalableObjectNamespace: "test-namespace", + } + + hpa, err := createHPAWithAverageUtilization(30) + if err != nil { + t.Errorf("Error creating HPA: %s", err) + } + + kubeClient := fake.NewClientBuilder().WithRuntimeObjects(hpa, createScaledObject()).Build() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient) + + _, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu") + assert.Equal(t, isActive, false) +} diff --git a/tests/scalers/cpu/cpu_test.go b/tests/scalers/cpu/cpu_test.go index f24922dc61d..1c4564d1ad4 100644 --- a/tests/scalers/cpu/cpu_test.go +++ b/tests/scalers/cpu/cpu_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "k8s.io/client-go/kubernetes" - - . 
"github.com/kedacore/keda/v2/tests/helper" ) // Load environment variables from .env file @@ -135,7 +133,8 @@ spec: - type: cpu metadata: type: Utilization - value: "50" + value: "10" + activationValue: "5" - type: kubernetes-workload metadata: podSelector: 'pod={{.WorkloadDeploymentName}}' @@ -245,9 +244,18 @@ func scaleToZero(t *testing.T, kc *kubernetes.Clientset, data templateData) { assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicas, 60, 1), "Replica count should be %v", maxReplicas) - // scale external trigger in (expect replicas back to 0 -- external trigger not active) + // activate cpu trigger + KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", triggerJob) + + // replica count should not change from maxReplicas + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60) + + // scale external trigger in (expect replicas to stay at maxReplicas -- external trigger not active) KubernetesScaleDeployment(t, kc, workloadDeploymentName, int64(minReplicas), testNamespace) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60) + // remove trigger job to deactivate cpu trigger + KubectlDeleteWithTemplate(t, data, "triggerJobTemplate", triggerJob) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicas, 60, 1), "Replica count should be %v", minReplicas) }