add test for CPU/Memory trigger activation feature
Signed-off-by: Kun Woo Yoo <vbtkdpf148@gmail.com>
kunwooy committed Sep 19, 2024
1 parent 8f1f965 commit ed2d446
Showing 4 changed files with 180 additions and 15 deletions.
5 changes: 4 additions & 1 deletion controllers/keda/scaledobject_controller_test.go
@@ -790,13 +790,16 @@ var _ = Describe("ScaledObjectController", func() {
Eventually(func() error {
return k8sClient.Get(context.Background(), types.NamespacedName{Name: getHPAName(so), Namespace: "default"}, hpa)
}).ShouldNot(HaveOccurred())

averageUtilization := int32(100)
hpa.Status.CurrentMetrics = []autoscalingv2.MetricStatus{
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricStatus{
Name: corev1.ResourceCPU,
Current: autoscalingv2.MetricValueStatus{
Value: resource.NewQuantity(int64(100), resource.DecimalSI),
Value: resource.NewQuantity(int64(100), resource.DecimalSI),
AverageUtilization: &averageUtilization,
},
},
},
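The hunk above is truncated just after the mocked CurrentMetrics are assigned. In a typical envtest-style controller test, the mocked metrics would next be persisted through the HPA's status subresource so the reconciler and scalers can observe them — a minimal sketch under that assumption, reusing the k8sClient and Gomega assertions already in scope:

// Assumed follow-up step (not shown in this excerpt): persist the mocked
// metrics via the status subresource so they become visible to readers.
Expect(k8sClient.Status().Update(context.Background(), hpa)).To(Succeed())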
12 changes: 6 additions & 6 deletions pkg/scalers/cpu_memory_scaler.go
@@ -22,7 +22,7 @@ type cpuMemoryScaler struct {
metadata *cpuMemoryMetadata
resourceName v1.ResourceName
logger logr.Logger
client client.Client
kubeClient client.Client
}

type cpuMemoryMetadata struct {
@@ -38,7 +38,7 @@ type cpuMemoryMetadata struct {
}

// NewCPUMemoryScaler creates a new cpuMemoryScaler
func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, client client.Client) (Scaler, error) {
func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig, kubeClient client.Client) (Scaler, error) {
logger := InitializeLogger(config, "cpu_memory_scaler")

meta, parseErr := parseResourceMetadata(config, logger)
@@ -50,7 +50,7 @@ func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.Scal
metadata: meta,
resourceName: resourceName,
logger: logger,
client: client,
kubeClient: kubeClient,
}, nil
}

@@ -122,7 +122,7 @@ func (s *cpuMemoryScaler) Close(context.Context) error {
func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscaler, error) {
if s.metadata.ScalableObjectType == "ScaledObject" {
scaledObject := &kedav1alpha1.ScaledObject{}
err := s.client.Get(ctx, types.NamespacedName{
err := s.kubeClient.Get(ctx, types.NamespacedName{
Name: s.metadata.ScalableObjectName,
Namespace: s.metadata.ScalableObjectNamespace,
}, scaledObject)
@@ -132,7 +132,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
}

hpa := &v2.HorizontalPodAutoscaler{}
err = s.client.Get(ctx, types.NamespacedName{
err = s.kubeClient.Get(ctx, types.NamespacedName{
Name: scaledObject.Status.HpaName,
Namespace: s.metadata.ScalableObjectNamespace,
}, hpa)
@@ -144,7 +144,7 @@ func (s *cpuMemoryScaler) getHPA(ctx context.Context) (*v2.HorizontalPodAutoscal
return hpa, nil
} else if s.metadata.ScalableObjectType == "ScaledJob" {
scaledJob := &kedav1alpha1.ScaledJob{}
err := s.client.Get(ctx, types.NamespacedName{
err := s.kubeClient.Get(ctx, types.NamespacedName{
Name: s.metadata.ScalableObjectName,
Namespace: s.metadata.ScalableObjectNamespace,
}, scaledJob)
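This hunk only renames the injected client field to kubeClient and shows the getHPA lookup path; the GetMetricsAndActivity body that consumes the fetched HPA is outside this excerpt. As a hedged sketch of the idea under test — not the actual KEDA implementation — activity for a Utilization-type trigger could be derived by comparing the HPA's reported average utilization against the configured activationValue:

package scalers

import (
	v2 "k8s.io/api/autoscaling/v2"
	v1 "k8s.io/api/core/v1"
)

// isActiveFromHPAStatus is a hypothetical helper: walk the HPA's current
// metrics, find the matching resource metric, and report activity when the
// observed average utilization exceeds the configured activation threshold.
func isActiveFromHPAStatus(hpa *v2.HorizontalPodAutoscaler, resourceName v1.ResourceName, activationValue int32) bool {
	for _, m := range hpa.Status.CurrentMetrics {
		if m.Type != v2.ResourceMetricSourceType || m.Resource == nil || m.Resource.Name != resourceName {
			continue
		}
		if m.Resource.Current.AverageUtilization != nil {
			return *m.Resource.Current.AverageUtilization > activationValue
		}
	}
	return false
}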
164 changes: 158 additions & 6 deletions pkg/scalers/cpu_memory_scaler_test.go
@@ -2,6 +2,12 @@ package scalers

import (
"context"
"fmt"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"testing"

"github.com/go-logr/logr"
@@ -20,8 +20,9 @@ type parseCPUMemoryMetadataTestData struct {

// A complete valid metadata example for reference
var validCPUMemoryMetadata = map[string]string{
"type": "Utilization",
"value": "50",
"type": "Utilization",
"value": "50",
"activationValue": "40",
}
var validContainerCPUMemoryMetadata = map[string]string{
"type": "Utilization",
@@ -37,6 +44,7 @@ var testCPUMemoryMetadata = []parseCPUMemoryMetadataTestData{
{v2.UtilizationMetricType, map[string]string{"value": "50"}, false},
{"", map[string]string{"type": "AverageValue", "value": "50"}, false},
{v2.AverageValueMetricType, map[string]string{"value": "50"}, false},
{"", map[string]string{"type": "AverageValue", "value": "50", "activationValue": "40"}, false},
{"", map[string]string{"type": "Value", "value": "50"}, true},
{v2.ValueMetricType, map[string]string{"value": "50"}, true},
{"", map[string]string{"type": "AverageValue"}, true},
@@ -64,7 +72,8 @@ func TestGetMetricSpecForScaling(t *testing.T) {
config := &scalersconfig.ScalerConfig{
TriggerMetadata: validCPUMemoryMetadata,
}
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config)
kubeClient := fake.NewFakeClient()
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
metricSpec := scaler.GetMetricSpecForScaling(context.Background())

assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType)
@@ -76,7 +85,7 @@ func TestGetMetricSpecForScaling(t *testing.T) {
TriggerMetadata: map[string]string{"value": "50"},
MetricType: v2.UtilizationMetricType,
}
scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config)
scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
metricSpec = scaler.GetMetricSpecForScaling(context.Background())

assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType)
@@ -89,7 +98,8 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) {
config := &scalersconfig.ScalerConfig{
TriggerMetadata: validContainerCPUMemoryMetadata,
}
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config)
kubeClient := fake.NewFakeClient()
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
metricSpec := scaler.GetMetricSpecForScaling(context.Background())

assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType)
@@ -102,11 +112,153 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) {
TriggerMetadata: map[string]string{"value": "50", "containerName": "bar"},
MetricType: v2.UtilizationMetricType,
}
scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config)
scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)
metricSpec = scaler.GetMetricSpecForScaling(context.Background())

assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType)
assert.Equal(t, metricSpec[0].ContainerResource.Name, v1.ResourceCPU)
assert.Equal(t, metricSpec[0].ContainerResource.Target.Type, v2.UtilizationMetricType)
assert.Equal(t, metricSpec[0].ContainerResource.Container, "bar")
}

func createScaledObject() *kedav1alpha1.ScaledObject {
maxReplicas := int32(3)
minReplicas := int32(0)
pollingInterval := int32(10)
return &kedav1alpha1.ScaledObject{
TypeMeta: metav1.TypeMeta{
APIVersion: "keda.sh/v1alpha1",
Kind: "ScaledObject",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-name",
Namespace: "test-namespace",
},
Spec: kedav1alpha1.ScaledObjectSpec{
MaxReplicaCount: &maxReplicas,
MinReplicaCount: &minReplicas,
PollingInterval: &pollingInterval,
ScaleTargetRef: &kedav1alpha1.ScaleTarget{
APIVersion: "apps/v1",
Kind: "Deployment",
Name: "test-deployment",
},
Triggers: []kedav1alpha1.ScaleTriggers{
{
Type: "cpu",
Metadata: map[string]string{
"activationValue": "500",
"value": "800",
},
MetricType: v2.UtilizationMetricType,
},
},
},
Status: kedav1alpha1.ScaledObjectStatus{
HpaName: "keda-hpa-test-name",
},
}
}

func createHPAWithAverageUtilization(averageUtilization int32) (*v2.HorizontalPodAutoscaler, error) {
minReplicas := int32(1)
averageValue, err := resource.ParseQuantity("800m")
if err != nil {
return nil, fmt.Errorf("error parsing quantity: %s", err)
}

return &v2.HorizontalPodAutoscaler{
TypeMeta: metav1.TypeMeta{
APIVersion: "autoscaling/v2",
Kind: "HorizontalPodAutoscaler",
},
ObjectMeta: metav1.ObjectMeta{
Name: "keda-hpa-test-name",
Namespace: "test-namespace",
},
Spec: v2.HorizontalPodAutoscalerSpec{
MaxReplicas: 3,
MinReplicas: &minReplicas,
Metrics: []v2.MetricSpec{

{
Type: v2.ResourceMetricSourceType,
Resource: &v2.ResourceMetricSource{
Name: v1.ResourceCPU,
Target: v2.MetricTarget{
AverageUtilization: &averageUtilization,
Type: v2.UtilizationMetricType,
},
},
},
},
},
Status: v2.HorizontalPodAutoscalerStatus{
CurrentMetrics: []v2.MetricStatus{
{
Type: v2.ResourceMetricSourceType,
Resource: &v2.ResourceMetricStatus{
Name: v1.ResourceCPU,
Current: v2.MetricValueStatus{
AverageUtilization: &averageUtilization,
AverageValue: &averageValue,
},
},
},
},
},
}, nil
}

func TestGetMetricsAndActivity_IsActive(t *testing.T) {
config := &scalersconfig.ScalerConfig{
TriggerMetadata: validCPUMemoryMetadata,
ScalableObjectType: "ScaledObject",
ScalableObjectName: "test-name",
ScalableObjectNamespace: "test-namespace",
}

hpa, err := createHPAWithAverageUtilization(50)
if err != nil {
t.Errorf("Error creating HPA: %s", err)
return
}

err = kedav1alpha1.AddToScheme(scheme.Scheme)
if err != nil {
t.Errorf("Error adding to scheme: %s", err)
return
}

kubeClient := fake.NewClientBuilder().WithObjects(hpa, createScaledObject()).WithScheme(scheme.Scheme).Build()
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)

_, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu")
assert.Equal(t, isActive, true)
}

func TestGetMetricsAndActivity_IsNotActive(t *testing.T) {
config := &scalersconfig.ScalerConfig{
TriggerMetadata: validCPUMemoryMetadata,
ScalableObjectType: "ScaledObject",
ScalableObjectName: "test-name",
ScalableObjectNamespace: "test-namespace",
}

hpa, err := createHPAWithAverageUtilization(30)
if err != nil {
t.Errorf("Error creating HPA: %s", err)
}

err = kedav1alpha1.AddToScheme(scheme.Scheme)
if err != nil {
t.Errorf("Error adding to scheme: %s", err)
return
}

kubeClient := fake.NewClientBuilder().WithObjects(hpa, createScaledObject()).WithScheme(scheme.Scheme).Build()
scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)

_, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu")
assert.Equal(t, isActive, false)
}
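The two new tests pin one utilization value on each side of the activationValue of 40 configured in validCPUMemoryMetadata. A table-driven variant could also exercise the boundary itself — a sketch meant to sit in the same test file (so its imports apply), reusing the createScaledObject and createHPAWithAverageUtilization helpers from this diff and assuming a strict greater-than comparison for activation (the exact boundary semantics are not shown in this excerpt):

func TestGetMetricsAndActivity_ActivationBoundary(t *testing.T) {
	// Assumed semantics: the trigger is active only when utilization strictly
	// exceeds activationValue (40).
	cases := []struct {
		utilization int32
		wantActive  bool
	}{
		{39, false},
		{40, false},
		{41, true},
	}

	if err := kedav1alpha1.AddToScheme(scheme.Scheme); err != nil {
		t.Fatalf("error adding to scheme: %s", err)
	}

	for _, tc := range cases {
		hpa, err := createHPAWithAverageUtilization(tc.utilization)
		if err != nil {
			t.Fatalf("error creating HPA: %s", err)
		}

		config := &scalersconfig.ScalerConfig{
			TriggerMetadata:         validCPUMemoryMetadata,
			ScalableObjectType:      "ScaledObject",
			ScalableObjectName:      "test-name",
			ScalableObjectNamespace: "test-namespace",
		}
		kubeClient := fake.NewClientBuilder().WithObjects(hpa, createScaledObject()).WithScheme(scheme.Scheme).Build()
		scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient)

		_, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu")
		assert.Equal(t, tc.wantActive, isActive, "utilization=%d", tc.utilization)
	}
}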
14 changes: 12 additions & 2 deletions tests/scalers/cpu/cpu_test.go
@@ -135,7 +135,8 @@ spec:
- type: cpu
metadata:
type: Utilization
value: "50"
value: "10"
activationValue: "5"
- type: kubernetes-workload
metadata:
podSelector: 'pod={{.WorkloadDeploymentName}}'
@@ -245,9 +246,18 @@ func scaleToZero(t *testing.T, kc *kubernetes.Clientset, data templateData) {
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicas, 60, 1),
"Replica count should be %v", maxReplicas)

// scale external trigger in (expect replicas back to 0 -- external trigger not active)
// activate cpu trigger
KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", triggerJob)

// replica count should not change from maxReplicas
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60)

// scale external trigger in (expect replicas to stay at maxReplicas -- external trigger not active)
KubernetesScaleDeployment(t, kc, workloadDeploymentName, int64(minReplicas), testNamespace)
AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60)

// remove trigger job to deactivate cpu trigger
KubectlDeleteWithTemplate(t, data, "triggerJobTemplate", triggerJob)
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicas, 60, 1),
"Replica count should be %v", minReplicas)
}
