diff --git a/CHANGELOG.md b/CHANGELOG.md
index afa36142b0f..5e8aaf7ebf7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -80,6 +80,7 @@ Here is an overview of all new **experimental** features:
 
 ### Fixes
 
+- **General**: Admission webhook does not reject workloads with only resource limits provided ([#4802](https://github.com/kedacore/keda/issues/4802))
 - **General**: Fix CVE-2023-39325 in golang.org/x/net ([#5122](https://github.com/kedacore/keda/issues/5122))
 - **General**: Fix otelgrpc DoS vulnerability ([#5208](https://github.com/kedacore/keda/issues/5208))
 - **General**: Prevented memory leak generated by not correctly cleaning http connections ([#5248](https://github.com/kedacore/keda/issues/5248))
diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go
index b487024b013..66d2c76d60a 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook.go
@@ -316,19 +316,16 @@ func verifyCPUMemoryScalers(incomingSo *ScaledObject, action string, dryRun bool
 			if conainerName != "" && container.Name != conainerName {
 				continue
 			}
+
 			if trigger.Type == cpuString {
-				if container.Resources.Requests == nil ||
-					container.Resources.Requests.Cpu() == nil ||
-					container.Resources.Requests.Cpu().AsApproximateFloat64() == 0 {
+				if !isWorkloadResourceSet(container.Resources, corev1.ResourceCPU) {
 					err := fmt.Errorf("the scaledobject has a cpu trigger but the container %s doesn't have the cpu request defined", container.Name)
 					scaledobjectlog.Error(err, "validation error")
 					metricscollector.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "missing-requests")
 					return err
 				}
 			} else if trigger.Type == memoryString {
-				if container.Resources.Requests == nil ||
-					container.Resources.Requests.Memory() == nil ||
-					container.Resources.Requests.Memory().AsApproximateFloat64() == 0 {
+				if !isWorkloadResourceSet(container.Resources, corev1.ResourceMemory) {
 					err := fmt.Errorf("the scaledobject has a memory trigger but the container %s doesn't have the memory request defined", container.Name)
 					scaledobjectlog.Error(err, "validation error")
 					metricscollector.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "missing-requests")
@@ -460,3 +457,9 @@ func castToFloatIfNecessary(formula string) string {
 	}
 	return "float(" + formula + ")"
 }
+
+func isWorkloadResourceSet(rr corev1.ResourceRequirements, name corev1.ResourceName) bool {
+	requests, requestsSet := rr.Requests[name]
+	limits, limitsSet := rr.Limits[name]
+	return (requestsSet || limitsSet) && (requests.AsApproximateFloat64() > 0 || limits.AsApproximateFloat64() > 0)
+}
diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go
index 8c9dfe00df5..c119dba38bf 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook_test.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go
@@ -416,6 +416,68 @@ var _ = It("should validate so creation when min replicas is > 0 with only cpu s
 })
 
+var _ = It("should not validate ScaledObject creation when deployment only provides cpu resource limits", func() {
+
+	namespaceName := "only-cpu-resource-limits-set"
+	namespace := createNamespace(namespaceName)
+
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	workload := createDeployment(namespaceName, false, false)
+	workload.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{
+		v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+	}
+
+	err = k8sClient.Create(context.Background(), workload)
+	Expect(err).ToNot(HaveOccurred())
+
+	so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", false, map[string]string{}, "")
+	so.Spec.Triggers = []ScaleTriggers{
+		{
+			Type: "cpu",
+			Metadata: map[string]string{
+				"value": "10",
+			},
+		},
+	}
+
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), so)
+	}).ShouldNot(HaveOccurred())
+})
+
+var _ = It("should not validate ScaledObject creation when deployment only provides memory resource limits", func() {
+
+	namespaceName := "only-memory-resource-limits-set"
+	namespace := createNamespace(namespaceName)
+
+	err := k8sClient.Create(context.Background(), namespace)
+	Expect(err).ToNot(HaveOccurred())
+
+	workload := createDeployment(namespaceName, false, false)
+	workload.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{
+		v1.ResourceMemory: *resource.NewMilliQuantity(1024, resource.DecimalSI),
+	}
+
+	err = k8sClient.Create(context.Background(), workload)
+	Expect(err).ToNot(HaveOccurred())
+
+	so := createScaledObject(soName, namespaceName, workloadName, "apps/v1", "Deployment", false, map[string]string{}, "")
+	so.Spec.Triggers = []ScaleTriggers{
+		{
+			Type: "memory",
+			Metadata: map[string]string{
+				"value": "512Mi",
+			},
+		},
+	}
+
+	Eventually(func() error {
+		return k8sClient.Create(context.Background(), so)
+	}).ShouldNot(HaveOccurred())
+})
+
 var _ = It("should validate the so update if it's removing the finalizer even if it's invalid", func() {
 
 	namespaceName := "removing-finalizers"
diff --git a/tests/internals/scaled_object_validation/scaled_object_validation_test.go b/tests/internals/scaled_object_validation/scaled_object_validation_test.go
index ad1f0fc9c69..ca1bfbdc944 100644
--- a/tests/internals/scaled_object_validation/scaled_object_validation_test.go
+++ b/tests/internals/scaled_object_validation/scaled_object_validation_test.go
@@ -173,6 +173,8 @@ func TestScaledObjectValidations(t *testing.T) {
 
 	testMissingMemory(t, data)
 
+	testWorkloadWithOnlyLimits(t, data)
+
 	DeleteKubernetesResources(t, testNamespace, data, templates)
 }
 
@@ -249,6 +251,66 @@ func testMissingMemory(t *testing.T, data templateData) {
 
 	assert.Contains(t, err.Error(), fmt.Sprintf("the scaledobject has a memory trigger but the container %s doesn't have the memory request defined", deploymentName))
 }
 
+func testWorkloadWithOnlyLimits(t *testing.T, data templateData) {
+	t.Log("--- workload with only resource limits set ---")
+
+	data.DeploymentName = fmt.Sprintf("%s-deploy-only-limits", testName)
+
+	customDeploymentTemplate := `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{.DeploymentName}}
+  namespace: {{.TestNamespace}}
+  labels:
+    app: {{.DeploymentName}}
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{.DeploymentName}}
+  template:
+    metadata:
+      labels:
+        app: {{.DeploymentName}}
+    spec:
+      containers:
+        - name: {{.DeploymentName}}
+          image: nginxinc/nginx-unprivileged
+          resources:
+            limits:
+              cpu: 50m
+`
+
+	KubectlApplyWithTemplate(t, data, "deploymentTemplate", customDeploymentTemplate)
+	WaitForDeploymentReplicaReadyCount(t, GetKubernetesClient(t), data.DeploymentName, data.TestNamespace, 1, 10, 5)
+
+	t.Log("deployment was updated with resource limits")
+
+	data.ScaledObjectName = fmt.Sprintf("%s-so-only-limits", testName)
+
+	customScaledObjectTemplate := `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+  name: {{.ScaledObjectName}}
+  namespace: {{.TestNamespace}}
+spec:
+  scaleTargetRef:
+    name: {{.DeploymentName}}
+  minReplicaCount: 1
+  maxReplicaCount: 1
+  triggers:
+    - type: cpu
+      metadata:
+        type: Utilization
+        value: "50"
+`
+
+	err := KubectlApplyWithErrors(t, data, "scaledObjectTemplate", customScaledObjectTemplate)
+	assert.NoError(t, err, "Deployment with only resource limits set should be validated")
+}
+
 func getTemplateData() (templateData, []Template) {
 	return templateData{
 		TestNamespace: testNamespace,
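
For context, here is a minimal, self-contained sketch of the behaviour the new helper introduces. The `isWorkloadResourceSet` body is copied verbatim from the webhook change above; the surrounding `main` package, the `limitsOnly` fixture, and the printed results are illustrative assumptions rather than KEDA code. It shows that a container declaring only a CPU limit now satisfies the CPU check, while the memory check still fails because neither a memory request nor a memory limit is present.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Copied from the webhook change above: a resource counts as "set" when either
// a non-zero request or a non-zero limit exists for it.
func isWorkloadResourceSet(rr corev1.ResourceRequirements, name corev1.ResourceName) bool {
	requests, requestsSet := rr.Requests[name]
	limits, limitsSet := rr.Limits[name]
	return (requestsSet || limitsSet) && (requests.AsApproximateFloat64() > 0 || limits.AsApproximateFloat64() > 0)
}

func main() {
	// Illustrative fixture: a container that declares only a CPU limit,
	// mirroring the limits-only deployments used in the new tests.
	limitsOnly := corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU: resource.MustParse("100m"),
		},
	}

	fmt.Println(isWorkloadResourceSet(limitsOnly, corev1.ResourceCPU))    // true: the limit alone is enough
	fmt.Println(isWorkloadResourceSet(limitsOnly, corev1.ResourceMemory)) // false: no memory request or limit
}
```

This is why the limits-only deployments in the unit and e2e tests above are now expected to be admitted: when only a limit is specified, Kubernetes defaults the container's request to that limit, so rejecting such workloads for a "missing request" was a false positive.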