fix: unnecessary HPA updates when cpu utilization trigger is used
Signed-off-by: uucloud <uucloud@qq.com>
uucloud committed Jul 17, 2024
1 parent 5cbe424 commit 174bceb
Showing 3 changed files with 87 additions and 2 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -83,6 +83,7 @@ Here is an overview of all new **experimental** features:
 
 - **General**: Check for missing CRD references and sample CRs ([#5920](https://github.com/kedacore/keda/issues/5920))
 - **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806))
+- **General**: Continuous HPA updates with CPU Utilization trigger ([#5821](https://github.com/kedacore/keda/issues/5821))
 - **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760))
 - **New Relic Scaler**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5944](https://github.com/kedacore/keda/issues/5944))
 
24 changes: 23 additions & 1 deletion controllers/keda/hpa.go
@@ -221,9 +221,25 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
 	}
 
 	metricSpecs := cache.GetMetricSpecForScaling(ctx)
+	var mSpecs []autoscalingv2.MetricSpec
+	var hasCPUUtilization bool
+	var cpuUtilization autoscalingv2.MetricSpec
 
 	for _, metricSpec := range metricSpecs {
 		if metricSpec.Resource != nil {
+			target := metricSpec.Resource.Target
+			// In Kubernetes versions <1.27, the CPU Utilization metric in an HPA is moved
+			// to the end of the metrics list and only one entry is kept. Matching that
+			// order here keeps the KEDA controller from updating the HPA continuously.
+			// See https://github.com/kedacore/keda/issues/5821 for details.
+			if target.Type == autoscalingv2.UtilizationMetricType && string(metricSpec.Resource.Name) == "cpu" {
+				if hasCPUUtilization {
+					continue
+				}
+				hasCPUUtilization = true
+				cpuUtilization = metricSpec
+				continue
+			}
+
 			resourceMetricNames = append(resourceMetricNames, string(metricSpec.Resource.Name))
 		}

@@ -238,8 +254,14 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
 			metricSpec.External.Metric.Selector.MatchLabels[kedav1alpha1.ScaledObjectOwnerAnnotation] = scaledObject.Name
 			externalMetricNames = append(externalMetricNames, externalMetricName)
 		}
+		mSpecs = append(mSpecs, metricSpec)
 	}
-	scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, metricSpecs...)
+	scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, mSpecs...)
+
+	if hasCPUUtilization {
+		resourceMetricNames = append(resourceMetricNames, "cpu")
+		scaledObjectMetricSpecs = append(scaledObjectMetricSpecs, cpuUtilization)
+	}
 
 	// sort metrics in ScaledObject, this way we always check the same resource in Reconcile loop and we can prevent unnecessary HPA updates,
 	// see https://github.com/kedacore/keda/issues/1531 for details
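Note on the underlying bug, for readers skimming the diff: on Kubernetes versions below 1.27 the stored HPA keeps the CPU Utilization metric at the end of its metrics list, so when the controller generates the list in a different order, a plain deep-equality comparison between desired and stored specs never matches and an update is issued every reconcile. A minimal, self-contained sketch of that failure mode (illustrative only — strings stand in for autoscalingv2.MetricSpec values; this is not KEDA code):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Order a controller might generate: CPU Utilization first.
	desired := []string{"cpu/Utilization", "memory/Utilization"}
	// Order the cluster stores on Kubernetes <1.27: CPU moved to the end.
	stored := []string{"memory/Utilization", "cpu/Utilization"}

	// A naive comparison never reports equality, so the controller
	// would issue an HPA update on every reconcile loop.
	fmt.Println(reflect.DeepEqual(desired, stored)) // prints: false
}

Appending the single retained CPU Utilization spec last, as this commit does, makes the generated order match the stored order, so the comparison converges.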
64 changes: 63 additions & 1 deletion controllers/keda/hpa_test.go
@@ -18,7 +18,7 @@ package keda

 import (
 	"context"
-
+	"github.com/go-logr/logr"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -119,6 +119,41 @@ var _ = Describe("hpa", func() {
 		Expect(capturedScaledObject.Status.Health).To(Equal(expectedHealth))
 	})
 
+	It("should reorder cpu utilization MetricSpecs", func() {
+		val := map[string]string{
+			"value": "50",
+		}
+		config := &scalersconfig.ScalerConfig{
+			TriggerMetadata: val,
+			MetricType:      v2.UtilizationMetricType,
+		}
+		cpu, _ := scalers.NewCPUMemoryScaler("cpu", config)
+		memory, _ := scalers.NewCPUMemoryScaler("memory", config)
+		ordered := append(cpu.GetMetricSpecForScaling(context.Background()), memory.GetMetricSpecForScaling(context.Background())...)
+
+		scaledObject := setupSoTest([]v1alpha1.ScaleTriggers{
+			{
+				Type:       "cpu",
+				MetricType: v2.UtilizationMetricType,
+				Metadata:   val,
+			},
+			{
+				Type:       "memory",
+				MetricType: v2.UtilizationMetricType,
+				Metadata:   val,
+			},
+		}, ordered, scaler, scaleHandler)
+
+		expect := append(memory.GetMetricSpecForScaling(context.Background()), cpu.GetMetricSpecForScaling(context.Background())...)
+
+		client.EXPECT().Status().Return(statusWriter)
+		statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Do(func(arg interface{}, scaledObject *v1alpha1.ScaledObject, anotherArg interface{}, opts ...interface{}) {
+		})
+
+		v, err := reconciler.getScaledObjectMetricSpecs(context.Background(), logger, scaledObject)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(v).To(Equal(expect))
+	})
 })
 
 func setupTest(health map[string]v1alpha1.HealthStatus, scaler *mock_scalers.MockScaler, scaleHandler *mock_scaling.MockScaleHandler) *v1alpha1.ScaledObject {
@@ -154,3 +189,30 @@ func setupTest(health map[string]v1alpha1.HealthStatus, scaler *mock_scalers.Moc

 	return scaledObject
 }
+
+func setupSoTest(triggers []v1alpha1.ScaleTriggers, ordered []v2.MetricSpec, scaler *mock_scalers.MockScaler, scaleHandler *mock_scaling.MockScaleHandler) *v1alpha1.ScaledObject {
+	scaledObject := &v1alpha1.ScaledObject{
+		ObjectMeta: v1.ObjectMeta{
+			Name: "some scaled object name",
+		},
+		Spec: v1alpha1.ScaledObjectSpec{
+			Triggers: triggers,
+		},
+	}
+
+	scalersCache := cache.ScalersCache{
+		Scalers: []cache.ScalerBuilder{{
+			Scaler: scaler,
+			Factory: func() (scalers.Scaler, *scalersconfig.ScalerConfig, error) {
+				return scaler, &scalersconfig.ScalerConfig{}, nil
+			},
+		}},
+		Recorder: nil,
+	}
+
+	ctx := context.Background()
+	scaler.EXPECT().GetMetricSpecForScaling(ctx).Return(ordered)
+	scaleHandler.EXPECT().GetScalersCache(context.Background(), gomock.Eq(scaledObject)).Return(&scalersCache, nil)
+
+	return scaledObject
+}
