diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml
index 12140e4cc18..5c987c5531c 100644
--- a/.github/workflows/main-build.yml
+++ b/.github/workflows/main-build.yml
@@ -25,8 +25,8 @@ jobs:
- id: go-paths
run: |
- echo ::set-output name=mod_cache::$(go env GOMODCACHE)
- echo ::set-output name=build_cache::$(go env GOCACHE)
+ echo "mod_cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+ echo "build_cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
- name: Go modules cache
uses: actions/cache@v3.3.1
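The hunk above (and the matching ones in the other workflows below) migrates from the deprecated `::set-output` workflow command to the `$GITHUB_OUTPUT` environment file. A minimal sketch of the pattern, assuming a step id of `go-paths` as in this workflow; quoting the redirection target (`>> "$GITHUB_OUTPUT"`) is shell best practice, though the diff leaves it unquoted:

```bash
# Deprecated: outputs were parsed from stdout, so anything able to write
# to stdout could inject step outputs.
echo "::set-output name=mod_cache::$(go env GOMODCACHE)"

# Replacement: append key=value pairs to the file GitHub exposes as $GITHUB_OUTPUT.
echo "mod_cache=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT"

# Multiline values use a delimiter block instead of a single key=value line:
{
  echo "go_env<<EOF"
  go env
  echo "EOF"
} >> "$GITHUB_OUTPUT"

# Downstream steps keep reading outputs exactly as before:
#   ${{ steps.go-paths.outputs.mod_cache }}
```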
diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml
index 84f046a7295..5518abf6464 100644
--- a/.github/workflows/pr-e2e.yml
+++ b/.github/workflows/pr-e2e.yml
@@ -57,12 +57,12 @@ jobs:
PR_URL="${{ github.event.issue.pull_request.url }}"
PR_NUM=${PR_URL##*/}
echo "Checking out from PR #$PR_NUM based on URL: $PR_URL"
- echo "::set-output name=pr_num::$PR_NUM"
+ echo "pr_num=$PR_NUM" >> $GITHUB_OUTPUT
# Get commit SHA
git config --global --add safe.directory "$GITHUB_WORKSPACE"
gh pr checkout $PR_NUM
SHA=$(git log -n 1 --pretty=format:"%H")
- echo "::set-output name=commit_sha::$SHA"
+ echo "commit_sha=$SHA" >> $GITHUB_OUTPUT
build-test-images:
needs: triage
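For reference, `${PR_URL##*/}` above (and `${GITHUB_REF#refs/tags/v}` further down) is plain POSIX parameter expansion, not an Actions feature. A standalone sketch with an illustrative URL:

```bash
PR_URL="https://api.github.com/repos/kedacore/keda/pulls/4914"  # illustrative value

# `##*/` strips the longest prefix matching `*/`, leaving only the final
# path segment, i.e. the PR number.
PR_NUM=${PR_URL##*/}
echo "$PR_NUM"   # prints: 4914
```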
diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml
index cd2a91e434b..f36102dc8b7 100644
--- a/.github/workflows/pr-validation.yml
+++ b/.github/workflows/pr-validation.yml
@@ -29,8 +29,8 @@ jobs:
- name: Set Go paths
id: go-paths
run: |
- echo ::set-output name=mod_cache::$(go env GOMODCACHE)
- echo ::set-output name=build_cache::$(go env GOCACHE)
+ echo "mod_cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+ echo "build_cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
- name: Go modules cache
uses: actions/cache@v3.3.1
diff --git a/.github/workflows/release-build.yml b/.github/workflows/release-build.yml
index 7f439a175fb..c9f2f7f6cde 100644
--- a/.github/workflows/release-build.yml
+++ b/.github/workflows/release-build.yml
@@ -25,8 +25,8 @@ jobs:
- id: go-paths
run: |
- echo ::set-output name=mod_cache::$(go env GOMODCACHE)
- echo ::set-output name=build_cache::$(go env GOCACHE)
+ echo "mod_cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
+ echo "build_cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
- name: Go modules cache
uses: actions/cache@v3.3.1
@@ -55,7 +55,7 @@ jobs:
- name: Get the version
id: get_version
- run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/v}
+ run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_OUTPUT
- name: Release Deployment YAML file
run: make release
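Same expansion family as the PR-number extraction above, but with a single `#` for shortest-prefix stripping; a sketch with an illustrative tag ref:

```bash
GITHUB_REF="refs/tags/v2.12.0"   # illustrative; set by the runner on tag builds

# `#refs/tags/v` strips that exact prefix, leaving the bare version string.
echo "VERSION=${GITHUB_REF#refs/tags/v}"   # prints: VERSION=2.12.0
```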
diff --git a/.github/workflows/template-arm64-smoke-tests.yml b/.github/workflows/template-arm64-smoke-tests.yml
index bd2076bb3dc..fa33863bfdc 100644
--- a/.github/workflows/template-arm64-smoke-tests.yml
+++ b/.github/workflows/template-arm64-smoke-tests.yml
@@ -11,4 +11,4 @@ jobs:
with:
runs-on: ARM64
kubernetesVersion: v1.26
- kindImage: kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352
+ kindImage: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
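The kind node images here and in template-versions-smoke-tests.yml below are pinned by digest, so the sha256 must be refreshed together with the tag. One way to resolve a tag to its digest, assuming either tool is installed:

```bash
# go-containerregistry's crane prints the manifest digest directly:
crane digest kindest/node:v1.26.6

# docker can resolve the same digest via buildx:
docker buildx imagetools inspect kindest/node:v1.26.6   # see the "Digest:" line
```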
diff --git a/.github/workflows/template-smoke-tests.yml b/.github/workflows/template-smoke-tests.yml
index f88a4378145..5b9d1163e21 100644
--- a/.github/workflows/template-smoke-tests.yml
+++ b/.github/workflows/template-smoke-tests.yml
@@ -35,7 +35,7 @@ jobs:
fetch-depth: 1
- name: Create k8s ${{ inputs.kubernetesVersion }} Kind Cluster
- uses: helm/kind-action@v1.7.0
+ uses: helm/kind-action@v1.8.0
with:
node_image: ${{ inputs.kindImage }}
cluster_name: smoke-tests-cluster-${{ inputs.kubernetesVersion }}
diff --git a/.github/workflows/template-versions-smoke-tests.yml b/.github/workflows/template-versions-smoke-tests.yml
index 8eab93aba4d..a50f521a18d 100644
--- a/.github/workflows/template-versions-smoke-tests.yml
+++ b/.github/workflows/template-versions-smoke-tests.yml
@@ -12,7 +12,7 @@ jobs:
kubernetesVersion: [v1.26, v1.25, v1.24, v1.23]
include:
- kubernetesVersion: v1.26
- kindImage: kindest/node:v1.26.0@sha256:691e24bd2417609db7e589e1a479b902d2e209892a10ce375fab60a8407c7352
+ kindImage: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb
- kubernetesVersion: v1.25
kindImage: kindest/node:v1.25.0@sha256:428aaa17ec82ccde0131cb2d1ca6547d13cf5fdabcc0bbecf749baa935387cbf
- kubernetesVersion: v1.24
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 903cf96aea9..6b8db10ff40 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -44,3 +44,8 @@ repos:
entry: golangci-lint run
types: [go]
pass_filenames: false
+ - id: validate-changelog
+ name: Validate Changelog
+ language: system
+ entry: "bash hack/validate-changelog.sh"
+ pass_filenames: false
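The script body is not part of this diff, so the following is only a hypothetical sketch of what `hack/validate-changelog.sh` might check, inferred from the changelog fixes elsewhere in this PR (alphabetical ordering within sections, well-formed `/issues/` links); the file name is real, but the rules and helper names are assumptions:

```bash
#!/usr/bin/env bash
# Hypothetical sketch only; the real hack/validate-changelog.sh is not shown
# in this diff, and its actual rules are likely per-subsection and stricter.
set -euo pipefail

changelog="CHANGELOG.md"

# Catch the recurring "/issue/" typo in GitHub links (should be "/issues/").
if grep -nE 'github\.com/kedacore/keda/issue/' "$changelog"; then
  echo "ERROR: found '/issue/' links; use '/issues/'" >&2
  exit 1
fi

# Rough ordering check: bullet entries in the unreleased section should be
# alphabetized (a real validator would scope this per "###" subsection).
awk '/^## Unreleased/{in_section=1; next} /^## /{in_section=0} in_section && /^- \*\*/' "$changelog" \
  | sort -cf \
  || { echo "ERROR: unreleased entries are not alphabetized" >&2; exit 1; }
```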
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f3c5f251fb..ed125de182e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -49,16 +49,20 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio
### New
-- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX))
+- **AWS SQS Scaler**: Support for scaling to include delayed messages. ([#4377](https://github.com/kedacore/keda/issues/4377))
+- **Governance**: KEDA transitioned to CNCF Graduated project ([#63](https://github.com/kedacore/governance/issues/63))
### Improvements
-
-- **General:**: Add ScaledObject/ScaledJob names to output of `kubectl get triggerauthentication/clustertriggerauthentication` ([#796](https://github.com/kedacore/keda/issues/796))
-- **General:**: Add standalone CRD generation to release workflow ([#2726](https://github.com/kedacore/keda/issues/2726))
+- **General**: Add more events for user checking ([#3764](https://github.com/kedacore/keda/issues/3764))
+- **General**: Add ScaledObject/ScaledJob names to output of `kubectl get triggerauthentication/clustertriggerauthentication` ([#796](https://github.com/kedacore/keda/issues/796))
+- **General**: Add standalone CRD generation to release workflow ([#2726](https://github.com/kedacore/keda/issues/2726))
+- **General**: Adding a changelog validation script to check formatting and ordering ([#3190](https://github.com/kedacore/keda/issues/3190))
+- **General**: Update golangci-lint version documented in CONTRIBUTING.md since old version doesn't support go 1.20 (N/A)
+- **Azure Pod Identity**: Introduce validation to prevent usage of empty identity ID for Azure identity providers ([#4528](https://github.com/kedacore/keda/issues/4528))
### Fixes
-- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX))
+- **Solace Scaler**: Fix a bug where `queueName` is not properly escaped during URL encode ([#4936](https://github.com/kedacore/keda/issues/4936))
### Deprecations
@@ -66,15 +70,18 @@ You can find all deprecations in [this overview](https://github.com/kedacore/ked
New deprecation(s):
-- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX))
+- **General**: Clean up previously deprecated code for 2.12 release ([#4899](https://github.com/kedacore/keda/issues/4899))
### Breaking Changes
-- TODO ([#XXX](https://github.com/kedacore/keda/issue/XXX))
+- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX))
### Other
+- **General**: Add a deployment arg to keda-metrics-apiserver so it prints only errors, routing them to stderr ([#4882](https://github.com/kedacore/keda/pull/4882))
+- **General**: Fixed a typo in the StatefulSet scaling resolver ([#4902](https://github.com/kedacore/keda/pull/4902))
- **General**: Refactor ScaledJob related methods to be located at scale_handler ([#4781](https://github.com/kedacore/keda/issues/4781))
+- **General**: Replace deprecated `set-output` command with environment file ([#4914](https://github.com/kedacore/keda/issues/4914))
## v2.11.2
@@ -82,8 +89,8 @@ New deprecation(s):
- **General**: Metrics server exposes Prometheus metrics ([#4776](https://github.com/kedacore/keda/issues/4776))
- **AWS Pod Identity Authentication**: Use `default` service account if the workload doesn't set it ([#4767](https://github.com/kedacore/keda/issues/4767))
-- **GitHub Runner Scaler**: Fix rate checking on GHEC when HTTP 200 ([#4786](https://github.com/kedacore/keda/issues/4786))
- **GitHub Runner Scaler**: Fix explicit repo check 404 to skip not crash ([#4790](https://github.com/kedacore/keda/issues/4790))
+- **GitHub Runner Scaler**: Fix rate checking on GHEC when HTTP 200 ([#4786](https://github.com/kedacore/keda/issues/4786))
- **Pulsar Scaler**: Fix `msgBacklogThreshold` field being named wrongly as `msgBacklog` ([#4681](https://github.com/kedacore/keda/issues/4681))
### Deprecations
@@ -132,8 +139,8 @@ None.
- **General**: Introduce new Solr Scaler ([#4234](https://github.com/kedacore/keda/issues/4234))
- **General**: Support ScaledObject taking over existing HPAs with the same name while they are not managed by other ScaledObject ([#4457](https://github.com/kedacore/keda/issues/4457))
- **CPU/Memory scaler**: Add support for scale to zero if there are multiple triggers([#4269](https://github.com/kedacore/keda/issues/4269))
-- **Redis Scalers**: Allow scaling using redis stream length ([#4277](https://github.com/kedacore/keda/issues/4277))
- **Redis Scalers**: Allow scaling using consumer group lag ([#3127](https://github.com/kedacore/keda/issues/3127))
+- **Redis Scalers**: Allow scaling using redis stream length ([#4277](https://github.com/kedacore/keda/issues/4277))
### Breaking Changes
@@ -144,8 +151,8 @@ None.
- **General**: Add a Prometheus metric for measuring the processing loop lag ([#4702](https://github.com/kedacore/keda/issues/4702))
- **General**: Add a Prometheus metric with KEDA build info ([#4647](https://github.com/kedacore/keda/issues/4647))
- **General**: Allow to change the port of the Admission Webhook ([#468](https://github.com/kedacore/charts/issues/468))
-- **General**: Enable secret scanning in GitHub repo
-- **General**: Kubernetes v1.25, v1.26 or v1.27 are supported
+- **General**: Enable secret scanning in GitHub repo ([#4710](https://github.com/kedacore/keda/issues/4710))
+- **General**: Kubernetes v1.25, v1.26 or v1.27 are supported ([#4710](https://github.com/kedacore/keda/issues/4710))
- **AWS DynamoDB**: Add support for `indexName` ([#4680](https://github.com/kedacore/keda/issues/4680))
- **Azure Data Explorer Scaler**: Use azidentity SDK ([#4489](https://github.com/kedacore/keda/issues/4489))
- **External Scaler**: Add tls options in TriggerAuth metadata. ([#3565](https://github.com/kedacore/keda/issues/3565))
@@ -154,7 +161,7 @@ None.
- **Kafka Scaler**: Add support for OAuth extensions ([#4544](https://github.com/kedacore/keda/issues/4544))
- **NATS JetStream Scaler**: Add support for pulling AccountID from TriggerAuthentication ([#4586](https://github.com/kedacore/keda/issues/4586))
- **PostgreSQL Scaler**: Replace `lib/pq` with `pgx` ([#4704](https://github.com/kedacore/keda/issues/4704))
-- **Prometheus Scaler**: Add support for Google Managed Prometheus ([#467](https://github.com/kedacore/keda/issues/4674))
+- **Prometheus Scaler**: Add support for Google Managed Prometheus ([#4674](https://github.com/kedacore/keda/issues/4674))
- **Pulsar Scaler**: Improve error messages for unsuccessful connections ([#4563](https://github.com/kedacore/keda/issues/4563))
- **RabbitMQ Scaler**: Add support for `unsafeSsl` in trigger metadata ([#4448](https://github.com/kedacore/keda/issues/4448))
- **RabbitMQ Scaler**: Add support for `workloadIdentityResource` and utilize AzureAD Workload Identity for HTTP authorization ([#4716](https://github.com/kedacore/keda/issues/4716))
@@ -162,12 +169,12 @@ None.
### Fixes
-- **General**: Allow to remove the finalizer even if the ScaledObject isn't valid ([#4396](https://github.com/kedacore/keda/issue/4396))
-- **General**: Check ScaledObjects with multiple triggers with non unique name in the Admission Webhook ([#4664](https://github.com/kedacore/keda/issue/4664))
-- **General**: Grafana Dashboard: Fix HPA metrics panel to use range instead of instant ([#4513](https://github.com/kedacore/keda/pull/4513))
+- **General**: Allow to remove the finalizer even if the ScaledObject isn't valid ([#4396](https://github.com/kedacore/keda/issues/4396))
+- **General**: Check ScaledObjects with multiple triggers with non unique name in the Admission Webhook ([#4664](https://github.com/kedacore/keda/issues/4664))
- **General**: Grafana Dashboard: Fix HPA metrics panel by replacing $namepsace to $exported_namespace due to label conflict ([#4539](https://github.com/kedacore/keda/pull/4539))
+- **General**: Grafana Dashboard: Fix HPA metrics panel to use range instead of instant ([#4513](https://github.com/kedacore/keda/pull/4513))
- **General**: ScaledJob: Check if MaxReplicaCount is nil before access to it ([#4568](https://github.com/kedacore/keda/issues/4568))
-- **AWS SQS Scaler**: Respect `scaleOnInFlight` value ([#4276](https://github.com/kedacore/keda/issue/4276))
+- **AWS SQS Scaler**: Respect `scaleOnInFlight` value ([#4276](https://github.com/kedacore/keda/issues/4276))
- **Azure Monitor**: Exclude Azure Monitor scaler from metricName deprecation ([#4713](https://github.com/kedacore/keda/pull/4713))
- **Azure Pipelines**: Respect all required demands ([#4404](https://github.com/kedacore/keda/issues/4404))
- **Kafka Scaler**: Add back `strings.TrimSpace()` function for saslAuthType ([#4689](https://github.com/kedacore/keda/issues/4689))
@@ -199,12 +206,12 @@ New deprecation(s):
### Fixes
-- **General**: Drop a transitive dependency on bou.ke/monkey ([#4366](https://github.com/kedacore/keda/issue/4366))
-- **General**: Fix odd number of arguments passed as key-value pairs for logging ([#4369](https://github.com/kedacore/keda/issue/4369))
-- **General**: Update supported versions in the welcome message ([#4360](https://github.com/kedacore/keda/issue/4360))
-- **Admission Webhooks**: Allow to remove the finalizer even if the ScaledObject isn't valid ([#4396](https://github.com/kedacore/keda/issue/4396))
-- **AWS SQS Scaler**: Respect `scaleOnInFlight` value ([#4276](https://github.com/kedacore/keda/issue/4276))
-- **Azure Pipelines**: Fix for disallowing `$top` on query when using `meta.parentID` method ([#4397])
+- **General**: Drop a transitive dependency on bou.ke/monkey ([#4366](https://github.com/kedacore/keda/issues/4366))
+- **General**: Fix odd number of arguments passed as key-value pairs for logging ([#4369](https://github.com/kedacore/keda/issues/4369))
+- **General**: Update supported versions in the welcome message ([#4360](https://github.com/kedacore/keda/issues/4360))
+- **Admission Webhooks**: Allow to remove the finalizer even if the ScaledObject isn't valid ([#4396](https://github.com/kedacore/keda/issues/4396))
+- **AWS SQS Scaler**: Respect `scaleOnInFlight` value ([#4276](https://github.com/kedacore/keda/issues/4276))
+- **Azure Pipelines**: Fix for disallowing `$top` on query when using `meta.parentID` method ([#4397](https://github.com/kedacore/keda/issues/4397))
- **Azure Pipelines**: Respect all required demands ([#4404](https://github.com/kedacore/keda/issues/4404))
## v2.10.0
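Most of the surrounding CHANGELOG hunks only repair `/issue/` to `/issues/` in link targets; a sweep like that can be done mechanically (GNU sed shown; BSD/macOS sed needs `-i ''`):

```bash
# The trailing "/" in the pattern keeps already-correct "/issues/" links untouched.
sed -i 's#github\.com/kedacore/keda/issue/#github.com/kedacore/keda/issues/#g' CHANGELOG.md
```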
@@ -219,6 +226,8 @@ Here is an overview of all **stable** additions:
- **Prometheus Metrics**: Introduce scaler activity in Prometheus metrics ([#4114](https://github.com/kedacore/keda/issues/4114))
- **Prometheus Metrics**: Introduce scaler latency in Prometheus metrics ([#4037](https://github.com/kedacore/keda/issues/4037))
+#### Experimental
+
Here is an overview of all new **experimental** features:
- **GitHub Scaler**: Introduced new GitHub Scaler ([#1732](https://github.com/kedacore/keda/issues/1732))
@@ -232,19 +241,19 @@ Here is an overview of all new **experimental** features:
- **Azure Pipelines Scaler**: Improve error logging for `validatePoolID` ([#3996](https://github.com/kedacore/keda/issues/3996))
- **Azure Pipelines Scaler**: New configuration parameter `requireAllDemands` to scale only if jobs request all demands provided by the scaling definition ([#4138](https://github.com/kedacore/keda/issues/4138))
- **Hashicorp Vault**: Add support to secrets backend version 1 ([#2645](https://github.com/kedacore/keda/issues/2645))
-- **Kafka Scaler**: Add support to use `tls` and `sasl` in ScaledObject ([#4232](https://github.com/kedacore/keda/issues/4322))
+- **Kafka Scaler**: Add support to use `tls` and `sasl` in ScaledObject ([#4322](https://github.com/kedacore/keda/issues/4322))
- **Kafka Scaler**: Improve error logging for `GetBlock` method ([#4232](https://github.com/kedacore/keda/issues/4232))
- **Prometheus Scaler**: Add custom headers and custom auth support ([#4208](https://github.com/kedacore/keda/issues/4208))
+- **Prometheus Scaler**: Extend Prometheus Scaler to support Azure managed service for Prometheus ([#4153](https://github.com/kedacore/keda/issues/4153))
- **RabbitMQ Scaler**: Add TLS support ([#967](https://github.com/kedacore/keda/issues/967))
- **Redis Scalers**: Add support to Redis 7 ([#4052](https://github.com/kedacore/keda/issues/4052))
- **Selenium Grid Scaler**: Add `platformName` to selenium-grid scaler metadata structure ([#4038](https://github.com/kedacore/keda/issues/4038))
-- **Prometheus Scaler**: Extend Prometheus Scaler to support Azure managed service for Prometheus ([#4153](https://github.com/kedacore/keda/issues/4153))
### Fixes
- **General**: Fix regression in fallback mechanism ([#4249](https://github.com/kedacore/keda/issues/4249))
- **General**: Prevent a panic that might occur while refreshing a scaler cache ([#4092](https://github.com/kedacore/keda/issues/4092))
-- **AWS Cloudwatch Scaler:** Make `metricName` and `namespace` optional when using `expression` ([#4334](https://github.com/kedacore/keda/issues/4262))
+- **AWS Cloudwatch Scaler**: Make `metricName` and `namespace` optional when using `expression` ([#4334](https://github.com/kedacore/keda/issues/4334))
- **Azure Pipelines Scaler**: Add new parameter to limit the jobs returned ([#4324](https://github.com/kedacore/keda/issues/4324))
- **Azure Queue Scaler**: Fix azure queue length ([#4002](https://github.com/kedacore/keda/issues/4002))
- **Azure Service Bus Scaler**: Improve way clients are created to reduce amount of ARM requests ([#4262](https://github.com/kedacore/keda/issues/4262))
@@ -254,7 +263,7 @@ Here is an overview of all new **experimental** features:
- **Datadog Scaler**: Return correct error when getting a 429 error ([#4187](https://github.com/kedacore/keda/issues/4187))
- **Kafka Scaler**: Return error if the processing of the partition lag fails ([#4098](https://github.com/kedacore/keda/issues/4098))
- **Kafka Scaler**: Support 0 in activationLagThreshold configuration ([#4137](https://github.com/kedacore/keda/issues/4137))
-- **Kafka Scaler:** Trim whitespace from `partitionLimitation` field ([#4333](https://github.com/kedacore/keda/pull/4333))
+- **Kafka Scaler**: Trim whitespace from `partitionLimitation` field ([#4333](https://github.com/kedacore/keda/pull/4333))
- **NATS Jetstream Scaler**: Fix compatibility when cluster not on kubernetes ([#4101](https://github.com/kedacore/keda/issues/4101))
- **Prometheus Metrics**: Expose Prometheus Metrics also when getting ScaledObject state ([#4075](https://github.com/kedacore/keda/issues/4075))
- **Redis Scalers**: Fix panic produced by incorrect logger initialization ([#4197](https://github.com/kedacore/keda/issues/4197))
@@ -279,103 +288,104 @@ New deprecation(s):
- **General**: Metrics Server: print a message on successful connection to gRPC server ([#4190](https://github.com/kedacore/keda/issues/4190))
- **General**: Pass deep copy object to scalers cache from the ScaledObject controller ([#4207](https://github.com/kedacore/keda/issues/4207))
- **General**: Review CodeQL rules and enable it on PRs ([#4032](https://github.com/kedacore/keda/pull/4032))
-- **RabbitMQ Scaler:** Move from `streadway/amqp` to `rabbitmq/amqp091-go` ([#4004](https://github.com/kedacore/keda/pull/4039))
+- **RabbitMQ Scaler**: Move from `streadway/amqp` to `rabbitmq/amqp091-go` ([#4004](https://github.com/kedacore/keda/issues/4004))
## v2.9.3
### Fixes
-- **Azure Service Bus Scaler:** Use correct auth flows with pod identity ([#4026](https://github.com/kedacore/keda/issues/4026)|[#4123](https://github.com/kedacore/keda/issues/4123))
+- **Azure Service Bus Scaler**: Use correct auth flows with pod identity ([#4026](https://github.com/kedacore/keda/issues/4026)|[#4123](https://github.com/kedacore/keda/issues/4123))
## v2.9.2
### Fixes
- **General**: Prevent a panic that might occur while refreshing a scaler cache ([#4092](https://github.com/kedacore/keda/issues/4092))
+- **Azure Service Bus Scaler**: Use correct auth flows with pod identity ([#4026](https://github.com/kedacore/keda/issues/4026))
- **Prometheus Metrics**: Fix exposed metric from `keda_scaled_errors` to `keda_scaled_object_errors` ([#4037](https://github.com/kedacore/keda/issues/4037))
-- **Azure Service Bus Scaler:** Use correct auth flows with pod identity ([#4026](https://github.com/kedacore/keda/issues/4026))
## v2.9.1
### Fixes
- **General**: Properly retrieve and close scalers cache ([#4011](https://github.com/kedacore/keda/issues/4011))
-- **Azure Key Vault:** Raise an error if authentication mechanism not provided ([#4010](https://github.com/kedacore/keda/issues/4010))
+- **Azure Key Vault**: Raise an error if authentication mechanism not provided ([#4010](https://github.com/kedacore/keda/issues/4010))
- **Redis Scalers**: Support `unsafeSsl` and enable ssl verification as default ([#4005](https://github.com/kedacore/keda/issues/4005))
## v2.9.0
### Breaking Changes
-- **General:** Change API version of HPA from `autoscaling/v2beta2` to `autoscaling/v2` ([#2462](https://github.com/kedacore/keda/issues/2462))
-- **General:** As per our [support policy](https://github.com/kedacore/governance/blob/main/SUPPORT.md), Kubernetes v1.23 or above is required and support for Kubernetes v1.22 or below was removed ([docs](https://keda.sh/docs/2.9/operate/cluster/#kubernetes-compatibility))
+- **General**: Change API version of HPA from `autoscaling/v2beta2` to `autoscaling/v2` ([#2462](https://github.com/kedacore/keda/issues/2462))
+- **General**: As per our [support policy](https://github.com/kedacore/governance/blob/main/SUPPORT.md), Kubernetes v1.23 or above is required and support for Kubernetes v1.22 or below was removed ([docs](https://keda.sh/docs/2.9/operate/cluster/#kubernetes-compatibility))
### New
Here is an overview of all **stable** additions:
-- **General**: **EXPERIMENTAL** Adding an option to cache metric values for a scaler during the polling interval ([#2282](https://github.com/kedacore/keda/issues/2282))
-- **General:** Introduce new CouchDB Scaler ([#3746](https://github.com/kedacore/keda/issues/3746))
-- **General:** Introduce new Etcd Scaler ([#3880](https://github.com/kedacore/keda/issues/3880))
-- **General:** Introduce new Loki Scaler ([#3699](https://github.com/kedacore/keda/issues/3699))
+- **General**: Introduce new CouchDB Scaler ([#3746](https://github.com/kedacore/keda/issues/3746))
+- **General**: Introduce new Etcd Scaler ([#3880](https://github.com/kedacore/keda/issues/3880))
+- **General**: Introduce new Loki Scaler ([#3699](https://github.com/kedacore/keda/issues/3699))
- **General**: Introduce rate-limitting parameters to KEDA manager to allow override of client defaults ([#3730](https://github.com/kedacore/keda/issues/3730))
-- **General**: Introduction deprecation & breaking change policy ([Governance #68](https://github.com/kedacore/governance/issues/68))
-- **General**: Provide off-the-shelf Grafana dashboard for application autoscaling ([Docs](https://keda.sh/docs/2.9/operate/prometheus/) | [#3911](https://github.com/kedacore/keda/issues/3911))
-- **General:** Produce reproducible builds ([#3509](https://github.com/kedacore/keda/issues/3509)
+- **General**: Introduction of deprecation & breaking change policy ([#68](https://github.com/kedacore/governance/issues/68))
+- **General**: Produce reproducible builds ([#3509](https://github.com/kedacore/keda/issues/3509))
+- **General**: Provide off-the-shelf Grafana dashboard for application autoscaling ([#3911](https://github.com/kedacore/keda/issues/3911))
- **AWS Scalers**: Introduce new AWS endpoint URL settings. ([#3337](https://github.com/kedacore/keda/issues/3337))
- **Azure Service Bus Scaler**: Support for Shared Access Signature (SAS) tokens for authentication. ([#2920](https://github.com/kedacore/keda/issues/2920))
-- **Azure Service Bus Scaler:** Support regex usage in queueName / subscriptionName parameters. ([#1624](https://github.com/kedacore/keda/issues/1624))
-- **ElasticSearch Scaler**: Support for ElasticSearch Service on Elastic Cloud ([#3785](https://github.com/kedacore/keda/issues/3785)
-- **Prometheus Metrics**: Introduce new `ScalerName` label in Prometheus metrics. ([#3588](https://github.com/kedacore/keda/issues/3588))
+- **Azure Service Bus Scaler**: Support regex usage in queueName / subscriptionName parameters. ([#1624](https://github.com/kedacore/keda/issues/1624))
+- **ElasticSearch Scaler**: Support for ElasticSearch Service on Elastic Cloud ([#3785](https://github.com/kedacore/keda/issues/3785))
- **Prometheus Metrics**: Expose renamed version of existing Prometheus Metrics in KEDA Operator. ([#3919](https://github.com/kedacore/keda/issues/3919))
+- **Prometheus Metrics**: Introduce new `ScalerName` label in Prometheus metrics. ([#3588](https://github.com/kedacore/keda/issues/3588))
- **Prometheus Metrics**: Provide Prometheus metric with indication of total number of custom resources per namespace for each custom resource type (CRD). ([#2637](https://github.com/kedacore/keda/issues/2637)|[#2638](https://github.com/kedacore/keda/issues/2638)|[#2639](https://github.com/kedacore/keda/issues/2639))
- **Prometheus Metrics**: Provide Prometheus metric with indication of total number of triggers per trigger type in `ScaledJob`/`ScaledObject`. ([#3663](https://github.com/kedacore/keda/issues/3663))
-- **Selenium Grid Scaler:** Allow setting url trigger parameter from TriggerAuthentication/ClusterTriggerAuthentication ([#3752](https://github.com/kedacore/keda/pull/3752))
+- **Selenium Grid Scaler**: Allow setting url trigger parameter from TriggerAuthentication/ClusterTriggerAuthentication ([#3752](https://github.com/kedacore/keda/pull/3752))
+
+#### Experimental
Here is an overview of all new **experimental** features:
-- **General**: **EXPERIMENTAL** Adding an option to cache metric values for a scaler during the polling interval ([#2282](https://github.com/kedacore/keda/issues/2282))
+- **General**: Adding an option to cache metric values for a scaler during the polling interval ([#2282](https://github.com/kedacore/keda/issues/2282))
### Improvements
-- **General:** Add explicit `seccompProfile` type to `securityContext` config ([#3561](https://github.com/kedacore/keda/issues/3561))
-- **General:** Add `Min` column to ScaledJob visualization ([#3689](https://github.com/kedacore/keda/issues/3689))
-- **General**: Disable response compression for k8s restAPI in client-go ([#3863](https://github.com/kedacore/keda/issues/3863) | [Kubernetes #112296](https://github.com/kubernetes/kubernetes/issues/112296))
-- **General:** Improve the function used to normalize metric names ([#3789](https://github.com/kedacore/keda/issues/3789)
-- **General:** Support for using pod identities for authentication in Azure Key Vault ([#3813](https://github.com/kedacore/keda/issues/3813)
-- **General:** Support disable keep http connection alive ([#3874](https://github.com/kedacore/keda/issues/3874)
-- **General:** Support "Restrict Secret Access" to mitigate the security risk ([#3668](https://github.com/kedacore/keda/issues/3668)
-- **Apache Kafka Scaler:** Support for SASL/OAuth bearer authentication ([#3681](https://github.com/kedacore/keda/issues/3681))
-- **Apache Kafka Scaler:** Support for limiting Kafka partitions KEDA will monitor ([#3830](https://github.com/kedacore/keda/issues/3830))
-- **Apache Kafka Scaler:** Support for excluding persistent lag ([#3904](https://github.com/kedacore/keda/issues/3904))
-- **Azure AD Pod Identity Authentication:** Improve logs around integration with aad-pod-identity for simplified troubleshooting ([#3610](https://github.com/kedacore/keda/issues/3610))
-- **Azure Event Hubs Scaler:** Support Azure Active Directory Pod & Workload Identity for Storage Blobs ([#3569](https://github.com/kedacore/keda/issues/3569))
-- **Azure Event Hubs Scaler:** Support for using connection strings for Event Hub namespace instead of the Event Hub itself. ([#3922](https://github.com/kedacore/keda/issues/3922))
-- **Azure Event Hubs Scaler:** Support for `dapr` checkpoint strategy ([#3022](https://github.com/kedacore/keda/issues/3022))
-- **Azure Pipelines Scaler:** Improved performance for scaling big amount of job requests ([#3702](https://github.com/kedacore/keda/issues/3702))
-- **Cron Scaler**: Improve instance count determination. ([#3838](https://github.com/kedacore/keda/issues/3854))
-- **GCP Storage Scaler:** Support for blob prefix ([#3756](https://github.com/kedacore/keda/issues/3756))
-- **GCP Storage Scaler:** Support for blob delimiters ([#3756](https://github.com/kedacore/keda/issues/3756))
-- **Metrics API Scaler:** Support for `unsafeSsl` parameter to skip certificate validation when connecting over HTTPS ([#3728](https://github.com/kedacore/keda/discussions/3728))
-- **NATS Jetstream Scaler:** Improved querying to respect stream consumer leader in clustered scenarios ([#3860](https://github.com/kedacore/keda/issues/3860))
-- **NATS Scalers:** Support HTTPS protocol in NATS Scalers ([#3805](https://github.com/kedacore/keda/issues/3805))
-- **Prometheus Scaler:** Introduce skipping of certificate check for unsigned certs ([#2310](https://github.com/kedacore/keda/issues/2310))
-- **Pulsar Scaler:** Add support for basic authentication ([#3844](https://github.com/kedacore/keda/issues/3844))
-- **Pulsar Scaler:** Add support for bearer token authentication ([#3844](https://github.com/kedacore/keda/issues/3844))
-- **Pulsar Scaler:** Add support for partitioned topics ([#3833](https://github.com/kedacore/keda/issues/3833))
+- **General**: Add explicit `seccompProfile` type to `securityContext` config ([#3561](https://github.com/kedacore/keda/issues/3561))
+- **General**: Add `Min` column to ScaledJob visualization ([#3689](https://github.com/kedacore/keda/issues/3689))
+- **General**: Disable response compression for k8s restAPI in client-go ([#3863](https://github.com/kedacore/keda/issues/3863))
+- **General**: Improve the function used to normalize metric names ([#3789](https://github.com/kedacore/keda/issues/3789))
+- **General**: Support disabling HTTP connection keep-alive ([#3874](https://github.com/kedacore/keda/issues/3874))
+- **General**: Support for using pod identities for authentication in Azure Key Vault ([#3813](https://github.com/kedacore/keda/issues/3813))
+- **General**: Support "Restrict Secret Access" to mitigate the security risk ([#3668](https://github.com/kedacore/keda/issues/3668))
+- **Apache Kafka Scaler**: Support for excluding persistent lag ([#3904](https://github.com/kedacore/keda/issues/3904))
+- **Apache Kafka Scaler**: Support for limiting Kafka partitions KEDA will monitor ([#3830](https://github.com/kedacore/keda/issues/3830))
+- **Apache Kafka Scaler**: Support for SASL/OAuth bearer authentication ([#3681](https://github.com/kedacore/keda/issues/3681))
+- **Azure AD Pod Identity Authentication**: Improve logs around integration with aad-pod-identity for simplified troubleshooting ([#3610](https://github.com/kedacore/keda/issues/3610))
+- **Azure Event Hubs Scaler**: Support Azure Active Directory Pod & Workload Identity for Storage Blobs ([#3569](https://github.com/kedacore/keda/issues/3569))
+- **Azure Event Hubs Scaler**: Support for `dapr` checkpoint strategy ([#3022](https://github.com/kedacore/keda/issues/3022))
+- **Azure Event Hubs Scaler**: Support for using connection strings for Event Hub namespace instead of the Event Hub itself. ([#3922](https://github.com/kedacore/keda/issues/3922))
+- **Azure Pipelines Scaler**: Improved performance for scaling big amount of job requests ([#3702](https://github.com/kedacore/keda/issues/3702))
+- **Cron Scaler**: Improve instance count determination. ([#3838](https://github.com/kedacore/keda/issues/3838))
+- **GCP Storage Scaler**: Support for blob delimiters ([#3756](https://github.com/kedacore/keda/issues/3756))
+- **GCP Storage Scaler**: Support for blob prefix ([#3756](https://github.com/kedacore/keda/issues/3756))
+- **Metrics API Scaler**: Support for `unsafeSsl` parameter to skip certificate validation when connecting over HTTPS ([#3728](https://github.com/kedacore/keda/discussions/3728))
+- **NATS Jetstream Scaler**: Improved querying to respect stream consumer leader in clustered scenarios ([#3860](https://github.com/kedacore/keda/issues/3860))
+- **NATS Scalers**: Support HTTPS protocol in NATS Scalers ([#3805](https://github.com/kedacore/keda/issues/3805))
+- **Prometheus Scaler**: Introduce skipping of certificate check for unsigned certs ([#2310](https://github.com/kedacore/keda/issues/2310))
+- **Pulsar Scaler**: Add support for basic authentication ([#3844](https://github.com/kedacore/keda/issues/3844))
+- **Pulsar Scaler**: Add support for bearer token authentication ([#3844](https://github.com/kedacore/keda/issues/3844))
+- **Pulsar Scaler**: Add support for partitioned topics ([#3833](https://github.com/kedacore/keda/issues/3833))
### Fixes
-- **General:** Respect optional parameter inside `envs` for ScaledJobs ([#3568](https://github.com/kedacore/keda/issues/3568))
-- **General:** Ensure `Close` is only called once during `PushScaler`'s deletion ([#3881](https://github.com/kedacore/keda/issues/3881))
-- **Security:** Provide patch for CVE-2022-3172 vulnerability ([#3690](https://github.com/kedacore/keda/issues/3690))
-- **Azure Blob Scaler** Store forgotten logger ([#3811](https://github.com/kedacore/keda/issues/3811))
-- **Datadog Scaler** The last data point of some specific query is always null ([#3906](https://github.com/kedacore/keda/issues/3906))
-- **GCP Stackdriver Scalar:** Update Stackdriver client to handle detecting double and int64 value types ([#3777](https://github.com/kedacore/keda/issues/3777))
-- **MongoDB Scaler:** Username/password can contain `:/?#[]@` ([#3992](https://github.com/kedacore/keda/issues/3992))
-- **New Relic Scaler** Store forgotten logger ([#3945](https://github.com/kedacore/keda/issues/3945))
-- **Prometheus Scaler:** Treat Inf the same as Null result ([#3644](https://github.com/kedacore/keda/issues/3644))
-- **NATS Jetstream:** Correctly count messages that should be redelivered (waiting for ack) towards KEDA value ([#3787](https://github.com/kedacore/keda/issues/3787))
+- **General**: Ensure `Close` is only called once during `PushScaler`'s deletion ([#3881](https://github.com/kedacore/keda/issues/3881))
+- **General**: Respect optional parameter inside `envs` for ScaledJobs ([#3568](https://github.com/kedacore/keda/issues/3568))
+- **Azure Blob Scaler**: Store forgotten logger ([#3811](https://github.com/kedacore/keda/issues/3811))
+- **Datadog Scaler**: The last data point of some specific query is always null ([#3906](https://github.com/kedacore/keda/issues/3906))
+- **GCP Stackdriver Scalar**: Update Stackdriver client to handle detecting double and int64 value types ([#3777](https://github.com/kedacore/keda/issues/3777))
+- **MongoDB Scaler**: Username/password can contain `:/?#[]@` ([#3992](https://github.com/kedacore/keda/issues/3992))
+- **NATS Jetstream**: Correctly count messages that should be redelivered (waiting for ack) towards KEDA value ([#3787](https://github.com/kedacore/keda/issues/3787))
+- **New Relic Scaler**: Store forgotten logger ([#3945](https://github.com/kedacore/keda/issues/3945))
+- **Prometheus Scaler**: Treat Inf the same as Null result ([#3644](https://github.com/kedacore/keda/issues/3644))
+- **Security**: Provide patch for CVE-2022-3172 vulnerability ([#3690](https://github.com/kedacore/keda/issues/3690))
### Deprecations
@@ -393,13 +403,13 @@ Previously announced deprecation(s):
### Other
-- **General**: Bump Golang to 1.18.6 ([#3205](https://github.com/kedacore/keda/issues/3205))
- **General**: Bump `github.com/Azure/azure-event-hubs-go/v3` ([#2986](https://github.com/kedacore/keda/issues/2986))
-- **General**: Migrate from `azure-service-bus-go` to `azservicebus` ([#3394](https://github.com/kedacore/keda/issues/3394))
+- **General**: Bump Golang to 1.18.6 ([#3205](https://github.com/kedacore/keda/issues/3205))
- **General**: Metrics Server: use gRPC connection to get metrics from Operator ([#3920](https://github.com/kedacore/keda/issues/3920))
- **General**: Metrics Server: use OpenAPI definitions served by custom-metrics-apiserver ([#3929](https://github.com/kedacore/keda/issues/3929))
+- **General**: Migrate from `azure-service-bus-go` to `azservicebus` ([#3394](https://github.com/kedacore/keda/issues/3394))
+- **Apache Kafka Scaler**: Increase logging V-level ([#3948](https://github.com/kedacore/keda/issues/3948))
- **Azure EventHub**: Add e2e tests ([#2792](https://github.com/kedacore/keda/issues/2792))
-- **Apache Kafka Scaler:** Increase logging V-level ([#3948](https://github.com/kedacore/keda/issues/3948))
## v2.8.1
@@ -409,13 +419,13 @@ None.
### Improvements
-- **Datadog Scaler:** Support multi-query metrics, and aggregation ([#3423](https://github.com/kedacore/keda/issues/3423))
+- **Datadog Scaler**: Support multi-query metrics, and aggregation ([#3423](https://github.com/kedacore/keda/issues/3423))
### Fixes
-- **General:** Metrics endpoint returns correct HPA values ([#3554](https://github.com/kedacore/keda/issues/3554))
-- **Datadog Scaler:** Fix: panic in datadog scaler ([#3448](https://github.com/kedacore/keda/issues/3448))
-- **RabbitMQ Scaler:** Parse vhost correctly if it's provided in the host url ([#3602](https://github.com/kedacore/keda/issues/3602))
+- **General**: Metrics endpoint returns correct HPA values ([#3554](https://github.com/kedacore/keda/issues/3554))
+- **Datadog Scaler**: Fix: panic in datadog scaler ([#3448](https://github.com/kedacore/keda/issues/3448))
+- **RabbitMQ Scaler**: Parse vhost correctly if it's provided in the host url ([#3602](https://github.com/kedacore/keda/issues/3602))
### Deprecations
@@ -427,52 +437,52 @@ None.
### Other
-- **General:** Execute trivy scan (on PRs) only if there are changes in deps ([#3540](https://github.com/kedacore/keda/issues/3540))
-- **General:** Use re-usable workflows for GitHub Actions ([#2569](https://github.com/kedacore/keda/issues/2569))
+- **General**: Execute trivy scan (on PRs) only if there are changes in deps ([#3540](https://github.com/kedacore/keda/issues/3540))
+- **General**: Use re-usable workflows for GitHub Actions ([#2569](https://github.com/kedacore/keda/issues/2569))
## v2.8.0
### New
-- **General:** Introduce new AWS DynamoDB Streams Scaler ([#3124](https://github.com/kedacore/keda/issues/3124))
-- **General:** Introduce new NATS JetStream scaler ([#2391](https://github.com/kedacore/keda/issues/2391))
-- **General:** Introduce `activationThreshold`/`minMetricValue` for all scalers ([#2800](https://github.com/kedacore/keda/issues/2800))
-- **General:** Support for `minReplicaCount` in ScaledJob ([#3426](https://github.com/kedacore/keda/issues/3426))
-- **General:** Support to customize HPA name ([#3057](https://github.com/kedacore/keda/issues/3057))
-- **General:** Make propagation policy for ScaledJob rollout configurable ([#2910](https://github.com/kedacore/keda/issues/2910))
-- **General:** Support for Azure AD Workload Identity as a pod identity provider. ([#2487](https://github.com/kedacore/keda/issues/2487)|[#2656](https://github.com/kedacore/keda/issues/2656))
-- **General:** Support for permission segregation when using Azure AD Pod / Workload Identity. ([#2656](https://github.com/kedacore/keda/issues/2656))
-- **AWS SQS Queue Scaler:** Support for scaling to include in-flight messages. ([#3133](https://github.com/kedacore/keda/issues/3133))
-- **Azure Pipelines Scaler:** Support for Azure Pipelines to support demands (capabilities) ([#2328](https://github.com/kedacore/keda/issues/2328))
-- **CPU Scaler:** Support for targeting specific container in a pod ([#1378](https://github.com/kedacore/keda/issues/1378))
-- **GCP Stackdriver Scaler:** Added aggregation parameters ([#3008](https://github.com/kedacore/keda/issues/3008))
-- **Kafka Scaler:** Support of passphrase encrypted PKCS #\8 private key ([3449](https://github.com/kedacore/keda/issues/3449))
-- **Memory Scaler:** Support for targeting specific container in a pod ([#1378](https://github.com/kedacore/keda/issues/1378))
-- **Prometheus Scaler:** Add `ignoreNullValues` to return error when prometheus return null in values ([#3065](https://github.com/kedacore/keda/issues/3065))
+- **General**: Introduce `activationThreshold`/`minMetricValue` for all scalers ([#2800](https://github.com/kedacore/keda/issues/2800))
+- **General**: Introduce new AWS DynamoDB Streams Scaler ([#3124](https://github.com/kedacore/keda/issues/3124))
+- **General**: Introduce new NATS JetStream scaler ([#2391](https://github.com/kedacore/keda/issues/2391))
+- **General**: Make propagation policy for ScaledJob rollout configurable ([#2910](https://github.com/kedacore/keda/issues/2910))
+- **General**: Support for Azure AD Workload Identity as a pod identity provider. ([#2487](https://github.com/kedacore/keda/issues/2487)|[#2656](https://github.com/kedacore/keda/issues/2656))
+- **General**: Support for `minReplicaCount` in ScaledJob ([#3426](https://github.com/kedacore/keda/issues/3426))
+- **General**: Support for permission segregation when using Azure AD Pod / Workload Identity. ([#2656](https://github.com/kedacore/keda/issues/2656))
+- **General**: Support to customize HPA name ([#3057](https://github.com/kedacore/keda/issues/3057))
+- **AWS SQS Queue Scaler**: Support for scaling to include in-flight messages. ([#3133](https://github.com/kedacore/keda/issues/3133))
+- **Azure Pipelines Scaler**: Support for Azure Pipelines to support demands (capabilities) ([#2328](https://github.com/kedacore/keda/issues/2328))
+- **CPU Scaler**: Support for targeting specific container in a pod ([#1378](https://github.com/kedacore/keda/issues/1378))
+- **GCP Stackdriver Scaler**: Added aggregation parameters ([#3008](https://github.com/kedacore/keda/issues/3008))
+- **Kafka Scaler**: Support for passphrase-encrypted PKCS #8 private key ([#3449](https://github.com/kedacore/keda/issues/3449))
+- **Memory Scaler**: Support for targeting specific container in a pod ([#1378](https://github.com/kedacore/keda/issues/1378))
+- **Prometheus Scaler**: Add `ignoreNullValues` to return an error when Prometheus returns null values ([#3065](https://github.com/kedacore/keda/issues/3065))
### Improvements
-- **General:** Add settings for configuring leader election ([#2836](https://github.com/kedacore/keda/issues/2836))
-- **General:** `external` extension reduces connection establishment with long links ([#3193](https://github.com/kedacore/keda/issues/3193))
-- **General:** Reference ScaledObject's/ScaledJob's name in the scalers log ([3419](https://github.com/kedacore/keda/issues/3419))
-- **General:** Use `mili` scale for the returned metrics ([#3135](https://github.com/kedacore/keda/issues/3135))
-- **General:** Use more readable timestamps in KEDA Operator logs ([#3066](https://github.com/kedacore/keda/issues/3066))
-- **Kafka Scaler:** Handle Sarama errors properly ([#3056](https://github.com/kedacore/keda/issues/3056))
+- **General**: Add settings for configuring leader election ([#2836](https://github.com/kedacore/keda/issues/2836))
+- **General**: `external` extension reduces connection establishment with long links ([#3193](https://github.com/kedacore/keda/issues/3193))
+- **General**: Reference ScaledObject's/ScaledJob's name in the scalers log ([#3419](https://github.com/kedacore/keda/issues/3419))
+- **General**: Use `milli` scale for the returned metrics ([#3135](https://github.com/kedacore/keda/issues/3135))
+- **General**: Use more readable timestamps in KEDA Operator logs ([#3066](https://github.com/kedacore/keda/issues/3066))
+- **Kafka Scaler**: Handle Sarama errors properly ([#3056](https://github.com/kedacore/keda/issues/3056))
### Fixes
-- **General:** Provide patch for CVE-2022-27191 vulnerability ([#3378](https://github.com/kedacore/keda/issues/3378))
-- **General:** Refactor adapter startup to ensure proper log initilization. ([2316](https://github.com/kedacore/keda/issues/2316))
-- **General:** Scaleobject ready condition 'False/Unknown' to 'True' requeue ([#3096](https://github.com/kedacore/keda/issues/3096))
-- **General:** Use `go install` in the Makefile for downloading dependencies ([#2916](https://github.com/kedacore/keda/issues/2916))
-- **General:** Use metricName from GetMetricsSpec in ScaledJobs instead of `queueLength` ([#3032](https://github.com/kedacore/keda/issues/3032))
-- **ActiveMQ Scaler:** KEDA doesn't respect restAPITemplate ([#3188](https://github.com/kedacore/keda/issues/3188))
-- **Azure Eventhub Scaler:** KEDA operator crashes on nil memory panic if the eventhub connectionstring for Azure Eventhub Scaler contains an invalid character ([#3082](https://github.com/kedacore/keda/issues/3082))
-- **Azure Pipelines Scaler:** Fix issue with Azure Pipelines wrong PAT Auth. ([#3159](https://github.com/kedacore/keda/issues/3159))
-- **Datadog Scaler:** Ensure that returns the same element that has been checked ([#3448](https://github.com/kedacore/keda/issues/3448))
-- **Kafka Scaler:** Check `lagThreshold` is a positive number ([#3366](https://github.com/kedacore/keda/issues/3366))
-- **Selenium Grid Scaler:** Fix bug where edge active sessions not being properly counted ([#2709](https://github.com/kedacore/keda/issues/2709))
-- **Selenium Grid Scaler:** Fix bug where Max Sessions was not working correctly ([#3061](https://github.com/kedacore/keda/issues/3061))
+- **General**: Provide patch for CVE-2022-27191 vulnerability ([#3378](https://github.com/kedacore/keda/issues/3378))
+- **General**: Refactor adapter startup to ensure proper log initialization. ([#2316](https://github.com/kedacore/keda/issues/2316))
+- **General**: Requeue ScaledObject when its ready condition changes from 'False/Unknown' to 'True' ([#3096](https://github.com/kedacore/keda/issues/3096))
+- **General**: Use `go install` in the Makefile for downloading dependencies ([#2916](https://github.com/kedacore/keda/issues/2916))
+- **General**: Use metricName from GetMetricsSpec in ScaledJobs instead of `queueLength` ([#3032](https://github.com/kedacore/keda/issues/3032))
+- **ActiveMQ Scaler**: KEDA doesn't respect restAPITemplate ([#3188](https://github.com/kedacore/keda/issues/3188))
+- **Azure Eventhub Scaler**: KEDA operator crashes on nil memory panic if the eventhub connectionstring for Azure Eventhub Scaler contains an invalid character ([#3082](https://github.com/kedacore/keda/issues/3082))
+- **Azure Pipelines Scaler**: Fix issue with Azure Pipelines wrong PAT Auth. ([#3159](https://github.com/kedacore/keda/issues/3159))
+- **Datadog Scaler**: Ensure that returns the same element that has been checked ([#3448](https://github.com/kedacore/keda/issues/3448))
+- **Kafka Scaler**: Check `lagThreshold` is a positive number ([#3366](https://github.com/kedacore/keda/issues/3366))
+- **Selenium Grid Scaler**: Fix bug where edge active sessions not being properly counted ([#2709](https://github.com/kedacore/keda/issues/2709))
+- **Selenium Grid Scaler**: Fix bug where Max Sessions was not working correctly ([#3061](https://github.com/kedacore/keda/issues/3061))
### Deprecations
@@ -484,14 +494,14 @@ None.
### Other
-- **General:** Migrate e2e test to Go. ([2737](https://github.com/kedacore/keda/issues/2737))
- **General**: Bump Golang to 1.17.13 and deps ([#3447](https://github.com/kedacore/keda/issues/3447))
-- **General:** Fix devcontainer on ARM64 Arch. ([3084](https://github.com/kedacore/keda/issues/3084))
-- **General:** Improve error message in resolving ServiceAccount for AWS EKS PodIdentity ([3142](https://github.com/kedacore/keda/issues/3142))
-- **General:** Improve e2e on PR process through comments. ([3004](https://github.com/kedacore/keda/issues/3004))
-- **General:** Split e2e test by functionality. ([#3270](https://github.com/kedacore/keda/issues/3270))
-- **General:** Unify the used tooling on different workflows and arch. ([3092](https://github.com/kedacore/keda/issues/3092))
-- **General:** Use Github's Checks API for e2e tests on PR. ([2567](https://github.com/kedacore/keda/issues/2567))
+- **General**: Fix devcontainer on ARM64 Arch. ([#3084](https://github.com/kedacore/keda/issues/3084))
+- **General**: Improve e2e on PR process through comments. ([#3004](https://github.com/kedacore/keda/issues/3004))
+- **General**: Improve error message in resolving ServiceAccount for AWS EKS PodIdentity ([#3142](https://github.com/kedacore/keda/issues/3142))
+- **General**: Migrate e2e test to Go. ([#2737](https://github.com/kedacore/keda/issues/2737))
+- **General**: Split e2e test by functionality. ([#3270](https://github.com/kedacore/keda/issues/3270))
+- **General**: Unify the used tooling on different workflows and arch. ([#3092](https://github.com/kedacore/keda/issues/3092))
+- **General**: Use Github's Checks API for e2e tests on PR. ([#2567](https://github.com/kedacore/keda/issues/2567))
## v2.7.1
@@ -501,52 +511,52 @@ None.
### Other
-- **General**: Fix CVE-2022-21221 in `github.com/valyala/fasthttp` ([#2775](https://github.com/kedacore/keda/issues/2775))
- **General**: Bump Golang to 1.17.9 ([#3016](https://github.com/kedacore/keda/issues/3016))
- **General**: Fix autoscaling behaviour while paused. ([#3009](https://github.com/kedacore/keda/issues/3009))
+- **General**: Fix CVE-2022-21221 in `github.com/valyala/fasthttp` ([#2775](https://github.com/kedacore/keda/issues/2775))
## v2.7.0
### New
-- **General:** Introduce annotation `"autoscaling.keda.sh/paused-replicas"` for ScaledObjects to pause scaling at a fixed replica count. ([#944](https://github.com/kedacore/keda/issues/944))
-- **General:** Introduce ARM-based container image for KEDA ([#2263](https://github.com/kedacore/keda/issues/2263)|[#2262](https://github.com/kedacore/keda/issues/2262))
-- **General:** Introduce new AWS DynamoDB Scaler ([#2486](https://github.com/kedacore/keda/issues/2482))
-- **General:** Introduce new Azure Data Explorer Scaler ([#1488](https://github.com/kedacore/keda/issues/1488)|[#2734](https://github.com/kedacore/keda/issues/2734))
-- **General:** Introduce new GCP Stackdriver Scaler ([#2661](https://github.com/kedacore/keda/issues/2661))
-- **General:** Introduce new GCP Storage Scaler ([#2628](https://github.com/kedacore/keda/issues/2628))
-- **General:** Provide support for authentication via Azure Key Vault ([#900](https://github.com/kedacore/keda/issues/900)|[#2733](https://github.com/kedacore/keda/issues/2733))
+- **General**: Introduce annotation `"autoscaling.keda.sh/paused-replicas"` for ScaledObjects to pause scaling at a fixed replica count. ([#944](https://github.com/kedacore/keda/issues/944))
+- **General**: Introduce ARM-based container image for KEDA ([#2263](https://github.com/kedacore/keda/issues/2263)|[#2262](https://github.com/kedacore/keda/issues/2262))
+- **General**: Introduce new AWS DynamoDB Scaler ([#2482](https://github.com/kedacore/keda/issues/2482))
+- **General**: Introduce new Azure Data Explorer Scaler ([#1488](https://github.com/kedacore/keda/issues/1488)|[#2734](https://github.com/kedacore/keda/issues/2734))
+- **General**: Introduce new GCP Stackdriver Scaler ([#2661](https://github.com/kedacore/keda/issues/2661))
+- **General**: Introduce new GCP Storage Scaler ([#2628](https://github.com/kedacore/keda/issues/2628))
+- **General**: Provide support for authentication via Azure Key Vault ([#900](https://github.com/kedacore/keda/issues/900)|[#2733](https://github.com/kedacore/keda/issues/2733))
- **General**: Support for `ValueMetricType` in `ScaledObject` for all scalers except CPU/Memory ([#2030](https://github.com/kedacore/keda/issues/2030))
### Improvements
-- **General:** Bump dependencies versions ([#2978](https://github.com/kedacore/keda/issues/2978))
-- **General:** Properly handle `restoreToOriginalReplicaCount` if `ScaleTarget` is missing ([#2872](https://github.com/kedacore/keda/issues/2872))
-- **General:** Support for running KEDA secure-by-default as non-root ([#2933](https://github.com/kedacore/keda/issues/2933))
-- **General:** Synchronize HPA annotations from ScaledObject ([#2659](https://github.com/kedacore/keda/pull/2659))
-- **General:** Updated HTTPClient to be proxy-aware, if available, from environment variables. ([#2577](https://github.com/kedacore/keda/issues/2577))
-- **General:** Using manager client in KEDA Metrics Server to avoid flush request to Kubernetes Apiserver([2914](https://github.com/kedacore/keda/issues/2914))
-- **ActiveMQ Scaler:** Add CorsHeader information to ActiveMQ Scaler ([#2884](https://github.com/kedacore/keda/issues/2884))
-- **AWS CloudWatch:** Add support to use expressions([#2998](https://github.com/kedacore/keda/issues/2998))
-- **Azure Application Insights Scaler:** Provide support for non-public clouds ([#2735](https://github.com/kedacore/keda/issues/2735))
-- **Azure Blob Storage Scaler:** Add optional parameters for counting blobs recursively ([#1789](https://github.com/kedacore/keda/issues/1789))
-- **Azure Event Hub Scaler:** Improve logging when blob container not found ([#2363](https://github.com/kedacore/keda/issues/2363))
-- **Azure Event Hub Scaler:** Provide support for non-public clouds ([#1915](https://github.com/kedacore/keda/issues/1915))
-- **Azure Log Analytics Scaler:** Provide support for non-public clouds ([#1916](https://github.com/kedacore/keda/issues/1916))
-- **Azure Monitor Scaler:** Provide support for non-public clouds ([#1917](https://github.com/kedacore/keda/issues/1917))
-- **Azure Queue:** Don't call Azure queue GetProperties API unnecessarily ([#2613](https://github.com/kedacore/keda/pull/2613))
-- **Datadog Scaler:** Validate query to contain `{` to prevent panic on invalid query ([#2625](https://github.com/kedacore/keda/issues/2625))
-- **Datadog Scaler:** Several improvements, including a new optional parameter `metricUnavailableValue` to fill data when no Datadog metric was returned ([#2657](https://github.com/kedacore/keda/issues/2657))
-- **Datadog Scaler:** Rely on Datadog API to validate the query ([2761](https://github.com/kedacore/keda/issues/2761))
-- **Graphite Scaler** Use the latest non-null datapoint returned by query. ([#2625](https://github.com/kedacore/keda/issues/2944))
-- **Kafka Scaler:** Make "disable" a valid value for tls auth parameter ([#2608](https://github.com/kedacore/keda/issues/2608))
-- **Kafka Scaler:** New `scaleToZeroOnInvalidOffset` to control behavior when partitions have an invalid offset ([#2033](https://github.com/kedacore/keda/issues/2033)[#2612](https://github.com/kedacore/keda/issues/2612))
-- **Metric API Scaler:** Improve error handling on not-ok response ([#2317](https://github.com/kedacore/keda/issues/2317))
-- **New Relic Scaler:** Support to get account value from authentication resources. ([#2883](https://github.com/kedacore/keda/issues/2883))
-- **Prometheus Scaler:** Check and properly inform user that `threshold` is not set ([#2793](https://github.com/kedacore/keda/issues/2793))
-- **Prometheus Scaler:** Support for `X-Scope-OrgID` header ([#2667](https://github.com/kedacore/keda/issues/2667))
-- **RabbitMQ Scaler:** Include `vhost` for RabbitMQ when retrieving queue info with `useRegex` ([#2498](https://github.com/kedacore/keda/issues/2498))
-- **Selenium Grid Scaler:** Consider `maxSession` grid info when scaling. ([#2618](https://github.com/kedacore/keda/issues/2618))
+- **General**: Bump dependencies versions ([#2978](https://github.com/kedacore/keda/issues/2978))
+- **General**: Properly handle `restoreToOriginalReplicaCount` if `ScaleTarget` is missing ([#2872](https://github.com/kedacore/keda/issues/2872))
+- **General**: Support for running KEDA secure-by-default as non-root ([#2933](https://github.com/kedacore/keda/issues/2933))
+- **General**: Synchronize HPA annotations from ScaledObject ([#2659](https://github.com/kedacore/keda/pull/2659))
+- **General**: Updated HTTPClient to be proxy-aware, if available, from environment variables. ([#2577](https://github.com/kedacore/keda/issues/2577))
+- **General**: Using manager client in KEDA Metrics Server to avoid flush requests to Kubernetes Apiserver ([#2914](https://github.com/kedacore/keda/issues/2914))
+- **ActiveMQ Scaler**: Add CorsHeader information to ActiveMQ Scaler ([#2884](https://github.com/kedacore/keda/issues/2884))
+- **AWS CloudWatch**: Add support to use expressions ([#2998](https://github.com/kedacore/keda/issues/2998))
+- **Azure Application Insights Scaler**: Provide support for non-public clouds ([#2735](https://github.com/kedacore/keda/issues/2735))
+- **Azure Blob Storage Scaler**: Add optional parameters for counting blobs recursively ([#1789](https://github.com/kedacore/keda/issues/1789))
+- **Azure Event Hub Scaler**: Improve logging when blob container not found ([#2363](https://github.com/kedacore/keda/issues/2363))
+- **Azure Event Hub Scaler**: Provide support for non-public clouds ([#1915](https://github.com/kedacore/keda/issues/1915))
+- **Azure Log Analytics Scaler**: Provide support for non-public clouds ([#1916](https://github.com/kedacore/keda/issues/1916))
+- **Azure Monitor Scaler**: Provide support for non-public clouds ([#1917](https://github.com/kedacore/keda/issues/1917))
+- **Azure Queue**: Don't call Azure queue GetProperties API unnecessarily ([#2613](https://github.com/kedacore/keda/pull/2613))
+- **Datadog Scaler**: Rely on Datadog API to validate the query ([#2761](https://github.com/kedacore/keda/issues/2761))
+- **Datadog Scaler**: Several improvements, including a new optional parameter `metricUnavailableValue` to fill data when no Datadog metric was returned ([#2657](https://github.com/kedacore/keda/issues/2657))
+- **Datadog Scaler**: Validate query to contain `{` to prevent panic on invalid query ([#2625](https://github.com/kedacore/keda/issues/2625))
+- **Graphite Scaler**: Use the latest non-null datapoint returned by query ([#2944](https://github.com/kedacore/keda/issues/2944))
+- **Kafka Scaler**: Make "disable" a valid value for tls auth parameter ([#2608](https://github.com/kedacore/keda/issues/2608))
+- **Kafka Scaler**: New `scaleToZeroOnInvalidOffset` to control behavior when partitions have an invalid offset ([#2033](https://github.com/kedacore/keda/issues/2033)|[#2612](https://github.com/kedacore/keda/issues/2612))
+- **Metric API Scaler**: Improve error handling on not-ok response ([#2317](https://github.com/kedacore/keda/issues/2317))
+- **New Relic Scaler**: Support getting the account value from authentication resources ([#2883](https://github.com/kedacore/keda/issues/2883))
+- **Prometheus Scaler**: Check and properly inform user that `threshold` is not set ([#2793](https://github.com/kedacore/keda/issues/2793))
+- **Prometheus Scaler**: Support for `X-Scope-OrgID` header ([#2667](https://github.com/kedacore/keda/issues/2667))
+- **RabbitMQ Scaler**: Include `vhost` for RabbitMQ when retrieving queue info with `useRegex` ([#2498](https://github.com/kedacore/keda/issues/2498))
+- **Selenium Grid Scaler**: Consider `maxSession` grid info when scaling ([#2618](https://github.com/kedacore/keda/issues/2618))
### Deprecations
@@ -558,26 +568,26 @@ None.
### Other
-- **General:** Clean go.mod to fix golangci-lint ([#2783](https://github.com/kedacore/keda/issues/2783))
-- **General:** Consistent file naming in `pkg/scalers/` ([#2806](https://github.com/kedacore/keda/issues/2806))
-- **General:** Fix mismatched errors for updating HPA ([#2719](https://github.com/kedacore/keda/issues/2719))
-- **General:** Improve e2e tests reliability ([#2580](https://github.com/kedacore/keda/issues/2580))
-- **General:** Improve e2e tests to always cleanup resources in cluster ([#2584](https://github.com/kedacore/keda/issues/2584))
-- **General:** Internally represent value and threshold as int64 ([#2790](https://github.com/kedacore/keda/issues/2790))
-- **General:** Refactor active directory endpoint parsing for Azure scalers. ([#2853](https://github.com/kedacore/keda/pull/2853))
-- **AWS CloudWatch:** Adding e2e test ([#1525](https://github.com/kedacore/keda/issues/1525))
-- **AWS DynamoDB:** Setup AWS DynamoDB test account ([#2803](https://github.com/kedacore/keda/issues/2803))
-- **AWS Kinesis Stream:** Adding e2e test ([#1526](https://github.com/kedacore/keda/issues/1526))
-- **AWS SQS Queue:** Adding e2e test ([#1527](https://github.com/kedacore/keda/issues/1527))
-- **Azure Data Explorer:** Adding e2e test ([#2841](https://github.com/kedacore/keda/issues/2841))
-- **Azure Data Explorer:** Replace deprecated function `iter.Next()` in favour of `iter.NextRowOrError()` ([#2989](https://github.com/kedacore/keda/issues/2989))
-- **Azure Service Bus:** Adding e2e test ([#2731](https://github.com/kedacore/keda/issues/2731)|[#2732](https://github.com/kedacore/keda/issues/2732))
-- **External Scaler:** Adding e2e test. ([#2697](https://github.com/kedacore/keda/issues/2697))
-- **External Scaler:** Fix issue with internal KEDA core prefix being passed to external scaler. ([#2640](https://github.com/kedacore/keda/issues/2640))
-- **GCP Pubsub Scaler:** Adding e2e test ([#1528](https://github.com/kedacore/keda/issues/1528))
-- **Hashicorp Vault Secret Provider:** Adding e2e test ([#2842](https://github.com/kedacore/keda/issues/2842))
-- **Memory Scaler:** Adding e2e test ([#2220](https://github.com/kedacore/keda/issues/2220))
-- **Selenium Grid Scaler:** Adding e2e test ([#2791](https://github.com/kedacore/keda/issues/2791))
+- **General**: Clean go.mod to fix golangci-lint ([#2783](https://github.com/kedacore/keda/issues/2783))
+- **General**: Consistent file naming in `pkg/scalers/` ([#2806](https://github.com/kedacore/keda/issues/2806))
+- **General**: Fix mismatched errors for updating HPA ([#2719](https://github.com/kedacore/keda/issues/2719))
+- **General**: Improve e2e tests reliability ([#2580](https://github.com/kedacore/keda/issues/2580))
+- **General**: Improve e2e tests to always cleanup resources in cluster ([#2584](https://github.com/kedacore/keda/issues/2584))
+- **General**: Internally represent value and threshold as int64 ([#2790](https://github.com/kedacore/keda/issues/2790))
+- **General**: Refactor Active Directory endpoint parsing for Azure scalers ([#2853](https://github.com/kedacore/keda/pull/2853))
+- **AWS CloudWatch**: Adding e2e test ([#1525](https://github.com/kedacore/keda/issues/1525))
+- **AWS DynamoDB**: Setup AWS DynamoDB test account ([#2803](https://github.com/kedacore/keda/issues/2803))
+- **AWS Kinesis Stream**: Adding e2e test ([#1526](https://github.com/kedacore/keda/issues/1526))
+- **AWS SQS Queue**: Adding e2e test ([#1527](https://github.com/kedacore/keda/issues/1527))
+- **Azure Data Explorer**: Adding e2e test ([#2841](https://github.com/kedacore/keda/issues/2841))
+- **Azure Data Explorer**: Replace deprecated function `iter.Next()` in favour of `iter.NextRowOrError()` ([#2989](https://github.com/kedacore/keda/issues/2989))
+- **Azure Service Bus**: Adding e2e test ([#2731](https://github.com/kedacore/keda/issues/2731)|[#2732](https://github.com/kedacore/keda/issues/2732))
+- **External Scaler**: Adding e2e test ([#2697](https://github.com/kedacore/keda/issues/2697))
+- **External Scaler**: Fix issue with internal KEDA core prefix being passed to external scaler. ([#2640](https://github.com/kedacore/keda/issues/2640))
+- **GCP Pubsub Scaler**: Adding e2e test ([#1528](https://github.com/kedacore/keda/issues/1528))
+- **Hashicorp Vault Secret Provider**: Adding e2e test ([#2842](https://github.com/kedacore/keda/issues/2842))
+- **Memory Scaler**: Adding e2e test ([#2220](https://github.com/kedacore/keda/issues/2220))
+- **Selenium Grid Scaler**: Adding e2e test ([#2791](https://github.com/kedacore/keda/issues/2791))
## v2.6.1
@@ -588,7 +598,7 @@ None.
### Other
-- **General:** Fix failing tests based on the scale to zero bug ([#2603](https://github.com/kedacore/keda/issues/2603))
+- **General**: Fix failing tests based on the scale to zero bug ([#2603](https://github.com/kedacore/keda/issues/2603))
## v2.6.0
@@ -602,22 +612,22 @@ None.
### Improvements
-- **General:** Delete the cache entry when a ScaledObject is deleted ([#2564](https://github.com/kedacore/keda/pull/2564))
-- **General:** Fail fast on `buildScalers` when not able to resolve a secret that a deployment is relying on ([#2394](https://github.com/kedacore/keda/pull/2394))
-- **General:** `keda-operator` Cluster Role: add `list` and `watch` access to service accounts ([#2406](https://github.com/kedacore/keda/pull/2406))|([#2410](https://github.com/kedacore/keda/pull/2410))
-- **General:** Sign KEDA images published on GitHub Container Registry ([#2501](https://github.com/kedacore/keda/pull/2501))|([#2502](https://github.com/kedacore/keda/pull/2502))|([#2504](https://github.com/kedacore/keda/pull/2504))
-- **AWS Scalers:** Support temporary AWS credentials using session tokens ([#2573](https://github.com/kedacore/keda/pull/2573))
-- **AWS SQS Scaler:** Allow using simple queue name instead of URL ([#2483](https://github.com/kedacore/keda/pull/2483))
-- **Azure EventHub Scaler:** Don't expose connection string in metricName ([#2404](https://github.com/kedacore/keda/pull/2404))
-- **Azure Pipelines Scaler:** Support `poolName` or `poolID` validation ([#2370](https://github.com/kedacore/keda/pull/2370))
-- **CPU Scaler:** Adding e2e test for the cpu scaler ([#2441](https://github.com/kedacore/keda/pull/2441))
-- **External Scaler:** Fix wrong calculation of retry backoff duration ([#2416](https://github.com/kedacore/keda/pull/2416))
-- **Graphite Scaler:** Use the latest datapoint returned, not the earliest ([#2365](https://github.com/kedacore/keda/pull/2365))
-- **Kafka Scaler:** Allow flag `topic` to be optional, where lag of all topics within the consumer group will be used for scaling ([#2409](https://github.com/kedacore/keda/pull/2409))
-- **Kafka Scaler:** Concurrently query brokers for consumer and producer offsets ([#2405](https://github.com/kedacore/keda/pull/2405))
-- **Kubernetes Workload Scaler:** Ignore terminated pods ([#2384](https://github.com/kedacore/keda/pull/2384))
-- **PostgreSQL Scaler:** Assign PostgreSQL `userName` to correct attribute ([#2432](https://github.com/kedacore/keda/pull/2432))|([#2433](https://github.com/kedacore/keda/pull/2433))
-- **Prometheus Scaler:** Support namespaced Prometheus queries ([#2575](https://github.com/kedacore/keda/issues/2575))
+- **General**: Delete the cache entry when a ScaledObject is deleted ([#2564](https://github.com/kedacore/keda/pull/2564))
+- **General**: Fail fast on `buildScalers` when not able to resolve a secret that a deployment is relying on ([#2394](https://github.com/kedacore/keda/pull/2394))
+- **General**: `keda-operator` Cluster Role: add `list` and `watch` access to service accounts ([#2406](https://github.com/kedacore/keda/pull/2406))|([#2410](https://github.com/kedacore/keda/pull/2410))
+- **General**: Sign KEDA images published on GitHub Container Registry ([#2501](https://github.com/kedacore/keda/pull/2501))|([#2502](https://github.com/kedacore/keda/pull/2502))|([#2504](https://github.com/kedacore/keda/pull/2504))
+- **AWS Scalers**: Support temporary AWS credentials using session tokens ([#2573](https://github.com/kedacore/keda/pull/2573))
+- **AWS SQS Scaler**: Allow using simple queue name instead of URL ([#2483](https://github.com/kedacore/keda/pull/2483))
+- **Azure EventHub Scaler**: Don't expose connection string in metricName ([#2404](https://github.com/kedacore/keda/pull/2404))
+- **Azure Pipelines Scaler**: Support `poolName` or `poolID` validation ([#2370](https://github.com/kedacore/keda/pull/2370))
+- **CPU Scaler**: Adding e2e test for the cpu scaler ([#2441](https://github.com/kedacore/keda/pull/2441))
+- **External Scaler**: Fix wrong calculation of retry backoff duration ([#2416](https://github.com/kedacore/keda/pull/2416))
+- **Graphite Scaler**: Use the latest datapoint returned, not the earliest ([#2365](https://github.com/kedacore/keda/pull/2365))
+- **Kafka Scaler**: Allow flag `topic` to be optional, where lag of all topics within the consumer group will be used for scaling ([#2409](https://github.com/kedacore/keda/pull/2409))
+- **Kafka Scaler**: Concurrently query brokers for consumer and producer offsets ([#2405](https://github.com/kedacore/keda/pull/2405))
+- **Kubernetes Workload Scaler**: Ignore terminated pods ([#2384](https://github.com/kedacore/keda/pull/2384))
+- **PostgreSQL Scaler**: Assign PostgreSQL `userName` to correct attribute ([#2432](https://github.com/kedacore/keda/pull/2432))|([#2433](https://github.com/kedacore/keda/pull/2433))
+- **Prometheus Scaler**: Support namespaced Prometheus queries ([#2575](https://github.com/kedacore/keda/issues/2575))
### Breaking Changes
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1c317835989..cc8ee1175db 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -132,7 +132,7 @@ For more installation options visit the [pre-commits](https://pre-commit.com).
Before running pre-commit, you must install the [golangci-lint](https://golangci-lint.run/) tool as a static check tool for Go code (it bundles a collection of linters)
```shell script
-curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.46.2
+curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.54.2
# or
brew install golangci/tap/golangci-lint
```
diff --git a/Makefile b/Makefile
index 291a94b9444..14f48c2ceb6 100644
--- a/Makefile
+++ b/Makefile
@@ -363,3 +363,7 @@ help: ## Display this help.
.PHONY: docker-build-dev-containers
docker-build-dev-containers: ## Build dev-containers image
docker build -f .devcontainer/Dockerfile .
+
+.PHONY: validate-changelog
+validate-changelog: ## Validate changelog
+ ./hack/validate-changelog.sh
diff --git a/README.md b/README.md
index c7ee8bb5029..4a60649ea97 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ resource definition.
KEDA can run on both the cloud and the edge, integrates natively with Kubernetes components such as the Horizontal
Pod Autoscaler, and has no external dependencies.
-We are a Cloud Native Computing Foundation (CNCF) incubation project.
+We are a Cloud Native Computing Foundation (CNCF) graduated project.

@@ -32,6 +32,7 @@ We are a Cloud Native Computing Foundation (CNCF) incubation project.
- [Releases](#releases)
- [Contributing](#contributing)
- [Building & deploying locally](#building--deploying-locally)
+ - [Testing strategy](#testing-strategy)
@@ -83,3 +84,6 @@ You can find contributing guide [here](./CONTRIBUTING.md).
### Building & deploying locally
Learn how to build & deploy KEDA locally [here](./BUILD.md).
+
+### Testing strategy
+Learn more about our testing strategy [here](./TESTING.md).
diff --git a/ROADMAP.md b/ROADMAP.md
index 5e660d0127d..8ad9c4f53f0 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -14,7 +14,7 @@ Here is an overview of our current release estimations:
| Version | Estimated Release Date |
|:--------|:-----------------------------------------------------|
-| v2.12 | September 12th, 2023 |
+| v2.12 | September 26th, 2023 |
| v2.13 | January 11th, 2024 *(planned later due to holidays)* |
| v2.14 | April 12th, 2024 |
diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 00000000000..67d465b37c6
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,22 @@
+# Testing strategy
+
+## Unit tests / code coverage
+
+There are unit tests present for each scaler implementation, and for the majority of the core.
+Code coverage is something that we need to work on constantly.
+
+However, code coverage tooling is not useful for the KEDA project in its current state: a lot of functionality is covered by end-to-end tests, which such tooling does not take into account, so our code coverage metrics would be misleading.
+
+For each PR, we automatically build and run our unit test suite, and we also build Docker images for both amd64 and arm64 architectures. As part of our CI process, we also perform various security checks; you can learn more about these in our security section.
+
+Lastly, we automatically perform code quality analysis with golangci-lint and check licenses of our dependencies with FOSSA.
+
+## End-to-end tests
+
+There are end-to-end tests for the core functionality and the majority of KEDA's features, as well as for the scalers that it offers. These tests are required for every PR and run in CI (however, maintainers must trigger them manually as a security precaution). Additionally, we run our e2e test suite for every commit merged to the main branch, as well as during our nightly CI schedule ([link](https://github.com/kedacore/keda/actions/workflows/nightly-e2e.yml)). Implementing end-to-end tests is a requirement for adding a new scaler, as per [our policy](https://github.com/kedacore/governance/blob/main/SCALERS.md#requirements-for-a-built-in-scaler).
+
+The project runs two Kubernetes clusters on which all e2e tests run automatically. Microsoft Azure has donated a dedicated Azure subscription so that all maintainers can manage these cloud resources and so that we can run our automated testing. This is in addition to CNCF's Cloud Credits program, which we use to provision test resources in AWS & GCP for scaler e2e tests as well.
+
+Both the cluster management and the cloud resources required for our automated tests are managed with Terraform and [available on GitHub](https://github.com/kedacore/testing-infrastructure), so that every contributor can open a PR with the infrastructure changes they require. Everything is deployed automatically using GitHub Actions to ensure we are running the latest configuration and can easily migrate to other infrastructure if we have to.
+
+Additionally, CNCF sponsors KEDA by providing arm64 machines on which we build our ARM64 images, and Vexxhost sponsors an OpenStack instance so that we can run tests on arm64 as well.
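
As a hedged illustration of what the scaler unit tests mentioned above typically look like, here is a minimal table-driven sketch; `parseQueueLength` and its error cases are hypothetical stand-ins for the kind of metadata parsing every scaler implements, not KEDA's actual API.

```go
package scalers

import (
	"fmt"
	"strconv"
	"testing"
)

// parseQueueLength is a hypothetical stand-in for the metadata parsing
// that each scaler performs on its trigger configuration.
func parseQueueLength(metadata map[string]string) (int64, error) {
	val, ok := metadata["queueLength"]
	if !ok || val == "" {
		return 0, fmt.Errorf("no queueLength given")
	}
	return strconv.ParseInt(val, 10, 64)
}

func TestParseQueueLength(t *testing.T) {
	tests := []struct {
		name     string
		metadata map[string]string
		want     int64
		wantErr  bool
	}{
		{"valid value", map[string]string{"queueLength": "5"}, 5, false},
		{"missing value", map[string]string{}, 0, true},
		{"malformed value", map[string]string{"queueLength": "five"}, 0, true},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseQueueLength(tt.metadata)
			if (err != nil) != tt.wantErr {
				t.Fatalf("unexpected error state: %v", err)
			}
			if err == nil && got != tt.want {
				t.Fatalf("got %d, want %d", got, tt.want)
			}
		})
	}
}
```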
diff --git a/apis/keda/v1alpha1/scaledobject_webhook.go b/apis/keda/v1alpha1/scaledobject_webhook.go
index 6daa3aa62d5..ce366ebc6bb 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook.go
@@ -112,7 +112,7 @@ func validateWorkload(so *ScaledObject, action string) (admission.Warnings, erro
}
func verifyTriggers(incomingSo *ScaledObject, action string) error {
- err := ValidateTriggers(scaledobjectlog.WithValues("name", incomingSo.Name), incomingSo.Spec.Triggers)
+ err := ValidateTriggers(incomingSo.Spec.Triggers)
if err != nil {
scaledobjectlog.WithValues("name", incomingSo.Name).Error(err, "validation error")
prommetrics.RecordScaledObjectValidatingErrors(incomingSo.Namespace, action, "incorrect-triggers")
diff --git a/apis/keda/v1alpha1/scaledobject_webhook_test.go b/apis/keda/v1alpha1/scaledobject_webhook_test.go
index fbde5787df1..1325f2d5288 100644
--- a/apis/keda/v1alpha1/scaledobject_webhook_test.go
+++ b/apis/keda/v1alpha1/scaledobject_webhook_test.go
@@ -18,133 +18,18 @@ package v1alpha1
import (
"context"
- "crypto/tls"
- "fmt"
- "net"
- "path/filepath"
- "testing"
- "time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- admissionv1beta1 "k8s.io/api/admission/v1beta1"
appsv1 "k8s.io/api/apps/v1"
v2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
- clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
"k8s.io/utils/pointer"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/envtest"
- logf "sigs.k8s.io/controller-runtime/pkg/log"
- "sigs.k8s.io/controller-runtime/pkg/log/zap"
- //+kubebuilder:scaffold:imports
)
-// These tests use Ginkgo (BDD-style Go testing framework). Refer to
-// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
-
-var cfg *rest.Config
-var k8sClient client.Client
-var testEnv *envtest.Environment
-var ctx context.Context
-var cancel context.CancelFunc
-
-const (
- workloadName = "deployment-name"
- soName = "test-so"
-)
-
-func TestAPIs(t *testing.T) {
- RegisterFailHandler(Fail)
-
- RunSpecs(t, "Webhook Suite")
-}
-
-var _ = BeforeSuite(func() {
- logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
-
- ctx, cancel = context.WithCancel(context.Background())
-
- By("bootstrapping test environment")
- testEnv = &envtest.Environment{
- CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
- ErrorIfCRDPathMissing: false,
- WebhookInstallOptions: envtest.WebhookInstallOptions{
- Paths: []string{filepath.Join("..", "..", "..", "config", "webhooks")},
- },
- }
-
- var err error
- // cfg is defined in this file globally.
- done := make(chan interface{})
- go func() {
- defer GinkgoRecover()
- cfg, err = testEnv.Start()
- close(done)
- }()
- Eventually(done).WithTimeout(time.Minute).Should(BeClosed())
- Expect(err).NotTo(HaveOccurred())
- Expect(cfg).NotTo(BeNil())
-
- scheme := runtime.NewScheme()
- err = AddToScheme(scheme)
- Expect(err).NotTo(HaveOccurred())
-
- err = clientgoscheme.AddToScheme(scheme)
- Expect(err).NotTo(HaveOccurred())
-
- err = admissionv1beta1.AddToScheme(scheme)
- Expect(err).NotTo(HaveOccurred())
-
- //+kubebuilder:scaffold:scheme
-
- k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
- Expect(err).NotTo(HaveOccurred())
- Expect(k8sClient).NotTo(BeNil())
-
- // start webhook server using Manager
- webhookInstallOptions := &testEnv.WebhookInstallOptions
- mgr, err := ctrl.NewManager(cfg, ctrl.Options{
- Scheme: scheme,
- Host: webhookInstallOptions.LocalServingHost,
- Port: webhookInstallOptions.LocalServingPort,
- CertDir: webhookInstallOptions.LocalServingCertDir,
- LeaderElection: false,
- MetricsBindAddress: "0",
- })
- Expect(err).NotTo(HaveOccurred())
-
- err = (&ScaledObject{}).SetupWebhookWithManager(mgr)
- Expect(err).NotTo(HaveOccurred())
-
- //+kubebuilder:scaffold:webhook
-
- go func() {
- defer GinkgoRecover()
- err = mgr.Start(ctx)
- Expect(err).NotTo(HaveOccurred())
- }()
-
- // wait for the webhook server to get ready
- dialer := &net.Dialer{Timeout: time.Second}
- addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
- Eventually(func() error {
- conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
- if err != nil {
- return err
- }
- conn.Close()
- return nil
- }).Should(Succeed())
-
-})
-
var _ = It("should validate the so creation when there isn't any hpa", func() {
namespaceName := "valid"
diff --git a/apis/keda/v1alpha1/scaletriggers_types.go b/apis/keda/v1alpha1/scaletriggers_types.go
index 0e727c39570..2e8afb6e4fd 100644
--- a/apis/keda/v1alpha1/scaletriggers_types.go
+++ b/apis/keda/v1alpha1/scaletriggers_types.go
@@ -19,7 +19,6 @@ package v1alpha1
import (
"fmt"
- "github.com/go-logr/logr"
autoscalingv2 "k8s.io/api/autoscaling/v2"
)
@@ -50,7 +49,7 @@ type AuthenticationRef struct {
// ValidateTriggers checks that general trigger metadata are valid, it checks:
// - triggerNames in ScaledObject are unique
// - useCachedMetrics is defined only for a supported triggers
-func ValidateTriggers(logger logr.Logger, triggers []ScaleTriggers) error {
+func ValidateTriggers(triggers []ScaleTriggers) error {
triggersCount := len(triggers)
if triggers != nil && triggersCount > 0 {
triggerNames := make(map[string]bool, triggersCount)
@@ -63,13 +62,6 @@ func ValidateTriggers(logger logr.Logger, triggers []ScaleTriggers) error {
}
}
- // FIXME: DEPRECATED to be removed in v2.12
- _, hasMetricName := trigger.Metadata["metricName"]
- // aws-cloudwatch, huawei-cloudeye and azure-monitor have a meaningful use of metricName
- if hasMetricName && trigger.Type != "aws-cloudwatch" && trigger.Type != "huawei-cloudeye" && trigger.Type != "azure-monitor" {
- logger.Info("\"metricName\" is deprecated and will be removed in v2.12, please do not set it anymore", "trigger.type", trigger.Type)
- }
-
name := trigger.Name
if name != "" {
if _, found := triggerNames[name]; found {
diff --git a/apis/keda/v1alpha1/scaletriggers_types_test.go b/apis/keda/v1alpha1/scaletriggers_types_test.go
index 0dab540bb0f..75c8babc865 100644
--- a/apis/keda/v1alpha1/scaletriggers_types_test.go
+++ b/apis/keda/v1alpha1/scaletriggers_types_test.go
@@ -3,7 +3,6 @@ package v1alpha1
import (
"testing"
- "github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
)
@@ -90,7 +89,7 @@ func TestValidateTriggers(t *testing.T) {
for _, test := range tests {
tt := test
t.Run(test.name, func(t *testing.T) {
- err := ValidateTriggers(logr.Discard(), tt.triggers)
+ err := ValidateTriggers(tt.triggers)
if test.expectedErrMsg == "" {
assert.NoError(t, err)
} else {
diff --git a/apis/keda/v1alpha1/suite_test.go b/apis/keda/v1alpha1/suite_test.go
new file mode 100644
index 00000000000..bb2b8bf54c7
--- /dev/null
+++ b/apis/keda/v1alpha1/suite_test.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2023 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "path/filepath"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ admissionv1beta1 "k8s.io/api/admission/v1beta1"
+ "k8s.io/apimachinery/pkg/runtime"
+ clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var ctx context.Context
+var cancel context.CancelFunc
+
+const (
+ workloadName = "deployment-name"
+ soName = "test-so"
+)
+
+func TestAPIs(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Webhook Suite")
+}
+
+var _ = BeforeSuite(func() {
+ logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+ ctx, cancel = context.WithCancel(context.Background())
+
+ By("bootstrapping test environment")
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
+ ErrorIfCRDPathMissing: false,
+ WebhookInstallOptions: envtest.WebhookInstallOptions{
+ Paths: []string{filepath.Join("..", "..", "..", "config", "webhooks")},
+ },
+ }
+ var err error
+ // cfg is defined in this file globally.
+ done := make(chan interface{})
+ go func() {
+ defer GinkgoRecover()
+ cfg, err = testEnv.Start()
+ close(done)
+ }()
+ Eventually(done).WithTimeout(time.Minute).Should(BeClosed())
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cfg).NotTo(BeNil())
+
+ scheme := runtime.NewScheme()
+ err = AddToScheme(scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = clientgoscheme.AddToScheme(scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = admissionv1beta1.AddToScheme(scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ //+kubebuilder:scaffold:scheme
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(k8sClient).NotTo(BeNil())
+
+ // start webhook server using Manager
+ webhookInstallOptions := &testEnv.WebhookInstallOptions
+ mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+ Scheme: scheme,
+ Host: webhookInstallOptions.LocalServingHost,
+ Port: webhookInstallOptions.LocalServingPort,
+ CertDir: webhookInstallOptions.LocalServingCertDir,
+ LeaderElection: false,
+ MetricsBindAddress: "0",
+ })
+ Expect(err).NotTo(HaveOccurred())
+
+ err = (&ScaledObject{}).SetupWebhookWithManager(mgr)
+ Expect(err).NotTo(HaveOccurred())
+ err = (&TriggerAuthentication{}).SetupWebhookWithManager(mgr)
+ Expect(err).NotTo(HaveOccurred())
+ err = (&ClusterTriggerAuthentication{}).SetupWebhookWithManager(mgr)
+ Expect(err).NotTo(HaveOccurred())
+
+ //+kubebuilder:scaffold:webhook
+
+ go func() {
+ defer GinkgoRecover()
+ err = mgr.Start(ctx)
+ Expect(err).NotTo(HaveOccurred())
+ }()
+
+ // wait for the webhook server to get ready
+ dialer := &net.Dialer{Timeout: time.Second}
+ addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
+ Eventually(func() error {
+ conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
+ if err != nil {
+ return err
+ }
+ conn.Close()
+ return nil
+ }).Should(Succeed())
+
+})
diff --git a/apis/keda/v1alpha1/triggerauthentication_types.go b/apis/keda/v1alpha1/triggerauthentication_types.go
index c2fb5d9ab5f..4603a68eec1 100755
--- a/apis/keda/v1alpha1/triggerauthentication_types.go
+++ b/apis/keda/v1alpha1/triggerauthentication_types.go
@@ -132,7 +132,14 @@ const (
type AuthPodIdentity struct {
Provider PodIdentityProvider `json:"provider"`
// +optional
- IdentityID string `json:"identityId"`
+ IdentityID *string `json:"identityId"`
+}
+
+func (a *AuthPodIdentity) GetIdentityID() string {
+ if a.IdentityID == nil {
+ return ""
+ }
+ return *a.IdentityID
}
// AuthSecretTargetRef is used to authenticate using a reference to a secret
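
The switch from `string` to `*string` is what lets the new validation webhook distinguish "field omitted" from "field set to an empty value". A minimal standalone sketch of that three-state logic, using a mirrored hypothetical type rather than the real API:

```go
package main

import "fmt"

// podIdentity mirrors the relevant part of AuthPodIdentity for illustration.
type podIdentity struct {
	IdentityID *string // nil = not set, "" = set but empty
}

func validate(p podIdentity) error {
	if p.IdentityID != nil && *p.IdentityID == "" {
		return fmt.Errorf("identityId must not be set to an empty string")
	}
	return nil // an unset (nil) pointer and non-empty values are both fine
}

func main() {
	empty, id := "", "12345"
	fmt.Println(validate(podIdentity{IdentityID: nil}))    // <nil>
	fmt.Println(validate(podIdentity{IdentityID: &empty})) // error
	fmt.Println(validate(podIdentity{IdentityID: &id}))    // <nil>
}
```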
diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook.go b/apis/keda/v1alpha1/triggerauthentication_webhook.go
new file mode 100644
index 00000000000..72b14e1b388
--- /dev/null
+++ b/apis/keda/v1alpha1/triggerauthentication_webhook.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2023 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "encoding/json"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ ctrl "sigs.k8s.io/controller-runtime"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/webhook"
+ "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+var triggerauthenticationlog = logf.Log.WithName("triggerauthentication-validation-webhook")
+
+func (ta *TriggerAuthentication) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(ta).
+ Complete()
+}
+
+func (cta *ClusterTriggerAuthentication) SetupWebhookWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewWebhookManagedBy(mgr).
+ For(cta).
+ Complete()
+}
+
+// +kubebuilder:webhook:path=/validate-keda-sh-v1alpha1-triggerauthentication,mutating=false,failurePolicy=ignore,sideEffects=None,groups=keda.sh,resources=triggerauthentications,verbs=create;update,versions=v1alpha1,name=vstriggerauthentication.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Validator = &TriggerAuthentication{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (ta *TriggerAuthentication) ValidateCreate() (admission.Warnings, error) {
+ val, _ := json.MarshalIndent(ta, "", " ")
+ triggerauthenticationlog.Info(fmt.Sprintf("validating triggerauthentication creation for %s", string(val)))
+ return validateSpec(&ta.Spec)
+}
+
+func (ta *TriggerAuthentication) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ val, _ := json.MarshalIndent(ta, "", " ")
+ scaledobjectlog.V(1).Info(fmt.Sprintf("validating triggerauthentication update for %s", string(val)))
+
+ oldTa := old.(*TriggerAuthentication)
+ if isTriggerAuthenticationRemovingFinalizer(ta.ObjectMeta, oldTa.ObjectMeta, ta.Spec, oldTa.Spec) {
+ triggerauthenticationlog.V(1).Info("finalizer removal, skipping validation")
+ return nil, nil
+ }
+ return validateSpec(&ta.Spec)
+}
+
+func (ta *TriggerAuthentication) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
+}
+
+// +kubebuilder:webhook:path=/validate-keda-sh-v1alpha1-clustertriggerauthentication,mutating=false,failurePolicy=ignore,sideEffects=None,groups=keda.sh,resources=clustertriggerauthentications,verbs=create;update,versions=v1alpha1,name=vsclustertriggerauthentication.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Validator = &ClusterTriggerAuthentication{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (cta *ClusterTriggerAuthentication) ValidateCreate() (admission.Warnings, error) {
+ val, _ := json.MarshalIndent(cta, "", " ")
+ triggerauthenticationlog.Info(fmt.Sprintf("validating clustertriggerauthentication creation for %s", string(val)))
+ return validateSpec(&cta.Spec)
+}
+
+func (cta *ClusterTriggerAuthentication) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
+ val, _ := json.MarshalIndent(cta, "", " ")
+ scaledobjectlog.V(1).Info(fmt.Sprintf("validating clustertriggerauthentication update for %s", string(val)))
+
+ oldCta := old.(*ClusterTriggerAuthentication)
+ if isTriggerAuthenticationRemovingFinalizer(cta.ObjectMeta, oldCta.ObjectMeta, cta.Spec, oldCta.Spec) {
+ triggerauthenticationlog.V(1).Info("finalizer removal, skipping validation")
+ return nil, nil
+ }
+
+ return validateSpec(&cta.Spec)
+}
+
+func (cta *ClusterTriggerAuthentication) ValidateDelete() (admission.Warnings, error) {
+ return nil, nil
+}
+
+func isTriggerAuthenticationRemovingFinalizer(om metav1.ObjectMeta, oldOm metav1.ObjectMeta, spec TriggerAuthenticationSpec, oldSpec TriggerAuthenticationSpec) bool {
+ taSpec, _ := json.MarshalIndent(spec, "", " ")
+ oldTaSpec, _ := json.MarshalIndent(oldSpec, "", " ")
+ taSpecString := string(taSpec)
+ oldTaSpecString := string(oldTaSpec)
+
+ return len(om.Finalizers) == 0 && len(oldOm.Finalizers) == 1 && taSpecString == oldTaSpecString
+}
+
+func validateSpec(spec *TriggerAuthenticationSpec) (admission.Warnings, error) {
+ if spec.PodIdentity != nil {
+ switch spec.PodIdentity.Provider {
+ case PodIdentityProviderAzure, PodIdentityProviderAzureWorkload:
+ if spec.PodIdentity.IdentityID != nil && *spec.PodIdentity.IdentityID == "" {
+ return nil, fmt.Errorf("identityid of PodIdentity should not be empty. If it's set, identityId has to be different than \"\"")
+ }
+ default:
+ return nil, nil
+ }
+ }
+ return nil, nil
+}
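
The finalizer check above relies on a simple heuristic: an update is treated as pure finalizer removal when the finalizer list shrank from one to zero while the marshaled spec stayed byte-for-byte identical. A condensed sketch of that comparison, with a hypothetical minimal spec type standing in for `TriggerAuthenticationSpec`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type spec struct {
	Provider string `json:"provider"`
}

// isRemovingFinalizer reports whether the only change between old and new
// is dropping the last finalizer, i.e. the spec itself is untouched.
func isRemovingFinalizer(finalizers, oldFinalizers []string, s, oldS spec) bool {
	b, _ := json.Marshal(s)
	oldB, _ := json.Marshal(oldS)
	return len(finalizers) == 0 && len(oldFinalizers) == 1 && string(b) == string(oldB)
}

func main() {
	s := spec{Provider: "azure-workload"}
	fmt.Println(isRemovingFinalizer(nil, []string{"finalizer.keda.sh"}, s, s)) // true
	fmt.Println(isRemovingFinalizer(nil, nil, s, s))                          // false
}
```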
diff --git a/apis/keda/v1alpha1/triggerauthentication_webhook_test.go b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go
new file mode 100644
index 00000000000..b18585ff97b
--- /dev/null
+++ b/apis/keda/v1alpha1/triggerauthentication_webhook_test.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2023 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ = It("validate triggerauthentication when IdentityID is nil", func() {
+ namespaceName := "nilidentityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ spec := createTriggerAuthenticationSpecWithPodIdentity(nil)
+ ta := createTriggerAuthentication("nilidentityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when IdentityID is empty", func() {
+ namespaceName := "emptyidentityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityID := ""
+ spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ ta := createTriggerAuthentication("emptyidentityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("validate triggerauthentication when IdentityID is not empty", func() {
+ namespaceName := "identityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityID := "12345"
+ spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ ta := createTriggerAuthentication("identityidta", namespaceName, "TriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when IdentityID is nil", func() {
+ namespaceName := "clusternilidentityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ spec := createTriggerAuthenticationSpecWithPodIdentity(nil)
+ ta := createTriggerAuthentication("clusternilidentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when IdentityID is empty", func() {
+ namespaceName := "clusteremptyidentityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityID := ""
+ spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ ta := createTriggerAuthentication("clusteremptyidentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).Should(HaveOccurred())
+})
+
+var _ = It("validate clustertriggerauthentication when IdentityID is not empty", func() {
+ namespaceName := "clusteridentityid"
+ namespace := createNamespace(namespaceName)
+ err := k8sClient.Create(context.Background(), namespace)
+ Expect(err).ToNot(HaveOccurred())
+
+ identityID := "12345"
+ spec := createTriggerAuthenticationSpecWithPodIdentity(&identityID)
+ ta := createTriggerAuthentication("clusteridentityidta", namespaceName, "ClusterTriggerAuthentication", spec)
+ Eventually(func() error {
+ return k8sClient.Create(context.Background(), ta)
+ }).ShouldNot(HaveOccurred())
+})
+
+func createTriggerAuthenticationSpecWithPodIdentity(identityID *string) TriggerAuthenticationSpec {
+ return TriggerAuthenticationSpec{
+ PodIdentity: &AuthPodIdentity{
+ Provider: PodIdentityProviderAzure,
+ IdentityID: identityID,
+ },
+ }
+}
+
+func createTriggerAuthentication(name, namespace, targetKind string, spec TriggerAuthenticationSpec) *TriggerAuthentication {
+ return &TriggerAuthentication{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ },
+ TypeMeta: metav1.TypeMeta{
+ Kind: targetKind,
+ APIVersion: "keda.sh",
+ },
+ Spec: spec,
+ }
+}
diff --git a/cmd/webhooks/main.go b/cmd/webhooks/main.go
index 9a79928a200..ab410cccaec 100644
--- a/cmd/webhooks/main.go
+++ b/cmd/webhooks/main.go
@@ -131,4 +131,12 @@ func setupWebhook(mgr manager.Manager) {
setupLog.Error(err, "unable to create webhook", "webhook", "ScaledObject")
os.Exit(1)
}
+ if err := (&kedav1alpha1.TriggerAuthentication{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "TriggerAuthentication")
+ os.Exit(1)
+ }
+ if err := (&kedav1alpha1.ClusterTriggerAuthentication{}).SetupWebhookWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create webhook", "webhook", "ClusterTriggerAuthentication")
+ os.Exit(1)
+ }
}
diff --git a/config/metrics-server/deployment.yaml b/config/metrics-server/deployment.yaml
index a6f7f88625f..9f67dcd8052 100644
--- a/config/metrics-server/deployment.yaml
+++ b/config/metrics-server/deployment.yaml
@@ -58,6 +58,7 @@ spec:
- /usr/local/bin/keda-adapter
- --secure-port=6443
- --logtostderr=true
+ - --stderrthreshold=ERROR
- --v=0
- --client-ca-file=/certs/ca.crt
- --tls-cert-file=/certs/tls.crt
diff --git a/config/webhooks/validation_webhooks.yaml b/config/webhooks/validation_webhooks.yaml
index 4eb57d09b7c..3561df56e22 100644
--- a/config/webhooks/validation_webhooks.yaml
+++ b/config/webhooks/validation_webhooks.yaml
@@ -33,3 +33,51 @@ webhooks:
- scaledobjects
sideEffects: None
timeoutSeconds: 10
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: keda-admission-webhooks
+ namespace: keda
+ path: /validate-keda-sh-v1alpha1-triggerauthentication
+ failurePolicy: Ignore
+ matchPolicy: Equivalent
+ name: vstriggerauthentication.kb.io
+ namespaceSelector: {}
+ objectSelector: {}
+ rules:
+ - apiGroups:
+ - keda.sh
+ apiVersions:
+ - v1alpha1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - triggerauthentications
+ sideEffects: None
+ timeoutSeconds: 10
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: keda-admission-webhooks
+ namespace: keda
+ path: /validate-keda-sh-v1alpha1-clustertriggerauthentication
+ failurePolicy: Ignore
+ matchPolicy: Equivalent
+ name: vsclustertriggerauthentication.kb.io
+ namespaceSelector: {}
+ objectSelector: {}
+ rules:
+ - apiGroups:
+ - keda.sh
+ apiVersions:
+ - v1alpha1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clustertriggerauthentications
+ sideEffects: None
+ timeoutSeconds: 10
diff --git a/controllers/keda/hpa.go b/controllers/keda/hpa.go
index be2edeefed3..8f13296003f 100644
--- a/controllers/keda/hpa.go
+++ b/controllers/keda/hpa.go
@@ -222,7 +222,7 @@ func (r *ScaledObjectReconciler) getScaledObjectMetricSpecs(ctx context.Context,
if metricSpec.External != nil {
externalMetricName := metricSpec.External.Metric.Name
if kedacontrollerutil.Contains(externalMetricNames, externalMetricName) {
- return nil, fmt.Errorf("metricName %s defined multiple times in ScaledObject %s, please refer the documentation how to define metricName manually", externalMetricName, scaledObject.Name)
+ return nil, fmt.Errorf("metricName %s defined multiple times in ScaledObject %s", externalMetricName, scaledObject.Name)
}
// add the scaledobject.keda.sh/name label. This is how the MetricsAdapter will know which scaledobject a metric is for when the HPA queries it.
diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go
index 9e9ec7dd594..55f076cee2f 100755
--- a/controllers/keda/scaledjob_controller.go
+++ b/controllers/keda/scaledjob_controller.go
@@ -130,15 +130,17 @@ func (r *ScaledJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
if !scaledJob.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledJob, conditions); err != nil {
+ r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobUpdateFailed, err.Error())
return ctrl.Result{}, err
}
}
// Check jobTargetRef is specified
if scaledJob.Spec.JobTargetRef == nil {
- errMsg := "scaledJob.spec.jobTargetRef is not set"
+ errMsg := "ScaledJob.spec.jobTargetRef not found"
err := fmt.Errorf(errMsg)
- reqLogger.Error(err, "scaledJob.spec.jobTargetRef not found")
+ reqLogger.Error(err, errMsg)
+ r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobCheckFailed, errMsg)
return ctrl.Result{}, err
}
conditions := scaledJob.Status.Conditions.DeepCopy()
@@ -158,6 +160,7 @@ func (r *ScaledJobReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledJob, &conditions); err != nil {
+ r.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.ScaledJobUpdateFailed, err.Error())
return ctrl.Result{}, err
}
diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go
index 6ce1d09094d..e3256606ef3 100755
--- a/controllers/keda/scaledobject_controller.go
+++ b/controllers/keda/scaledobject_controller.go
@@ -43,6 +43,7 @@ import (
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
kedacontrollerutil "github.com/kedacore/keda/v2/controllers/keda/util"
+ "github.com/kedacore/keda/v2/pkg/common/message"
"github.com/kedacore/keda/v2/pkg/eventreason"
"github.com/kedacore/keda/v2/pkg/prommetrics"
"github.com/kedacore/keda/v2/pkg/scaling"
@@ -134,7 +135,6 @@ func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options cont
// Reconcile performs reconciliation on the identified ScaledObject resource based on the request information passed, returns the result and an error (if any).
func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
reqLogger := log.FromContext(ctx)
-
// Fetch the ScaledObject instance
scaledObject := &kedav1alpha1.ScaledObject{}
err := r.Client.Get(ctx, req.NamespacedName, scaledObject)
@@ -168,6 +168,7 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request
if !scaledObject.Status.Conditions.AreInitialized() {
conditions := kedav1alpha1.GetInitializedConditions()
if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, conditions); err != nil {
+ r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectUpdateFailed, err.Error())
return ctrl.Result{}, err
}
}
@@ -183,13 +184,14 @@ func (r *ScaledObjectReconciler) Reconcile(ctx context.Context, req ctrl.Request
} else {
wasReady := conditions.GetReadyCondition()
if wasReady.IsFalse() || wasReady.IsUnknown() {
- r.Recorder.Event(scaledObject, corev1.EventTypeNormal, eventreason.ScaledObjectReady, "ScaledObject is ready for scaling")
+ r.Recorder.Event(scaledObject, corev1.EventTypeNormal, eventreason.ScaledObjectReady, message.ScalerReadyMsg)
}
reqLogger.V(1).Info(msg)
conditions.SetReadyCondition(metav1.ConditionTrue, kedav1alpha1.ScaledObjectConditionReadySucccesReason, msg)
}
if err := kedastatus.SetStatusConditions(ctx, r.Client, reqLogger, scaledObject, &conditions); err != nil {
+ r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectUpdateFailed, err.Error())
return ctrl.Result{}, err
}
@@ -229,7 +231,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
- return "ScaledObject doesn't have correct scaleTargetRef specification", err
+ return message.ScaleTargetErrMsg, err
}
// Check the label needed for Metrics servers is present on ScaledObject
@@ -241,7 +243,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg
// Check if resource targeted for scaling exists and exposes /scale subresource
gvkr, err := r.checkTargetResourceIsScalable(ctx, logger, scaledObject)
if err != nil {
- return "ScaledObject doesn't have correct scaleTargetRef specification", err
+ return message.ScaleTargetErrMsg, err
}
err = r.checkReplicaCountBoundsAreValid(scaledObject)
@@ -249,7 +251,7 @@ func (r *ScaledObjectReconciler) reconcileScaledObject(ctx context.Context, logg
return "ScaledObject doesn't have correct Idle/Min/Max Replica Counts specification", err
}
- err = kedav1alpha1.ValidateTriggers(logger, scaledObject.Spec.Triggers)
+ err = kedav1alpha1.ValidateTriggers(scaledObject.Spec.Triggers)
if err != nil {
return "ScaledObject doesn't have correct triggers specification", err
}
@@ -305,7 +307,9 @@ func (r *ScaledObjectReconciler) ensureScaledObjectLabel(ctx context.Context, lo
func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {
gvkr, err := kedav1alpha1.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)
if err != nil {
- logger.Error(err, "failed to parse Group, Version, Kind, Resource", "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
+ msg := "Failed to parse Group, Version, Kind, Resource"
+ logger.Error(err, msg, "apiVersion", scaledObject.Spec.ScaleTargetRef.APIVersion, "kind", scaledObject.Spec.ScaleTargetRef.Kind)
+ r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectUpdateFailed, msg)
return gvkr, err
}
gvkString := gvkr.GVKString()
@@ -331,11 +335,13 @@ func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(ctx context.Conte
unstruct.SetGroupVersionKind(gvkr.GroupVersionKind())
if err := r.Client.Get(ctx, client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {
// resource doesn't exist
- logger.Error(err, "target resource doesn't exist", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
+ logger.Error(err, message.ScaleTargetNotFoundMsg, "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
+ r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg)
return gvkr, err
}
// resource exist but doesn't expose /scale subresource
- logger.Error(errScale, "target resource doesn't expose /scale subresource", "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
+ logger.Error(errScale, message.ScaleTargetNoSubresourceMsg, "resource", gvkString, "name", scaledObject.Spec.ScaleTargetRef.Name)
+ r.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg)
return gvkr, errScale
}
isScalableCache.Store(gr.String(), true)
diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go
index 3a376f377e9..9c2f7aa6f99 100644
--- a/controllers/keda/scaledobject_controller_test.go
+++ b/controllers/keda/scaledobject_controller_test.go
@@ -66,8 +66,8 @@ var _ = Describe("ScaledObjectController", func() {
)
var triggerMeta = []map[string]string{
- {"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "disableScaleToZero": "true"},
- {"serverAddress": "http://localhost:9090", "metricName": "http_requests_total2", "threshold": "100", "query": "up"},
+ {"serverAddress": "http://localhost:9090", "threshold": "100", "query": "up", "disableScaleToZero": "true"},
+ {"serverAddress": "http://localhost:9090", "threshold": "100", "query": "up"},
}
BeforeEach(func() {
@@ -97,6 +97,7 @@ var _ = Describe("ScaledObjectController", func() {
TriggerMetadata: tm,
ResolvedEnv: nil,
AuthParams: nil,
+ ScalerIndex: i,
}
s, err := scalers.NewPrometheusScaler(config)
@@ -221,6 +222,7 @@ var _ = Describe("ScaledObjectController", func() {
// Call function tobe tested
metricSpecs, err := metricNameTestReconciler.getScaledObjectMetricSpecs(context.Background(), testLogger, duplicateNamedScaledObject)
+ Ω(err).ShouldNot(BeNil())
// Test that the status was not updated
Ω(duplicateNamedScaledObject.Status.ExternalMetricNames).Should(BeNil())
diff --git a/hack/validate-changelog.sh b/hack/validate-changelog.sh
new file mode 100755
index 00000000000..cf2d5f32ab9
--- /dev/null
+++ b/hack/validate-changelog.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+
+# Define filename
+filename="$SCRIPT_ROOT/CHANGELOG.md"
+
+# Check if file exists
+if [[ ! -f "$filename" ]]; then
+ echo "Error: $filename does not exist."
+ exit 1
+fi
+
+# Storing the version to be checked
+mapfile -t versions < <(awk '/## History/{flag=1;next}/## /{flag=0}flag' "$filename" | grep -o '\[[^]]*\]' | grep -v "v1." | sed 's/[][]//g')
+
+# Define a function to extract and sort sections
+function extract_and_check() {
+ local section=$1
+ local content_block=$2
+ local content=$(awk "/### $section/{flag=1;next}/### /{flag=0}flag" <<< "$content_block" | grep '^- \*\*')
+
+ # Skip if content does not exist
+ if [[ -z "$content" ]]; then
+ return
+ fi
+
+ # Separate and sort the **General**: lines
+ local sorted_general_lines=$(echo "$content" | grep '^- \*\*General\*\*:' | sort --ignore-case --dictionary-order)
+
+ # Sort the remaining lines
+ local sorted_content=$(echo "$content" | grep -v '^- \*\*General\*\*:' | sort --ignore-case --dictionary-order)
+
+ # Check if sorted_general_lines is not empty, then concatenate
+ if [[ -n "$sorted_general_lines" ]]; then
+ sorted_content=$(printf "%s\n%s" "$sorted_general_lines" "$sorted_content")
+ fi
+
+ # Check pattern and throw error if wrong pattern found
+ while IFS= read -r line; do
+ echo "Error: Wrong pattern found in section: $section , line: $line"
+ exit 1
+ done < <(grep -Ev '^(-\s\*\*[^*]+\*\*: .*\(\[#(\d+)\]\(https:\/\/github\.com\/kedacore\/(keda|charts|governance)\/(pull|issues|discussions)\/\2\)(?:\|\[#(\d+)\]\(https:\/\/github\.com\/kedacore\/(keda|charts|governance)\/(pull|issues|discussions)\/\5\)){0,}\))$' <<< "$content")
+
+ if [ "$content" != "$sorted_content" ]; then
+ echo "Error: Section: $section is not sorted correctly. Correct order:"
+ echo "$sorted_content"
+ exit 1
+ fi
+}
+
+
+# Extract release sections, including "Unreleased", and check them
+for version in "${versions[@]}"; do
+ release_content=$(awk "/## $version/{flag=1;next}/## v[0-9\.]+/{flag=0}flag" "$filename")
+
+
+ if [[ -z "$release_content" ]]; then
+ echo "No content found for $version Skipping."
+ continue
+ fi
+
+ echo "Checking section: $version"
+
+ # Separate content into different sections and check sorting for each release
+ extract_and_check "New" "$release_content"
+ extract_and_check "Experimental" "$release_content"
+ extract_and_check "Improvements" "$release_content"
+ extract_and_check "Fixes" "$release_content"
+ extract_and_check "Deprecations" "$release_content"
+ extract_and_check "Other" "$release_content"
+
+done
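
The per-line check above is essentially a single pattern match per changelog entry; the pattern needs PCRE (`grep -P`) because it uses `\d`, non-capturing groups, and backreferences. A simplified Go rendition of the same check (Go's RE2 engine has no backreferences, so this sketch does not cross-check the issue number in the link text against the URL):

```go
package main

import (
	"fmt"
	"regexp"
)

// entryPattern is a simplified version of the changelog line check in
// hack/validate-changelog.sh, without the backreference validation.
var entryPattern = regexp.MustCompile(
	`^- \*\*[^*]+\*\*: .*\(\[#\d+\]\(https://github\.com/kedacore/(keda|charts|governance)/(pull|issues|discussions)/\d+\)(\|\[#\d+\]\(https://github\.com/kedacore/(keda|charts|governance)/(pull|issues|discussions)/\d+\))*\)$`)

func main() {
	lines := []string{
		"- **General**: Bump dependencies versions ([#2978](https://github.com/kedacore/keda/issues/2978))",
		"- **General** missing colon ([#2978](https://github.com/kedacore/keda/issues/2978))",
	}
	for _, l := range lines {
		fmt.Println(entryPattern.MatchString(l), l)
	}
}
```

The `validate-changelog` Makefile target added earlier in this diff wires the script into local checks via `make validate-changelog`.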
diff --git a/pkg/common/message/message.go b/pkg/common/message/message.go
new file mode 100644
index 00000000000..a63d7fa69f3
--- /dev/null
+++ b/pkg/common/message/message.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2020 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package message
+
+const (
+ ScalerIsBuiltMsg = "Scaler %s is built."
+
+ ScalerStartMsg = "Started scalers watch"
+
+ ScalerReadyMsg = "ScaledObject is ready for scaling"
+
+ ScaleTargetErrMsg = "ScaledObject doesn't have correct scaleTargetRef specification"
+
+ ScaleTargetNotFoundMsg = "Target resource doesn't exist"
+
+ ScaleTargetNoSubresourceMsg = "Target resource doesn't expose /scale subresource"
+)
diff --git a/pkg/eventreason/eventreason.go b/pkg/eventreason/eventreason.go
index 5614abf3589..6b56ef79abd 100644
--- a/pkg/eventreason/eventreason.go
+++ b/pkg/eventreason/eventreason.go
@@ -29,6 +29,12 @@ const (
// ScaledJobCheckFailed is for event when ScaledJob validation check fails
ScaledJobCheckFailed = "ScaledJobCheckFailed"
+ // ScaledObjectUpdateFailed is for event when a ScaledObject status update fails
+ ScaledObjectUpdateFailed = "ScaledObjectUpdateFailed"
+
+ // ScaledJobUpdateFailed is for event when a ScaledJob status update fails
+ ScaledJobUpdateFailed = "ScaledJobUpdateFailed"
+
// ScaledObjectDeleted is for event when ScaledObject is deleted
ScaledObjectDeleted = "ScaledObjectDeleted"
@@ -65,9 +71,15 @@ const (
// TriggerAuthenticationAdded is for event when a TriggerAuthentication is added
TriggerAuthenticationAdded = "TriggerAuthenticationAdded"
+ // TriggerAuthenticationFailed is for event when a TriggerAuthentication fails with an error
+ TriggerAuthenticationFailed = "TriggerAuthenticationFailed"
+
// ClusterTriggerAuthenticationDeleted is for event when a ClusterTriggerAuthentication is deleted
ClusterTriggerAuthenticationDeleted = "ClusterTriggerAuthenticationDeleted"
// ClusterTriggerAuthenticationAdded is for event when a ClusterTriggerAuthentication is added
ClusterTriggerAuthenticationAdded = "ClusterTriggerAuthenticationAdded"
+
+ // ClusterTriggerAuthenticationFailed is for event when a ClusterTriggerAuthentication fails with an error
+ ClusterTriggerAuthenticationFailed = "ClusterTriggerAuthenticationFailed"
)
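
The new *UpdateFailed reasons pair naturally with the message constants added in pkg/common/message above. A hedged sketch of how a controller might emit them through a client-go event recorder; the wiring (recorder, object) is illustrative and not taken from this PR, and KEDAScalersStarted is assumed to exist elsewhere in pkg/eventreason.

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"

	"github.com/kedacore/keda/v2/pkg/common/message"
	"github.com/kedacore/keda/v2/pkg/eventreason"
)

// reportStatusUpdateFailure records a Warning event with the new reason;
// err.Error() carries the detail, since no fixed message constant applies.
func reportStatusUpdateFailure(recorder record.EventRecorder, obj runtime.Object, err error) {
	recorder.Event(obj, corev1.EventTypeWarning, eventreason.ScaledObjectUpdateFailed, err.Error())
}

// reportScalerBuilt shows a templated message constant in use. The reason
// constant KEDAScalersStarted is assumed from the existing package.
func reportScalerBuilt(recorder record.EventRecorder, obj runtime.Object, scalerName string) {
	recorder.Event(obj, corev1.EventTypeNormal, eventreason.KEDAScalersStarted,
		fmt.Sprintf(message.ScalerIsBuiltMsg, scalerName))
}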
diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go
index effc6fa607e..2bc663ed244 100644
--- a/pkg/scalers/aws_sqs_queue_scaler.go
+++ b/pkg/scalers/aws_sqs_queue_scaler.go
@@ -21,17 +21,9 @@ const (
targetQueueLengthDefault = 5
activationTargetQueueLengthDefault = 0
defaultScaleOnInFlight = true
+ defaultScaleOnDelayed = false
)
-var awsSqsQueueMetricNamesForScalingInFlight = []string{
- "ApproximateNumberOfMessages",
- "ApproximateNumberOfMessagesNotVisible",
-}
-
-var awsSqsQueueMetricNamesForNotScalingInFlight = []string{
- "ApproximateNumberOfMessages",
-}
-
type awsSqsQueueScaler struct {
metricType v2.MetricTargetType
metadata *awsSqsQueueMetadata
@@ -49,6 +41,7 @@ type awsSqsQueueMetadata struct {
awsAuthorization awsAuthorizationMetadata
scalerIndex int
scaleOnInFlight bool
+ scaleOnDelayed bool
awsSqsQueueMetricNames []string
}
@@ -78,6 +71,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs
meta := awsSqsQueueMetadata{}
meta.targetQueueLength = defaultTargetQueueLength
meta.scaleOnInFlight = defaultScaleOnInFlight
+ meta.scaleOnDelayed = defaultScaleOnDelayed
if val, ok := config.TriggerMetadata["queueLength"]; ok && val != "" {
queueLength, err := strconv.ParseInt(val, 10, 64)
@@ -109,10 +103,22 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqs
}
}
+ if val, ok := config.TriggerMetadata["scaleOnDelayed"]; ok && val != "" {
+ scaleOnDelayed, err := strconv.ParseBool(val)
+ if err != nil {
+ meta.scaleOnDelayed = defaultScaleOnDelayed
+ logger.Error(err, "Error parsing SQS queue metadata scaleOnDelayed, using default", "default", defaultScaleOnDelayed)
+ } else {
+ meta.scaleOnDelayed = scaleOnDelayed
+ }
+ }
+
+ meta.awsSqsQueueMetricNames = []string{"ApproximateNumberOfMessages"}
if meta.scaleOnInFlight {
- meta.awsSqsQueueMetricNames = awsSqsQueueMetricNamesForScalingInFlight
- } else {
- meta.awsSqsQueueMetricNames = awsSqsQueueMetricNamesForNotScalingInFlight
+ meta.awsSqsQueueMetricNames = append(meta.awsSqsQueueMetricNames, "ApproximateNumberOfMessagesNotVisible")
+ }
+ if meta.scaleOnDelayed {
+ meta.awsSqsQueueMetricNames = append(meta.awsSqsQueueMetricNames, "ApproximateNumberOfMessagesDelayed")
}
if val, ok := config.TriggerMetadata["queueURL"]; ok && val != "" {
diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go
index fc7e4cdd971..94381c95a28 100644
--- a/pkg/scalers/aws_sqs_queue_scaler_test.go
+++ b/pkg/scalers/aws_sqs_queue_scaler_test.go
@@ -3,6 +3,7 @@ package scalers
import (
"context"
"errors"
+ "strconv"
"testing"
"github.com/aws/aws-sdk-go/aws"
@@ -25,6 +26,10 @@ const (
testAWSSQSErrorQueueURL = "https://sqs.eu-west-1.amazonaws.com/account_id/Error"
testAWSSQSBadDataQueueURL = "https://sqs.eu-west-1.amazonaws.com/account_id/BadData"
+
+ testAWSSQSApproximateNumberOfMessagesVisible = 200
+ testAWSSQSApproximateNumberOfMessagesNotVisible = 100
+ testAWSSQSApproximateNumberOfMessagesDelayed = 50
)
var testAWSSQSEmptyResolvedEnv = map[string]string{}
@@ -65,14 +70,16 @@ func (m *mockSqs) GetQueueAttributes(input *sqs.GetQueueAttributesInput) (*sqs.G
Attributes: map[string]*string{
"ApproximateNumberOfMessages": aws.String("NotInt"),
"ApproximateNumberOfMessagesNotVisible": aws.String("NotInt"),
+ "ApproximateNumberOfMessagesDelayed": aws.String("NotInt"),
},
}, nil
}
return &sqs.GetQueueAttributesOutput{
Attributes: map[string]*string{
- "ApproximateNumberOfMessages": aws.String("200"),
- "ApproximateNumberOfMessagesNotVisible": aws.String("100"),
+ "ApproximateNumberOfMessages": aws.String(strconv.Itoa(testAWSSQSApproximateNumberOfMessagesVisible)),
+ "ApproximateNumberOfMessagesNotVisible": aws.String(strconv.Itoa(testAWSSQSApproximateNumberOfMessagesNotVisible)),
+ "ApproximateNumberOfMessagesDelayed": aws.String(strconv.Itoa(testAWSSQSApproximateNumberOfMessagesDelayed)),
},
}, nil
}
@@ -326,6 +333,44 @@ var awsSQSGetMetricTestData = []*parseAWSSQSMetadataTestData{
testAWSSQSEmptyResolvedEnv,
false,
"not error with scaleOnInFlight enabled"},
+ {map[string]string{
+ "queueURL": testAWSSQSProperQueueURL,
+ "queueLength": "1",
+ "awsRegion": "eu-west-1",
+ "scaleOnDelayed": "false"},
+ testAWSSQSAuthentication,
+ testAWSSQSEmptyResolvedEnv,
+ false,
+ "not error with scaleOnDelayed disabled"},
+ {map[string]string{
+ "queueURL": testAWSSQSProperQueueURL,
+ "queueLength": "1",
+ "awsRegion": "eu-west-1",
+ "scaleOnDelayed": "true"},
+ testAWSSQSAuthentication,
+ testAWSSQSEmptyResolvedEnv,
+ false,
+ "not error with scaleOnDelayed enabled"},
+ {map[string]string{
+ "queueURL": testAWSSQSProperQueueURL,
+ "queueLength": "1",
+ "awsRegion": "eu-west-1",
+ "scaleOnInFlight": "false",
+ "scaleOnDelayed": "false"},
+ testAWSSQSAuthentication,
+ testAWSSQSEmptyResolvedEnv,
+ false,
+ "not error with scaledOnInFlight and scaleOnDelayed disabled"},
+ {map[string]string{
+ "queueURL": testAWSSQSProperQueueURL,
+ "queueLength": "1",
+ "awsRegion": "eu-west-1",
+ "scaleOnInFlight": "true",
+ "scaleOnDelayed": "true"},
+ testAWSSQSAuthentication,
+ testAWSSQSEmptyResolvedEnv,
+ false,
+ "not error with scaledOnInFlight and scaleOnDelayed enabled"},
{map[string]string{
"queueURL": testAWSSQSErrorQueueURL,
"queueLength": "1",
@@ -390,11 +435,17 @@ func TestAWSSQSScalerGetMetrics(t *testing.T) {
case testAWSSQSBadDataQueueURL:
assert.Error(t, err, "expect error because of bad data return from sqs")
default:
+ expectedMessages := testAWSSQSApproximateNumberOfMessagesVisible
+
if meta.scaleOnInFlight {
- assert.EqualValues(t, int64(300.0), value[0].Value.Value())
- } else {
- assert.EqualValues(t, int64(200.0), value[0].Value.Value())
+ expectedMessages += testAWSSQSApproximateNumberOfMessagesNotVisible
}
+
+ if meta.scaleOnDelayed {
+ expectedMessages += testAWSSQSApproximateNumberOfMessagesDelayed
+ }
+
+ assert.EqualValues(t, int64(expectedMessages), value[0].Value.Value())
}
}
}
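
For context on what the mock above stands in for: the scaler sums whichever Approximate* attributes it requested. A sketch against aws-sdk-go v1 with error handling trimmed; the queue URL below is a placeholder and the helper name is illustrative.

package main

import (
	"fmt"
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

// approximateQueueLength sums the requested Approximate* attributes,
// mirroring how the scaler turns GetQueueAttributes output into one metric.
func approximateQueueLength(client *sqs.SQS, queueURL string, attributeNames []string) (int64, error) {
	out, err := client.GetQueueAttributes(&sqs.GetQueueAttributesInput{
		QueueUrl:       aws.String(queueURL),
		AttributeNames: aws.StringSlice(attributeNames),
	})
	if err != nil {
		return 0, err
	}
	var total int64
	for _, name := range attributeNames {
		v, ok := out.Attributes[name]
		if !ok {
			continue
		}
		n, err := strconv.ParseInt(*v, 10, 64)
		if err != nil {
			return 0, err // the BadData test case above exercises this path
		}
		total += n
	}
	return total, nil
}

func main() {
	client := sqs.New(session.Must(session.NewSession()))
	n, err := approximateQueueLength(client, "https://sqs.eu-west-1.amazonaws.com/account_id/testQueue",
		[]string{"ApproximateNumberOfMessages", "ApproximateNumberOfMessagesNotVisible", "ApproximateNumberOfMessagesDelayed"})
	fmt.Println(n, err)
}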
diff --git a/pkg/scalers/azure/azure_app_insights.go b/pkg/scalers/azure/azure_app_insights.go
index c4c64d78c34..461d61a2cf9 100644
--- a/pkg/scalers/azure/azure_app_insights.go
+++ b/pkg/scalers/azure/azure_app_insights.go
@@ -69,10 +69,10 @@ func getAuthConfig(ctx context.Context, info AppInsightsInfo, podIdentity kedav1
case kedav1alpha1.PodIdentityProviderAzure:
config := auth.NewMSIConfig()
config.Resource = info.AppInsightsResourceURL
- config.ClientID = podIdentity.IdentityID
+ config.ClientID = podIdentity.GetIdentityID()
return config
case kedav1alpha1.PodIdentityProviderAzureWorkload:
- return NewAzureADWorkloadIdentityConfig(ctx, podIdentity.IdentityID, info.AppInsightsResourceURL)
+ return NewAzureADWorkloadIdentityConfig(ctx, podIdentity.GetIdentityID(), info.AppInsightsResourceURL)
}
return nil
}
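
This file and every Azure hunk below make the same mechanical change: podIdentity.IdentityID becomes podIdentity.GetIdentityID(). The getter itself is not shown in this diff; a plausible minimal shape, assuming IdentityID was turned into a pointer and the accessor is nil-safe, would be:

package v1alpha1

// Sketch only — the real AuthPodIdentity lives in apis/keda/v1alpha1 and has
// more fields. The point is the nil-safe accessor every call site now uses.
type AuthPodIdentity struct {
	Provider   string
	IdentityID *string
}

// GetIdentityID returns the identity ID, or "" when unset, so callers no
// longer guard against a nil pointer themselves.
func (a *AuthPodIdentity) GetIdentityID() string {
	if a == nil || a.IdentityID == nil {
		return ""
	}
	return *a.IdentityID
}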
diff --git a/pkg/scalers/azure/azure_blob.go b/pkg/scalers/azure/azure_blob.go
index e9d48056861..3314df79d57 100644
--- a/pkg/scalers/azure/azure_blob.go
+++ b/pkg/scalers/azure/azure_blob.go
@@ -34,7 +34,6 @@ type BlobMetadata struct {
BlobPrefix string
Connection string
AccountName string
- MetricName string
EndpointSuffix string
ScalerIndex int
GlobPattern *glob.Glob
diff --git a/pkg/scalers/azure/azure_data_explorer.go b/pkg/scalers/azure/azure_data_explorer.go
index 915e5ece3d2..3785ddc644b 100644
--- a/pkg/scalers/azure/azure_data_explorer.go
+++ b/pkg/scalers/azure/azure_data_explorer.go
@@ -91,7 +91,7 @@ func getDataExplorerAuthConfig(metadata *DataExplorerMetadata) (*kusto.Connectio
case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
azureDataExplorerLogger.V(1).Info(fmt.Sprintf("Creating Azure Data Explorer Client using podIdentity %s", metadata.PodIdentity.Provider))
- creds, chainedErr := NewChainedCredential(metadata.PodIdentity.IdentityID, metadata.PodIdentity.Provider)
+ creds, chainedErr := NewChainedCredential(metadata.PodIdentity.GetIdentityID(), metadata.PodIdentity.Provider)
if chainedErr != nil {
return nil, chainedErr
}
diff --git a/pkg/scalers/azure/azure_eventhub.go b/pkg/scalers/azure/azure_eventhub.go
index e7c3b93b2e3..d4ca34b879c 100644
--- a/pkg/scalers/azure/azure_eventhub.go
+++ b/pkg/scalers/azure/azure_eventhub.go
@@ -53,7 +53,7 @@ func GetEventHubClient(ctx context.Context, info EventHubInfo) (*eventhub.Hub, e
envJWTProviderOption := aad.JWTProviderWithAzureEnvironment(&env)
resourceURLJWTProviderOption := aad.JWTProviderWithResourceURI(info.EventHubResourceURL)
clientIDJWTProviderOption := func(config *aad.TokenProviderConfiguration) error {
- config.ClientID = info.PodIdentity.IdentityID
+ config.ClientID = info.PodIdentity.GetIdentityID()
return nil
}
@@ -68,7 +68,7 @@ func GetEventHubClient(ctx context.Context, info EventHubInfo) (*eventhub.Hub, e
// User wants to use AAD Workload Identity
env := azure.Environment{ActiveDirectoryEndpoint: info.ActiveDirectoryEndpoint, ServiceBusEndpointSuffix: info.ServiceBusEndpointSuffix}
hubEnvOptions := eventhub.HubWithEnvironment(env)
- provider := NewAzureADWorkloadIdentityTokenProvider(ctx, info.PodIdentity.IdentityID, info.EventHubResourceURL)
+ provider := NewAzureADWorkloadIdentityTokenProvider(ctx, info.PodIdentity.GetIdentityID(), info.EventHubResourceURL)
return eventhub.NewHub(info.Namespace, info.EventHubName, provider, hubEnvOptions)
}
diff --git a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go
index b3bd400d798..16e1aa1bdfe 100644
--- a/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go
+++ b/pkg/scalers/azure/azure_managed_prometheus_http_round_tripper.go
@@ -36,7 +36,7 @@ func TryAndGetAzureManagedPrometheusHTTPRoundTripper(podIdentity kedav1alpha1.Au
return nil, fmt.Errorf("trigger metadata cannot be nil")
}
- chainedCred, err := NewChainedCredential(podIdentity.IdentityID, podIdentity.Provider)
+ chainedCred, err := NewChainedCredential(podIdentity.GetIdentityID(), podIdentity.Provider)
if err != nil {
return nil, err
}
diff --git a/pkg/scalers/azure/azure_monitor.go b/pkg/scalers/azure/azure_monitor.go
index 08ef4fa21a1..0ed25ff561c 100644
--- a/pkg/scalers/azure/azure_monitor.go
+++ b/pkg/scalers/azure/azure_monitor.go
@@ -89,11 +89,11 @@ func createMetricsClient(ctx context.Context, info MonitorInfo, podIdentity keda
case kedav1alpha1.PodIdentityProviderAzure:
config := auth.NewMSIConfig()
config.Resource = info.AzureResourceManagerEndpoint
- config.ClientID = podIdentity.IdentityID
+ config.ClientID = podIdentity.GetIdentityID()
authConfig = config
case kedav1alpha1.PodIdentityProviderAzureWorkload:
- authConfig = NewAzureADWorkloadIdentityConfig(ctx, podIdentity.IdentityID, info.AzureResourceManagerEndpoint)
+ authConfig = NewAzureADWorkloadIdentityConfig(ctx, podIdentity.GetIdentityID(), info.AzureResourceManagerEndpoint)
}
authorizer, _ := authConfig.Authorizer()
diff --git a/pkg/scalers/azure/azure_storage.go b/pkg/scalers/azure/azure_storage.go
index 759fa62917c..12ac8e9f18d 100644
--- a/pkg/scalers/azure/azure_storage.go
+++ b/pkg/scalers/azure/azure_storage.go
@@ -111,7 +111,7 @@ func ParseAzureStorageQueueConnection(ctx context.Context, httpClient util.HTTPD
return credential, endpoint, nil
default:
- return nil, nil, fmt.Errorf("azure queues doesn't support %s pod identity type", podIdentity)
+ return nil, nil, fmt.Errorf("azure queues doesn't support %s pod identity type", podIdentity.Provider)
}
}
@@ -139,7 +139,7 @@ func ParseAzureStorageBlobConnection(ctx context.Context, httpClient util.HTTPDo
return credential, endpoint, nil
default:
- return nil, nil, fmt.Errorf("azure queues doesn't support %s pod identity type", podIdentity)
+ return nil, nil, fmt.Errorf("azure queues doesn't support %s pod identity type", podIdentity.Provider)
}
}
@@ -207,9 +207,9 @@ func parseAccessTokenAndEndpoint(ctx context.Context, httpClient util.HTTPDoer,
switch podIdentity.Provider {
case kedav1alpha1.PodIdentityProviderAzure:
- token, err = GetAzureADPodIdentityToken(ctx, httpClient, podIdentity.IdentityID, storageResource)
+ token, err = GetAzureADPodIdentityToken(ctx, httpClient, podIdentity.GetIdentityID(), storageResource)
case kedav1alpha1.PodIdentityProviderAzureWorkload:
- token, err = GetAzureADWorkloadIdentityToken(ctx, podIdentity.IdentityID, storageResource)
+ token, err = GetAzureADWorkloadIdentityToken(ctx, podIdentity.GetIdentityID(), storageResource)
}
if err != nil {
diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go
index 9324c742095..8b154364b21 100644
--- a/pkg/scalers/azure_blob_scaler.go
+++ b/pkg/scalers/azure_blob_scaler.go
@@ -142,14 +142,6 @@ func parseAzureBlobMetadata(config *ScalerConfig, logger logr.Logger) (*azure.Bl
if val, ok := config.TriggerMetadata["useAAdPodIdentity"]; ok && config.PodIdentity.Provider == "" && val == stringTrue {
config.PodIdentity = kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzure}
}
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.MetricName = kedautil.NormalizeString(fmt.Sprintf("azure-blob-%s", val))
- } else {
- meta.MetricName = kedautil.NormalizeString(fmt.Sprintf("azure-blob-%s", meta.BlobContainerName))
- }
-
// If the Use AAD Pod Identity is not present, or set to "none"
// then check for connection string
switch config.PodIdentity.Provider {
@@ -173,7 +165,7 @@ func parseAzureBlobMetadata(config *ScalerConfig, logger logr.Logger) (*azure.Bl
return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no accountName given")
}
default:
- return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage blobs", config.PodIdentity)
+ return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage blobs", config.PodIdentity.Provider)
}
meta.ScalerIndex = config.ScalerIndex
@@ -188,7 +180,7 @@ func (s *azureBlobScaler) Close(context.Context) error {
func (s *azureBlobScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.ScalerIndex, s.metadata.MetricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-blob-%s", s.metadata.BlobContainerName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.TargetBlobCount),
}
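
The pattern repeated across the scalers below: the stored metricName field (and its deprecated metricName trigger override) is removed, and GetMetricSpecForScaling derives the name on the fly. A sketch of the composition, with normalizeString approximated from the test expectations in this PR ("127.0.0.1" becomes "127-0-0-1", underscores survive) and the "s{index}-" prefix inferred from names like "s0-azure-blob-sample"; the real helpers live in the scalers and util packages.

package main

import (
	"fmt"
	"strings"
)

// normalizeString approximates kedautil.NormalizeString based on the test
// expectations above: separator-ish characters become dashes, underscores
// are kept as-is.
func normalizeString(s string) string {
	return strings.NewReplacer("/", "-", ".", "-", ":", "-", "%", "-").Replace(s)
}

// generateMetricNameWithIndex mirrors the "s{index}-{name}" shape of the
// expected names in the tests ("s0-azure-blob-sample", "s1-graphite", ...).
func generateMetricNameWithIndex(index int, name string) string {
	return fmt.Sprintf("s%d-%s", index, name)
}

func main() {
	fmt.Println(generateMetricNameWithIndex(1, normalizeString("azure-blob-sample_container"))) // s1-azure-blob-sample_container
	fmt.Println(generateMetricNameWithIndex(0, normalizeString("mssql-127.0.0.1")))             // s0-mssql-127-0-0-1
}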
diff --git a/pkg/scalers/azure_blob_scaler_test.go b/pkg/scalers/azure_blob_scaler_test.go
index a36262a0372..abe2bf481e6 100644
--- a/pkg/scalers/azure_blob_scaler_test.go
+++ b/pkg/scalers/azure_blob_scaler_test.go
@@ -49,8 +49,6 @@ var testAzBlobMetadata = []parseAzBlobMetadataTestData{
{map[string]string{}, true, testAzBlobResolvedEnv, map[string]string{}, ""},
// properly formed
{map[string]string{"connectionFromEnv": "CONNECTION", "blobContainerName": "sample", "blobCount": "5", "blobDelimiter": "/", "blobPrefix": "blobsubpath"}, false, testAzBlobResolvedEnv, map[string]string{}, ""},
- // properly formed with metricName
- {map[string]string{"connectionFromEnv": "CONNECTION", "blobContainerName": "sample", "blobCount": "5", "blobDelimiter": "/", "blobPrefix": "blobsubpath", "metricName": "customname"}, false, testAzBlobResolvedEnv, map[string]string{}, ""},
// Empty blobcontainerName
{map[string]string{"connectionFromEnv": "CONNECTION", "blobContainerName": ""}, true, testAzBlobResolvedEnv, map[string]string{}, ""},
// improperly formed blobCount
@@ -105,8 +103,7 @@ var testAzBlobMetadata = []parseAzBlobMetadataTestData{
var azBlobMetricIdentifiers = []azBlobMetricIdentifier{
{&testAzBlobMetadata[1], 0, "s0-azure-blob-sample"},
- {&testAzBlobMetadata[2], 1, "s1-azure-blob-customname"},
- {&testAzBlobMetadata[6], 2, "s2-azure-blob-sample_container"},
+ {&testAzBlobMetadata[5], 1, "s1-azure-blob-sample_container"},
}
func TestAzBlobParseMetadata(t *testing.T) {
diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go
index 0c4f3b25fed..77cb7b74a39 100644
--- a/pkg/scalers/azure_log_analytics_scaler.go
+++ b/pkg/scalers/azure_log_analytics_scaler.go
@@ -65,7 +65,6 @@ type azureLogAnalyticsMetadata struct {
query string
threshold float64
activationThreshold float64
- metricName string // Custom metric name for trigger
scalerIndex int
logAnalyticsResourceURL string
activeDirectoryEndpoint string
@@ -163,7 +162,7 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
meta.podIdentity = config.PodIdentity
default:
- return nil, fmt.Errorf("error parsing metadata. Details: Log Analytics Scaler doesn't support pod identity %s", config.PodIdentity)
+ return nil, fmt.Errorf("error parsing metadata. Details: Log Analytics Scaler doesn't support pod identity %s", config.PodIdentity.Provider)
}
// Getting workspaceId
@@ -180,7 +179,7 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
}
meta.query = query
- // Getting threshold, observe that we dont check AuthParams for threshold
+ // Getting threshold, observe that we don't check AuthParams for threshold
val, err := getParameterFromConfig(config, "threshold", false)
if err != nil {
return nil, err
@@ -201,16 +200,6 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
}
meta.activationThreshold = activationThreshold
}
-
- // Resolve metricName
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("%s-%s", "azure-log-analytics", val))
- } else {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("%s-%s", "azure-log-analytics", meta.workspaceID))
- }
-
meta.scalerIndex = config.ScalerIndex
meta.logAnalyticsResourceURL = defaultLogAnalyticsResourceURL
@@ -234,7 +223,7 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
}
meta.activeDirectoryEndpoint = activeDirectoryEndpoint
- // Getting unsafeSsl, observe that we dont check AuthParams for unsafeSsl
+ // Getting unsafeSsl, observe that we don't check AuthParams for unsafeSsl
meta.unsafeSsl = false
unsafeSslVal, err := getParameterFromConfig(config, "unsafeSsl", false)
if err == nil {
@@ -264,7 +253,7 @@ func getParameterFromConfig(config *ScalerConfig, parameter string, checkAuthPar
func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("%s-%s", "azure-log-analytics", s.metadata.workspaceID))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
@@ -480,7 +469,7 @@ func (s *azureLogAnalyticsScaler) getAuthorizationToken(ctx context.Context) (to
switch s.metadata.podIdentity.Provider {
case kedav1alpha1.PodIdentityProviderAzureWorkload:
- aadToken, err := azure.GetAzureADWorkloadIdentityToken(ctx, s.metadata.podIdentity.IdentityID, s.metadata.logAnalyticsResourceURL)
+ aadToken, err := azure.GetAzureADWorkloadIdentityToken(ctx, s.metadata.podIdentity.GetIdentityID(), s.metadata.logAnalyticsResourceURL)
if err != nil {
return tokenData{}, nil
}
@@ -565,10 +554,10 @@ func (s *azureLogAnalyticsScaler) executeAADApicall(ctx context.Context) ([]byte
func (s *azureLogAnalyticsScaler) executeIMDSApicall(ctx context.Context) ([]byte, int, error) {
var urlStr string
- if s.metadata.podIdentity.IdentityID == "" {
+ if s.metadata.podIdentity.GetIdentityID() == "" {
urlStr = fmt.Sprintf(azure.MSIURL, s.metadata.logAnalyticsResourceURL)
} else {
- urlStr = fmt.Sprintf(azure.MSIURLWithClientID, s.metadata.logAnalyticsResourceURL, url.QueryEscape(s.metadata.podIdentity.IdentityID))
+ urlStr = fmt.Sprintf(azure.MSIURLWithClientID, s.metadata.logAnalyticsResourceURL, url.QueryEscape(s.metadata.podIdentity.GetIdentityID()))
}
request, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil)
diff --git a/pkg/scalers/azure_log_analytics_scaler_test.go b/pkg/scalers/azure_log_analytics_scaler_test.go
index 26fa82b4941..d7e4249b008 100644
--- a/pkg/scalers/azure_log_analytics_scaler_test.go
+++ b/pkg/scalers/azure_log_analytics_scaler_test.go
@@ -213,33 +213,6 @@ func TestLogAnalyticsGetMetricSpecForScaling(t *testing.T) {
}
}
-type parseMetadataMetricNameTestData struct {
- metadata map[string]string
- scalerIndex int
- metricName string
-}
-
-var testParseMetadataMetricName = []parseMetadataMetricNameTestData{
- // WorkspaceID
- {map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000"}, 0, "azure-log-analytics-074dd9f8-c368-4220-9400-acb6e80fc325"},
- // Custom Name
- {map[string]string{"metricName": "testName", "tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000"}, 1, "azure-log-analytics-testName"},
-}
-
-func TestLogAnalyticsParseMetadataMetricName(t *testing.T) {
- for _, testData := range testParseMetadataMetricName {
- meta, err := parseAzureLogAnalyticsMetadata(&ScalerConfig{ResolvedEnv: sampleLogAnalyticsResolvedEnv,
- TriggerMetadata: testData.metadata, AuthParams: nil,
- PodIdentity: kedav1alpha1.AuthPodIdentity{}, ScalerIndex: testData.scalerIndex})
- if err != nil {
- t.Error("Expected success but got error", err)
- }
- if meta.metricName != testData.metricName {
- t.Errorf("Expected %s but got %s", testData.metricName, meta.metricName)
- }
- }
-}
-
type parseLogAnalyticsMetadataTestUnsafeSsl struct {
metadata map[string]string
unsafeSsl bool
diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go
index f4ef32ca953..f6a044814ec 100644
--- a/pkg/scalers/azure_monitor_scaler.go
+++ b/pkg/scalers/azure_monitor_scaler.go
@@ -207,7 +207,7 @@ func parseAzurePodIdentityParams(config *ScalerConfig) (clientID string, clientP
case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
// no params required to be parsed
default:
- return "", "", fmt.Errorf("azure Monitor doesn't support pod identity %s", config.PodIdentity)
+ return "", "", fmt.Errorf("azure Monitor doesn't support pod identity %s", config.PodIdentity.Provider)
}
return clientID, clientPassword, nil
diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go
index 1088bd7e5b9..23fcd15815f 100644
--- a/pkg/scalers/azure_queue_scaler.go
+++ b/pkg/scalers/azure_queue_scaler.go
@@ -150,7 +150,7 @@ func parseAzureQueueMetadata(config *ScalerConfig, logger logr.Logger) (*azureQu
return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("no accountName given")
}
default:
- return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage queues", config.PodIdentity)
+ return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("pod identity %s not supported for azure storage queues", config.PodIdentity.Provider)
}
meta.scalerIndex = config.ScalerIndex
diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go
index 977d60b4c9b..7ad8ee71acd 100755
--- a/pkg/scalers/azure_servicebus_scaler.go
+++ b/pkg/scalers/azure_servicebus_scaler.go
@@ -214,7 +214,7 @@ func parseAzureServiceBusMetadata(config *ScalerConfig, logger logr.Logger) (*az
}
default:
- return nil, fmt.Errorf("azure service bus doesn't support pod identity %s", config.PodIdentity)
+ return nil, fmt.Errorf("azure service bus doesn't support pod identity %s", config.PodIdentity.Provider)
}
meta.scalerIndex = config.ScalerIndex
@@ -297,7 +297,7 @@ func (s *azureServiceBusScaler) getServiceBusAdminClient() (*admin.Client, error
case "", kedav1alpha1.PodIdentityProviderNone:
client, err = admin.NewClientFromConnectionString(s.metadata.connection, nil)
case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
- creds, chainedErr := azure.NewChainedCredential(s.podIdentity.IdentityID, s.podIdentity.Provider)
+ creds, chainedErr := azure.NewChainedCredential(s.podIdentity.GetIdentityID(), s.podIdentity.Provider)
if chainedErr != nil {
return nil, chainedErr
}
diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go
index 35a3c29415d..3ee539d1623 100644
--- a/pkg/scalers/cassandra_scaler.go
+++ b/pkg/scalers/cassandra_scaler.go
@@ -35,7 +35,6 @@ type CassandraMetadata struct {
query string
targetQueryValue int64
activationTargetQueryValue int64
- metricName string
scalerIndex int
}
@@ -147,14 +146,6 @@ func parseCassandraMetadata(config *ScalerConfig) (*CassandraMetadata, error) {
} else {
return nil, fmt.Errorf("no keyspace given")
}
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", val))
- } else {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", meta.keyspace))
- }
-
if val, ok := config.AuthParams["password"]; ok {
meta.password = val
} else {
@@ -189,7 +180,7 @@ func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Se
func (s *cassandraScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("cassandra-%s", s.metadata.keyspace))),
},
Target: GetMetricTarget(s.metricType, s.metadata.targetQueryValue),
}
diff --git a/pkg/scalers/cassandra_scaler_test.go b/pkg/scalers/cassandra_scaler_test.go
index bdf4e69c818..816e82dc9ee 100644
--- a/pkg/scalers/cassandra_scaler_test.go
+++ b/pkg/scalers/cassandra_scaler_test.go
@@ -24,29 +24,29 @@ var testCassandraMetadata = []parseCassandraMetadataTestData{
// nothing passed
{map[string]string{}, true, map[string]string{}},
// everything is passed in verbatim
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
- // no metricName passed, metricName is generated from keyspace
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ // metricName is generated from keyspace
{map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no query passed
- {map[string]string{"targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no targetQueryValue passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no username passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no port passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no clusterIPAddress passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no keyspace passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "ScalerIndex": "0"}, true, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
// no password passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, true, map[string]string{}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "clusterIPAddress": "cassandra.test:9042", "keyspace": "test_keyspace", "ScalerIndex": "0"}, true, map[string]string{}},
// fix issue[4110] passed
- {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "https://cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0", "metricName": "myMetric"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
+ {map[string]string{"query": "SELECT COUNT(*) FROM test_keyspace.test_table;", "targetQueryValue": "1", "username": "cassandra", "port": "9042", "clusterIPAddress": "https://cassandra.test", "keyspace": "test_keyspace", "ScalerIndex": "0"}, false, map[string]string{"password": "Y2Fzc2FuZHJhCg=="}},
}
var cassandraMetricIdentifiers = []cassandraMetricIdentifier{
- {&testCassandraMetadata[1], 0, "s0-cassandra-myMetric"},
+ {&testCassandraMetadata[1], 0, "s0-cassandra-test_keyspace"},
{&testCassandraMetadata[2], 1, "s1-cassandra-test_keyspace"},
}
diff --git a/pkg/scalers/couchdb_scaler.go b/pkg/scalers/couchdb_scaler.go
index b2a522f703a..507a5848f97 100644
--- a/pkg/scalers/couchdb_scaler.go
+++ b/pkg/scalers/couchdb_scaler.go
@@ -38,7 +38,6 @@ type couchDBMetadata struct {
query string
queryValue int64
activationQueryValue int64
- metricName string
scalerIndex int
}
@@ -51,7 +50,7 @@ type Res struct {
func (s *couchDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", s.metadata.dbName))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queryValue),
}
@@ -176,9 +175,6 @@ func parseCouchDBMetadata(config *ScalerConfig) (*couchDBMetadata, string, error
// nosemgrep: db-connection-string
connStr = "http://" + addr
}
-
- // FIXME: DEPRECATED to be removed in v2.12
- meta.metricName = GenerateMetricNameWithIndex(config.ScalerIndex, kedautil.NormalizeString(fmt.Sprintf("coucdb-%s", meta.dbName)))
meta.scalerIndex = config.ScalerIndex
return &meta, connStr, nil
}
diff --git a/pkg/scalers/couchdb_scaler_test.go b/pkg/scalers/couchdb_scaler_test.go
index 03eff1d63e4..8abde9b0797 100644
--- a/pkg/scalers/couchdb_scaler_test.go
+++ b/pkg/scalers/couchdb_scaler_test.go
@@ -66,8 +66,8 @@ var testCOUCHDBMetadata = []parseCouchDBMetadataTestData{
}
var couchDBMetricIdentifiers = []couchDBMetricIdentifier{
- {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 0, name: "s0-s0-coucdb-animals"},
- {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 1, name: "s1-s1-coucdb-animals"},
+ {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 0, name: "s0-coucdb-animals"},
+ {metadataTestData: &testCOUCHDBMetadata[2], scalerIndex: 1, name: "s1-coucdb-animals"},
}
func TestParseCouchDBMetadata(t *testing.T) {
diff --git a/pkg/scalers/graphite_scaler.go b/pkg/scalers/graphite_scaler.go
index e72f360143b..58526d20ab4 100644
--- a/pkg/scalers/graphite_scaler.go
+++ b/pkg/scalers/graphite_scaler.go
@@ -17,11 +17,7 @@ import (
)
const (
- graphiteServerAddress = "serverAddress"
-
- // FIXME: DEPRECATED to be removed in v2.12
- graphiteMetricName = "metricName"
-
+ graphiteServerAddress = "serverAddress"
graphiteQuery = "query"
graphiteThreshold = "threshold"
graphiteActivationThreshold = "activationThreshold"
@@ -39,7 +35,6 @@ type graphiteScaler struct {
type graphiteMetadata struct {
serverAddress string
- metricName string
query string
threshold float64
activationThreshold float64
@@ -95,13 +90,6 @@ func parseGraphiteMetadata(config *ScalerConfig) (*graphiteMetadata, error) {
return nil, fmt.Errorf("no %s given", graphiteQuery)
}
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata[graphiteMetricName]; ok && val != "" {
- meta.metricName = val
- } else {
- meta.metricName = "graphite"
- }
-
if val, ok := config.TriggerMetadata[graphiteQueryTime]; ok && val != "" {
meta.from = val
} else {
@@ -159,7 +147,7 @@ func (s *graphiteScaler) Close(context.Context) error {
func (s *graphiteScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("graphite-%s", s.metadata.metricName))),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "graphite"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.threshold),
}
diff --git a/pkg/scalers/graphite_scaler_test.go b/pkg/scalers/graphite_scaler_test.go
index a7cbcee8141..fc30c8671c5 100644
--- a/pkg/scalers/graphite_scaler_test.go
+++ b/pkg/scalers/graphite_scaler_test.go
@@ -24,13 +24,13 @@ type graphiteMetricIdentifier struct {
var testGrapMetadata = []parseGraphiteMetadataTestData{
{map[string]string{}, true},
// all properly formed
- {map[string]string{"serverAddress": "http://localhost:81", "metricName": "request-count", "threshold": "100", "activationThreshold": "23", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, false},
+ {map[string]string{"serverAddress": "http://localhost:81", "threshold": "100", "activationThreshold": "23", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, false},
// missing serverAddress
- {map[string]string{"serverAddress": "", "metricName": "request-count", "threshold": "100", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
+ {map[string]string{"serverAddress": "", "threshold": "100", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
// malformed threshold
- {map[string]string{"serverAddress": "http://localhost:81", "metricName": "request-count", "threshold": "one", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
+ {map[string]string{"serverAddress": "http://localhost:81", "threshold": "one", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
// malformed activationThreshold
- {map[string]string{"serverAddress": "http://localhost:81", "metricName": "request-count", "threshold": "100", "activationThreshold": "one", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
+ {map[string]string{"serverAddress": "http://localhost:81", "threshold": "100", "activationThreshold": "one", "query": "stats.counters.http.hello-world.request.count.count", "queryTime": "-30Seconds"}, true},
// missing query
{map[string]string{"serverAddress": "http://localhost:81", "metricName": "request-count", "threshold": "100", "query": "", "queryTime": "-30Seconds", "disableScaleToZero": "true"}, true},
// missing queryTime
@@ -38,8 +38,8 @@ var testGrapMetadata = []parseGraphiteMetadataTestData{
}
var graphiteMetricIdentifiers = []graphiteMetricIdentifier{
- {&testGrapMetadata[1], 0, "s0-graphite-request-count"},
- {&testGrapMetadata[1], 1, "s1-graphite-request-count"},
+ {&testGrapMetadata[1], 0, "s0-graphite"},
+ {&testGrapMetadata[1], 1, "s1-graphite"},
}
type graphiteAuthMetadataTestData struct {
diff --git a/pkg/scalers/influxdb_scaler.go b/pkg/scalers/influxdb_scaler.go
index ab51b4fabd8..36aa59230e1 100644
--- a/pkg/scalers/influxdb_scaler.go
+++ b/pkg/scalers/influxdb_scaler.go
@@ -23,7 +23,6 @@ type influxDBScaler struct {
type influxDBMetadata struct {
authToken string
- metricName string
organizationName string
query string
serverURL string
@@ -64,7 +63,6 @@ func NewInfluxDBScaler(config *ScalerConfig) (Scaler, error) {
// parseInfluxDBMetadata parses the metadata passed in from the ScaledObject config
func parseInfluxDBMetadata(config *ScalerConfig) (*influxDBMetadata, error) {
var authToken string
- var metricName string
var organizationName string
var query string
var serverURL string
@@ -118,13 +116,6 @@ func parseInfluxDBMetadata(config *ScalerConfig) (*influxDBMetadata, error) {
return nil, fmt.Errorf("no server url given")
}
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- metricName = util.NormalizeString(fmt.Sprintf("influxdb-%s", val))
- } else {
- metricName = util.NormalizeString(fmt.Sprintf("influxdb-%s", organizationName))
- }
-
if val, ok := config.TriggerMetadata["activationThresholdValue"]; ok {
value, err := strconv.ParseFloat(val, 64)
if err != nil {
@@ -153,7 +144,6 @@ func parseInfluxDBMetadata(config *ScalerConfig) (*influxDBMetadata, error) {
return &influxDBMetadata{
authToken: authToken,
- metricName: metricName,
organizationName: organizationName,
query: query,
serverURL: serverURL,
@@ -213,7 +203,7 @@ func (s *influxDBScaler) GetMetricsAndActivity(ctx context.Context, metricName s
func (s *influxDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, util.NormalizeString(fmt.Sprintf("influxdb-%s", s.metadata.organizationName))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.thresholdValue),
}
diff --git a/pkg/scalers/influxdb_scaler_test.go b/pkg/scalers/influxdb_scaler_test.go
index 354799a339d..dcbab435664 100644
--- a/pkg/scalers/influxdb_scaler_test.go
+++ b/pkg/scalers/influxdb_scaler_test.go
@@ -51,7 +51,7 @@ var testInfluxDBMetadata = []parseInfluxDBMetadataTestData{
}
var influxDBMetricIdentifiers = []influxDBMetricIdentifier{
- {&testInfluxDBMetadata[1], 0, "s0-influxdb-influx_metric"},
+ {&testInfluxDBMetadata[1], 0, "s0-influxdb-influx_org"},
{&testInfluxDBMetadata[2], 1, "s1-influxdb-influx_org"},
}
diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go
index cc6bd729d98..203d920c047 100644
--- a/pkg/scalers/mongo_scaler.go
+++ b/pkg/scalers/mongo_scaler.go
@@ -34,16 +34,16 @@ type mongoDBMetadata struct {
// The string is used by connected with mongoDB.
// +optional
connectionString string
- // Specify the host to connect to the mongoDB server,if the connectionString be provided, don't need specify this param.
+ // Specify the host to connect to the mongoDB server; not needed if connectionString is provided.
// +optional
host string
- // Specify the port to connect to the mongoDB server,if the connectionString be provided, don't need specify this param.
+ // Specify the port to connect to the mongoDB server; not needed if connectionString is provided.
// +optional
port string
- // Specify the username to connect to the mongoDB server,if the connectionString be provided, don't need specify this param.
+ // Specify the username to connect to the mongoDB server; not needed if connectionString is provided.
// +optional
username string
- // Specify the password to connect to the mongoDB server,if the connectionString be provided, don't need specify this param.
+ // Specify the password to connect to the mongoDB server; not needed if connectionString is provided.
// +optional
password string
@@ -62,9 +62,6 @@ type mongoDBMetadata struct {
// A threshold that is used to check if scaler is active
// +optional
activationQueryValue int64
- // The name of the metric to use in the Horizontal Pod Autoscaler. This value will be prefixed with "mongodb-".
- // +optional
- metricName string
// The index of the scaler inside the ScaledObject
// +internal
@@ -197,13 +194,6 @@ func parseMongoDBMetadata(config *ScalerConfig) (*mongoDBMetadata, string, error
// nosemgrep: db-connection-string
connStr = fmt.Sprintf("mongodb://%s:%s@%s/%s", url.QueryEscape(meta.username), url.QueryEscape(meta.password), addr, meta.dbName)
}
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", val))
- } else {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", meta.collection))
- }
meta.scalerIndex = config.ScalerIndex
return &meta, connStr, nil
}
@@ -257,7 +247,7 @@ func (s *mongoDBScaler) GetMetricsAndActivity(ctx context.Context, metricName st
func (s *mongoDBScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("mongodb-%s", s.metadata.collection))),
},
Target: GetMetricTarget(s.metricType, s.metadata.queryValue),
}
diff --git a/pkg/scalers/mongo_scaler_test.go b/pkg/scalers/mongo_scaler_test.go
index 3ecf4bdd2f6..c3c833b91fe 100644
--- a/pkg/scalers/mongo_scaler_test.go
+++ b/pkg/scalers/mongo_scaler_test.go
@@ -49,28 +49,28 @@ var testMONGODBMetadata = []parseMongoDBMetadataTestData{
},
// with metric name
{
- metadata: map[string]string{"query": `{"name":"John"}`, "metricName": "hpa", "collection": "demo", "queryValue": "12", "connectionStringFromEnv": "MongoDB_CONN_STR", "dbName": "test"},
+ metadata: map[string]string{"query": `{"name":"John"}`, "collection": "demo", "queryValue": "12", "connectionStringFromEnv": "MongoDB_CONN_STR", "dbName": "test"},
authParams: map[string]string{},
resolvedEnv: testMongoDBResolvedEnv,
raisesError: false,
},
// from passwordFromEnv
{
- metadata: map[string]string{"query": `{"name":"John"}`, "metricName": "hpa", "collection": "demo", "queryValue": "12", "passwordFromEnv": "MongoDB_PASSWORD"},
+ metadata: map[string]string{"query": `{"name":"John"}`, "collection": "demo", "queryValue": "12", "passwordFromEnv": "MongoDB_PASSWORD"},
authParams: map[string]string{"dbName": "test", "host": "localhost", "port": "1234", "username": "sample"},
resolvedEnv: testMongoDBResolvedEnv,
raisesError: false,
},
// from trigger auth
{
- metadata: map[string]string{"query": `{"name":"John"}`, "metricName": "hpa", "collection": "demo", "queryValue": "12"},
+ metadata: map[string]string{"query": `{"name":"John"}`, "collection": "demo", "queryValue": "12"},
authParams: map[string]string{"dbName": "test", "host": "localhost", "port": "1234", "username": "sample", "password": "sec@ure"},
resolvedEnv: testMongoDBResolvedEnv,
raisesError: false,
},
// wrong activationQueryValue
{
- metadata: map[string]string{"query": `{"name":"John"}`, "metricName": "hpa", "collection": "demo", "queryValue": "12", "activationQueryValue": "aa", "connectionStringFromEnv": "Mongo_CONN_STR", "dbName": "test"},
+ metadata: map[string]string{"query": `{"name":"John"}`, "collection": "demo", "queryValue": "12", "activationQueryValue": "aa", "connectionStringFromEnv": "Mongo_CONN_STR", "dbName": "test"},
authParams: map[string]string{},
resolvedEnv: testMongoDBResolvedEnv,
raisesError: true,
@@ -84,8 +84,8 @@ var mongoDBConnectionStringTestDatas = []mongoDBConnectionStringTestData{
}
var mongoDBMetricIdentifiers = []mongoDBMetricIdentifier{
- {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 0, name: "s0-mongodb-hpa"},
- {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 1, name: "s1-mongodb-hpa"},
+ {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 0, name: "s0-mongodb-demo"},
+ {metadataTestData: &testMONGODBMetadata[2], scalerIndex: 1, name: "s1-mongodb-demo"},
}
func TestParseMongoDBMetadata(t *testing.T) {
@@ -121,7 +121,7 @@ func TestMongoDBGetMetricSpecForScaling(t *testing.T) {
metricSpec := mockMongoDBScaler.GetMetricSpecForScaling(context.Background())
metricName := metricSpec[0].External.Metric.Name
if metricName != testData.name {
- t.Error("Wrong External metric source name:", metricName)
+ t.Error("Wrong External metric source name:", metricName, "Expected", testData.name)
}
}
}
diff --git a/pkg/scalers/mssql_scaler.go b/pkg/scalers/mssql_scaler.go
index 54d4e27e2bf..285b6e9acb6 100644
--- a/pkg/scalers/mssql_scaler.go
+++ b/pkg/scalers/mssql_scaler.go
@@ -14,8 +14,6 @@ import (
"github.com/go-logr/logr"
v2 "k8s.io/api/autoscaling/v2"
"k8s.io/metrics/pkg/apis/external_metrics"
-
- kedautil "github.com/kedacore/keda/v2/pkg/util"
)
var (
@@ -64,9 +62,6 @@ type mssqlMetadata struct {
// The threshold that is used in activation phase
// +optional
activationTargetValue float64
- // The name of the metric to use in the Horizontal Pod Autoscaler. This value will be prefixed with "mssql-".
- // +optional
- metricName string
// The index of the scaler inside the ScaledObject
// +internal
scalerIndex int
@@ -168,22 +163,6 @@ func parseMSSQLMetadata(config *ScalerConfig) (*mssqlMetadata, error) {
meta.password = config.ResolvedEnv[config.TriggerMetadata["passwordFromEnv"]]
}
}
-
- // get the metricName, which can be explicit or from the (masked) connection string
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("mssql-%s", val))
- } else {
- switch {
- case meta.database != "":
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("mssql-%s", meta.database))
- case meta.host != "":
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("mssql-%s", meta.host))
- default:
- meta.metricName = "mssql"
- }
- }
meta.scalerIndex = config.ScalerIndex
return &meta, nil
}
@@ -244,7 +223,7 @@ func getMSSQLConnectionString(meta *mssqlMetadata) string {
func (s *mssqlScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, "mssql"),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetValue),
}
diff --git a/pkg/scalers/mssql_scaler_test.go b/pkg/scalers/mssql_scaler_test.go
index 2b56d7eab66..734be3e234f 100644
--- a/pkg/scalers/mssql_scaler_test.go
+++ b/pkg/scalers/mssql_scaler_test.go
@@ -1,6 +1,7 @@
package scalers
import (
+ "context"
"errors"
"testing"
)
@@ -17,7 +18,13 @@ type mssqlTestData struct {
expectedError error
}
-var testInputs = []mssqlTestData{
+type mssqlMetricIdentifier struct {
+ metadataTestData *mssqlTestData
+ scalerIndex int
+ name string
+}
+
+var testMssqlMetadata = []mssqlTestData{
// direct connection string input
{
metadata: map[string]string{"query": "SELECT 1", "targetValue": "1"},
@@ -39,28 +46,26 @@ var testInputs = []mssqlTestData{
authParams: map[string]string{"connectionString": "Server=example.database.windows.net;port=1433;Database=AdventureWorks;Persist Security Info=False;User ID=user1;Password=Password#1;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;"},
expectedConnectionString: "Server=example.database.windows.net;port=1433;Database=AdventureWorks;Persist Security Info=False;User ID=user1;Password=Password#1;MultipleActiveResultSets=False;Encrypt=True;TrustServerCertificate=False;Connection Timeout=30;",
},
- // connection string input via environment variables + explicit metricName
+ // connection string input via environment variables
{
- metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "connectionStringFromEnv": "test_connection_string", "metricName": "myMetric"},
+ metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "connectionStringFromEnv": "test_connection_string"},
resolvedEnv: map[string]string{"test_connection_string": "sqlserver://localhost?database=AdventureWorks"},
authParams: map[string]string{},
expectedConnectionString: "sqlserver://localhost?database=AdventureWorks",
- expectedMetricName: "mssql-myMetric",
},
// connection string generated from minimal required metadata
{
metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "host": "127.0.0.1"},
resolvedEnv: map[string]string{},
authParams: map[string]string{},
- expectedMetricName: "mssql-127-0-0-1",
+ expectedMetricName: "mssql",
expectedConnectionString: "sqlserver://127.0.0.1",
},
// connection string generated from full metadata
{
- metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "host": "example.database.windows.net", "username": "user1", "passwordFromEnv": "test_password", "port": "1433", "database": "AdventureWorks", "metricName": "myMetric1"},
+ metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "host": "example.database.windows.net", "username": "user1", "passwordFromEnv": "test_password", "port": "1433", "database": "AdventureWorks"},
resolvedEnv: map[string]string{"test_password": "Password#1"},
authParams: map[string]string{},
- expectedMetricName: "mssql-myMetric1",
expectedConnectionString: "sqlserver://user1:Password%231@example.database.windows.net:1433?database=AdventureWorks",
},
- // variation of previous: no port, password from authParams, metricName from database name
+ // variation of previous: no port, password from authParams
@@ -68,7 +73,7 @@ var testInputs = []mssqlTestData{
metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "host": "example.database.windows.net", "username": "user2", "database": "AdventureWorks"},
resolvedEnv: map[string]string{},
authParams: map[string]string{"password": "Password#2"},
- expectedMetricName: "mssql-AdventureWorks",
+ expectedMetricName: "mssql",
expectedConnectionString: "sqlserver://user2:Password%232@example.database.windows.net?database=AdventureWorks",
},
// connection string generated from full authParams
@@ -76,7 +81,7 @@ var testInputs = []mssqlTestData{
metadata: map[string]string{"query": "SELECT 1", "targetValue": "1"},
resolvedEnv: map[string]string{},
authParams: map[string]string{"password": "Password#2", "host": "example.database.windows.net", "username": "user2", "database": "AdventureWorks", "port": "1433"},
- expectedMetricName: "mssql-AdventureWorks",
+ expectedMetricName: "mssql",
expectedConnectionString: "sqlserver://user2:Password%232@example.database.windows.net:1433?database=AdventureWorks",
},
- // variation of previous: no database name, metricName from host
+ // variation of previous: no database name
@@ -84,7 +89,7 @@ var testInputs = []mssqlTestData{
metadata: map[string]string{"query": "SELECT 1", "targetValue": "1", "host": "example.database.windows.net", "username": "user3"},
resolvedEnv: map[string]string{},
authParams: map[string]string{"password": "Password#3"},
- expectedMetricName: "mssql-example-database-windows-net",
+ expectedMetricName: "mssql",
expectedConnectionString: "sqlserver://user3:Password%233@example.database.windows.net",
},
// Error: missing query
@@ -110,8 +115,13 @@ var testInputs = []mssqlTestData{
},
}
+var mssqlMetricIdentifiers = []mssqlMetricIdentifier{
+ {&testMssqlMetadata[0], 0, "s0-mssql"},
+ {&testMssqlMetadata[1], 1, "s1-mssql"},
+}
+
func TestMSSQLMetadataParsing(t *testing.T) {
- for _, testData := range testInputs {
+ for _, testData := range testMssqlMetadata {
var config = ScalerConfig{
ResolvedEnv: testData.resolvedEnv,
TriggerMetadata: testData.metadata,
@@ -143,9 +153,30 @@ func TestMSSQLMetadataParsing(t *testing.T) {
if testData.expectedConnectionString != outputConnectionString {
t.Errorf("Wrong connection string. Expected '%s' but got '%s'", testData.expectedConnectionString, outputConnectionString)
}
+ }
+}
+
+func TestMSSQLGetMetricSpecForScaling(t *testing.T) {
+ for _, testData := range mssqlMetricIdentifiers {
+ ctx := context.Background()
+ var config = ScalerConfig{
+ ResolvedEnv: testData.metadataTestData.resolvedEnv,
+ TriggerMetadata: testData.metadataTestData.metadata,
+ AuthParams: testData.metadataTestData.authParams,
+ ScalerIndex: testData.scalerIndex,
+ }
+ meta, err := parseMSSQLMetadata(&config)
+ if err != nil {
+ t.Fatal("Could not parse metadata:", err)
+ }
- if testData.expectedMetricName != "" && testData.expectedMetricName != outputMetadata.metricName {
- t.Errorf("Wrong metric name. Expected '%s' but got '%s'", testData.expectedMetricName, outputMetadata.metricName)
+ mockMssqlScaler := mssqlScaler{
+ metadata: meta,
+ }
+ metricSpec := mockMssqlScaler.GetMetricSpecForScaling(ctx)
+ metricName := metricSpec[0].External.Metric.Name
+ if metricName != testData.name {
+ t.Error("Wrong External metric source name:", metricName, testData.name)
}
}
}
diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go
index 789817641ae..e9aa6d562a5 100644
--- a/pkg/scalers/postgresql_scaler.go
+++ b/pkg/scalers/postgresql_scaler.go
@@ -28,7 +28,6 @@ type postgreSQLMetadata struct {
activationTargetQueryValue float64
connection string
query string
- metricName string
scalerIndex int
}
@@ -134,13 +133,6 @@ func parsePostgreSQLMetadata(config *ScalerConfig) (*postgreSQLMetadata, error)
params = append(params, "password="+escapePostgreConnectionParameter(password))
meta.connection = strings.Join(params, " ")
}
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("postgresql-%s", val))
- } else {
- meta.metricName = kedautil.NormalizeString("postgresql")
- }
meta.scalerIndex = config.ScalerIndex
return &meta, nil
}
@@ -183,7 +175,7 @@ func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error)
func (s *postgreSQLScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString("postgresql")),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.targetQueryValue),
}
diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go
index a5910be97fb..4cde60ec88c 100644
--- a/pkg/scalers/prometheus_scaler.go
+++ b/pkg/scalers/prometheus_scaler.go
@@ -23,11 +23,7 @@ import (
)
const (
- promServerAddress = "serverAddress"
-
- // FIXME: DEPRECATED to be removed in v2.12
- promMetricName = "metricName"
-
+ promServerAddress = "serverAddress"
promQuery = "query"
promThreshold = "threshold"
promActivationThreshold = "activationThreshold"
@@ -51,7 +47,6 @@ type prometheusScaler struct {
type prometheusMetadata struct {
serverAddress string
- metricName string
query string
threshold float64
activationThreshold float64
@@ -109,8 +104,8 @@ func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) {
httpClient.Transport = transport
}
} else {
- // could be the case of azure managed prometheus. Try and get the roundtripper.
- // If its not the case of azure managed prometheus, we will get both transport and err as nil and proceed assuming no auth.
+ // could be the case of azure managed prometheus. Try and get the round-tripper.
+ // If it's not the case of azure managed prometheus, we will get both transport and err as nil and proceed assuming no auth.
azureTransport, err := azure.TryAndGetAzureManagedPrometheusHTTPRoundTripper(config.PodIdentity, config.TriggerMetadata)
if err != nil {
logger.V(1).Error(err, "error while init Azure Managed Prometheus client http transport")
@@ -156,13 +151,6 @@ func parsePrometheusMetadata(config *ScalerConfig) (meta *prometheusMetadata, er
return nil, fmt.Errorf("no %s given", promQuery)
}
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata[promMetricName]; ok && val != "" {
- meta.metricName = val
- } else {
- meta.metricName = "prometheus"
- }
-
if val, ok := config.TriggerMetadata[promThreshold]; ok && val != "" {
t, err := strconv.ParseFloat(val, 64)
if err != nil {
@@ -251,7 +239,7 @@ func (s *prometheusScaler) Close(context.Context) error {
}
func (s *prometheusScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
- metricName := kedautil.NormalizeString(fmt.Sprintf("prometheus-%s", s.metadata.metricName))
+ metricName := kedautil.NormalizeString("prometheus")
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName),
@@ -269,7 +257,7 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error
queryEscaped := url_pkg.QueryEscape(s.metadata.query)
url := fmt.Sprintf("%s/api/v1/query?query=%s&time=%s", s.metadata.serverAddress, queryEscaped, t)
- // set 'namespace' parameter for namespaced Prometheus requests (eg. for Thanos Querier)
+ // set 'namespace' parameter for namespaced Prometheus requests (e.g. for Thanos Querier)
if s.metadata.namespace != "" {
url = fmt.Sprintf("%s&namespace=%s", url, s.metadata.namespace)
}
@@ -324,7 +312,7 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error
if s.metadata.ignoreNullValues {
return 0, nil
}
- return -1, fmt.Errorf("prometheus metrics %s target may be lost, the result is empty", s.metadata.metricName)
+ return -1, fmt.Errorf("prometheus metrics 'prometheus' target may be lost, the result is empty")
} else if len(result.Data.Result) > 1 {
return -1, fmt.Errorf("prometheus query %s returned multiple elements", s.metadata.query)
}
@@ -334,7 +322,7 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error
if s.metadata.ignoreNullValues {
return 0, nil
}
- return -1, fmt.Errorf("prometheus metrics %s target may be lost, the value list is empty", s.metadata.metricName)
+ return -1, fmt.Errorf("prometheus metrics 'prometheus' target may be lost, the value list is empty")
} else if valueLen < 2 {
return -1, fmt.Errorf("prometheus query %s didn't return enough values", s.metadata.query)
}
diff --git a/pkg/scalers/prometheus_scaler_test.go b/pkg/scalers/prometheus_scaler_test.go
index 423d9fddd8f..7bb8a336804 100644
--- a/pkg/scalers/prometheus_scaler_test.go
+++ b/pkg/scalers/prometheus_scaler_test.go
@@ -35,7 +35,7 @@ var testPromMetadata = []parsePrometheusMetadataTestData{
// all properly formed
{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up"}, false},
// all properly formed, with namespace
- {map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "namespace": "foo"}, false},
+ {map[string]string{"serverAddress": "http://localhost:9090", "threshold": "100", "query": "up", "namespace": "foo"}, false},
// all properly formed, with ignoreNullValues
{map[string]string{"serverAddress": "http://localhost:9090", "metricName": "http_requests_total", "threshold": "100", "query": "up", "ignoreNullValues": "false"}, false},
// all properly formed, with activationThreshold
@@ -63,8 +63,8 @@ var testPromMetadata = []parsePrometheusMetadataTestData{
}
var prometheusMetricIdentifiers = []prometheusMetricIdentifier{
- {&testPromMetadata[1], 0, "s0-prometheus-http_requests_total"},
- {&testPromMetadata[1], 1, "s1-prometheus-http_requests_total"},
+ {&testPromMetadata[1], 0, "s0-prometheus"},
+ {&testPromMetadata[1], 1, "s1-prometheus"},
}
type prometheusAuthMetadataTestData struct {
diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go
index abdaea3ea02..64288b1285e 100644
--- a/pkg/scalers/rabbitmq_scaler.go
+++ b/pkg/scalers/rabbitmq_scaler.go
@@ -77,7 +77,6 @@ type rabbitMQMetadata struct {
excludeUnacknowledged bool // specify if the QueueLength value should exclude Unacknowledged messages (Ready messages only)
pageSize int64 // specify the page size if useRegex is enabled
operation string // specify the operation to apply in case of multiples queues
- metricName string // custom metric name for trigger
timeout time.Duration // custom http timeout for a specific trigger
scalerIndex int // scaler index
@@ -242,7 +241,7 @@ func parseRabbitMQMetadata(config *ScalerConfig) (*rabbitMQMetadata, error) {
if config.PodIdentity.Provider == v1alpha1.PodIdentityProviderAzureWorkload {
if config.AuthParams["workloadIdentityResource"] != "" {
- meta.workloadIdentityClientID = config.PodIdentity.IdentityID
+ meta.workloadIdentityClientID = config.PodIdentity.GetIdentityID()
meta.workloadIdentityResource = config.AuthParams["workloadIdentityResource"]
}
}
@@ -311,16 +310,6 @@ func parseRabbitMQMetadata(config *ScalerConfig) (*rabbitMQMetadata, error) {
if err != nil {
return nil, fmt.Errorf("unable to parse trigger: %w", err)
}
-
- // Resolve metricName
-
- // FIXME: DEPRECATED to be removed in v2.12
- if val, ok := config.TriggerMetadata["metricName"]; ok {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(val)))
- } else {
- meta.metricName = kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(meta.queueName)))
- }
-
// Resolve timeout
if err := resolveTimeout(config, &meta); err != nil {
return nil, err
@@ -598,7 +587,7 @@ func (s *rabbitMQScaler) getQueueInfoViaHTTP(ctx context.Context) (*queueInfo, e
func (s *rabbitMQScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec {
externalMetric := &v2.ExternalMetricSource{
Metric: v2.MetricIdentifier{
- Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, s.metadata.metricName),
+ Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(s.metadata.queueName)))),
},
Target: GetMetricTargetMili(s.metricType, s.metadata.value),
}
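
For context on the hunk above: with the deprecated `metricName` trigger field gone, the RabbitMQ metric name is derived solely from the queue name. A minimal sketch of that derivation, assuming `kedautil.NormalizeString` maps characters that are invalid in metric names (such as the `%` introduced by percent-encoding) to dashes — an assumption consistent with the `s1-rabbitmq-namespace-2Fname` expectation in the test file below:

```go
// Sketch only: normalize is a stand-in for kedautil.NormalizeString and
// assumes disallowed characters are replaced with '-'.
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

var disallowed = regexp.MustCompile(`[^a-zA-Z0-9-]`)

// normalize replaces every character outside [a-zA-Z0-9-] with a dash.
func normalize(s string) string {
	return disallowed.ReplaceAllString(s, "-")
}

func main() {
	queueName := "namespace/name"
	// QueryEscape turns "/" into "%2F"; normalize then turns "%" into "-".
	name := normalize(fmt.Sprintf("rabbitmq-%s", url.QueryEscape(queueName)))
	fmt.Println(name) // rabbitmq-namespace-2Fname
}
```
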
diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go
index c1dcf1f301e..303249e2d44 100644
--- a/pkg/scalers/rabbitmq_scaler_test.go
+++ b/pkg/scalers/rabbitmq_scaler_test.go
@@ -13,6 +13,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ kedautil "github.com/kedacore/keda/v2/pkg/util"
)
const (
@@ -107,8 +108,6 @@ var testRabbitMQMetadata = []parseRabbitMQMetadataTestData{
{map[string]string{"mode": "MessageRate", "value": "1000", "queueName": "sample", "host": "http://", "useRegex": "true"}, false, map[string]string{}},
// queue length and useRegex
{map[string]string{"mode": "QueueLength", "value": "1000", "queueName": "sample", "host": "http://", "useRegex": "true"}, false, map[string]string{}},
- // custom metric name
- {map[string]string{"mode": "QueueLength", "value": "1000", "queueName": "sample", "host": "http://", "useRegex": "true", "metricName": "host1-sample"}, false, map[string]string{}},
// http valid timeout
{map[string]string{"mode": "QueueLength", "value": "1000", "queueName": "sample", "host": "http://", "timeout": "1000"}, false, map[string]string{}},
// http invalid timeout
@@ -152,14 +151,13 @@ var testRabbitMQAuthParamData = []parseRabbitMQAuthParamTestData{
// failure, TLS invalid
{map[string]string{"queueName": "sample", "hostFromEnv": host}, v1alpha1.AuthPodIdentity{}, map[string]string{"tls": "yes", "ca": "caaa", "cert": "ceert", "key": "kee"}, true, true, false},
// success, WorkloadIdentity
- {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "http"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: "client-id"}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, false, false, true},
+ {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "http"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, false, false, true},
// failure, WorkloadIdentity not supported for amqp
- {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "amqp"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: "client-id"}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, true, false, false},
+ {map[string]string{"queueName": "sample", "hostFromEnv": host, "protocol": "amqp"}, v1alpha1.AuthPodIdentity{Provider: v1alpha1.PodIdentityProviderAzureWorkload, IdentityID: kedautil.StringPointer("client-id")}, map[string]string{"workloadIdentityResource": "rabbitmq-resource-id"}, true, false, false},
}
var rabbitMQMetricIdentifiers = []rabbitMQMetricIdentifier{
{&testRabbitMQMetadata[1], 0, "s0-rabbitmq-sample"},
{&testRabbitMQMetadata[7], 1, "s1-rabbitmq-namespace-2Fname"},
- {&testRabbitMQMetadata[31], 2, "s2-rabbitmq-host1-sample"},
}
func TestRabbitMQParseMetadata(t *testing.T) {
diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go
index 71088572652..e72d18bef52 100644
--- a/pkg/scalers/scaler.go
+++ b/pkg/scalers/scaler.go
@@ -110,7 +110,7 @@ var (
ErrScalerConfigMissingField = errors.New("missing required field in scaler config")
)
-// GetFromAuthOrMeta helps getting a field from Auth or Meta sections
+// GetFromAuthOrMeta helps to get a field from Auth or Meta sections
func GetFromAuthOrMeta(config *ScalerConfig, field string) (string, error) {
var result string
var err error
@@ -125,7 +125,7 @@ func GetFromAuthOrMeta(config *ScalerConfig, field string) (string, error) {
return result, err
}
-// GenerateMetricNameWithIndex helps adding the index prefix to the metric name
+// GenerateMetricNameWithIndex helps to add the index prefix to the metric name
func GenerateMetricNameWithIndex(scalerIndex int, metricName string) string {
return fmt.Sprintf("s%d-%s", scalerIndex, metricName)
}
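
Since every scaler touched by this change now passes a static name into `GenerateMetricNameWithIndex`, the resulting External metric names are fully deterministic. A self-contained sketch, reusing the helper exactly as defined above, reproduces the expectations used throughout the updated tests:

```go
package main

import "fmt"

// GenerateMetricNameWithIndex adds the index prefix to the metric name
// (copied from pkg/scalers/scaler.go above).
func GenerateMetricNameWithIndex(scalerIndex int, metricName string) string {
	return fmt.Sprintf("s%d-%s", scalerIndex, metricName)
}

func main() {
	fmt.Println(GenerateMetricNameWithIndex(0, "mssql"))      // s0-mssql
	fmt.Println(GenerateMetricNameWithIndex(1, "prometheus")) // s1-prometheus
}
```
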
diff --git a/pkg/scalers/solace_scaler.go b/pkg/scalers/solace_scaler.go
index 8ec5b2c7eae..5c0585e1f30 100644
--- a/pkg/scalers/solace_scaler.go
+++ b/pkg/scalers/solace_scaler.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"net/http"
+ "net/url"
"strconv"
"strings"
@@ -252,7 +253,8 @@ func parseSolaceMetadata(config *ScalerConfig) (*SolaceMetadata, error) {
solaceAPIVersion,
meta.messageVpn,
solaceAPIObjectTypeQueue,
- meta.queueName)
+ url.QueryEscape(meta.queueName),
+ )
// Get Credentials
var e error
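
A small sketch of what the added `url.QueryEscape` call buys in the SEMP endpoint; the host and URL layout here are illustrative placeholders, not KEDA's actual constants:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Without escaping, a queue named "with/slash" would be read as an
	// extra path segment in the monitoring URL.
	endpointURL := fmt.Sprintf("http://host:8080/SEMP/v2/monitor/msgVpns/%s/queues/%s",
		"default", url.QueryEscape("with/slash"))
	fmt.Println(endpointURL)
	// http://host:8080/SEMP/v2/monitor/msgVpns/default/queues/with%2Fslash
}
```
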
diff --git a/pkg/scalers/solace_scaler_test.go b/pkg/scalers/solace_scaler_test.go
index 3b6df9fd24f..28acc36337b 100644
--- a/pkg/scalers/solace_scaler_test.go
+++ b/pkg/scalers/solace_scaler_test.go
@@ -4,6 +4,8 @@ import (
"context"
"fmt"
"net/http"
+ "net/url"
+ "strings"
"testing"
v2 "k8s.io/api/autoscaling/v2"
@@ -246,6 +248,23 @@ var testParseSolaceMetadata = []testSolaceMetadata{
1,
true,
},
+ // +Case - Properly encode queueName
+ {
+ "#016 - Properly Encode QueueName- ",
+ map[string]string{
+ "": "",
+ solaceMetaSempBaseURL: soltestValidBaseURL,
+ solaceMetaMsgVpn: soltestValidVpn,
+ solaceMetaUsernameFromEnv: "",
+ solaceMetaPasswordFromEnv: "",
+ solaceMetaUsername: soltestValidUsername,
+ solaceMetaPassword: soltestValidPassword,
+ solaceMetaQueueName: "with/slash",
+ solaceMetaMsgCountTarget: soltestValidMsgCountTarget,
+ },
+ 1,
+ false,
+ },
}
var testSolaceEnvCreds = []testSolaceMetadata{
@@ -513,7 +532,7 @@ var testSolaceExpectedMetricNames = map[string]string{
func TestSolaceParseSolaceMetadata(t *testing.T) {
for _, testData := range testParseSolaceMetadata {
fmt.Print(testData.testID)
- _, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
+ meta, err := parseSolaceMetadata(&ScalerConfig{ResolvedEnv: nil, TriggerMetadata: testData.metadata, AuthParams: nil, ScalerIndex: testData.scalerIndex})
switch {
case err != nil && !testData.isError:
t.Error("expected success but got error: ", err)
@@ -524,6 +543,10 @@ func TestSolaceParseSolaceMetadata(t *testing.T) {
default:
fmt.Println(" --> PASS")
}
+ if !testData.isError && strings.Contains(testData.metadata["queueName"], "/") && !strings.Contains(meta.endpointURL, url.QueryEscape(testData.metadata["queueName"])) {
+ t.Error("expected endpointURL to query escape special characters in the URL but got:", meta.endpointURL)
+ fmt.Println(" --> FAIL")
+ }
}
for _, testData := range testSolaceEnvCreds {
fmt.Print(testData.testID)
diff --git a/pkg/scaling/resolver/azure_keyvault_handler.go b/pkg/scaling/resolver/azure_keyvault_handler.go
index 62263accb53..f7fe36544c2 100644
--- a/pkg/scaling/resolver/azure_keyvault_handler.go
+++ b/pkg/scaling/resolver/azure_keyvault_handler.go
@@ -133,12 +133,12 @@ func (vh *AzureKeyVaultHandler) getAuthConfig(ctx context.Context, client client
case kedav1alpha1.PodIdentityProviderAzure:
config := auth.NewMSIConfig()
config.Resource = keyVaultResourceURL
- config.ClientID = podIdentity.IdentityID
+ config.ClientID = podIdentity.GetIdentityID()
return config, nil
case kedav1alpha1.PodIdentityProviderAzureWorkload:
- return azure.NewAzureADWorkloadIdentityConfig(ctx, podIdentity.IdentityID, keyVaultResourceURL), nil
+ return azure.NewAzureADWorkloadIdentityConfig(ctx, podIdentity.GetIdentityID(), keyVaultResourceURL), nil
default:
- return nil, fmt.Errorf("key vault does not support pod identity provider - %s", podIdentity)
+ return nil, fmt.Errorf("key vault does not support pod identity provider - %s", podIdentity.Provider)
}
}
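
The move from reading `podIdentity.IdentityID` directly to calling `podIdentity.GetIdentityID()` follows from `IdentityID` becoming a `*string` (see the `kedautil.StringPointer` usage in the RabbitMQ test data above). A hedged sketch of the shape such an accessor plausibly has — the real one lives in `apis/keda/v1alpha1` and may differ:

```go
// Sketch only: illustrates why an accessor is needed once the field is a
// pointer — a nil *string must not be dereferenced, so callers get ""
// instead of a panic.
package v1alpha1sketch

type AuthPodIdentity struct {
	Provider   string
	IdentityID *string
}

func (a *AuthPodIdentity) GetIdentityID() string {
	if a == nil || a.IdentityID == nil {
		return ""
	}
	return *a.IdentityID
}
```
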
diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go
index 9d18eeb20c0..c2a9c39c9a6 100644
--- a/pkg/scaling/resolver/scale_resolvers.go
+++ b/pkg/scaling/resolver/scale_resolvers.go
@@ -112,7 +112,7 @@ func ResolveScaleTargetPodSpec(ctx context.Context, kubeClient client.Client, sc
statefulSet := &appsv1.StatefulSet{}
if err := kubeClient.Get(ctx, objKey, statefulSet); err != nil {
// resource doesn't exist
- logger.Error(err, "target deployment doesn't exist")
+ logger.Error(err, "target statefulset doesn't exist")
return nil, "", err
}
podTemplateSpec.ObjectMeta = statefulSet.ObjectMeta
@@ -181,7 +181,8 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log
if podTemplateSpec != nil {
authParams, podIdentity := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, secretsLister)
- if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAwsEKS {
+ switch podIdentity.Provider {
+ case kedav1alpha1.PodIdentityProviderAwsEKS:
serviceAccountName := defaultServiceAccount
if podTemplateSpec.Spec.ServiceAccountName != "" {
serviceAccountName = podTemplateSpec.Spec.ServiceAccountName
@@ -193,8 +194,13 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log
fmt.Errorf("error getting service account: '%s', error: %w", serviceAccountName, err)
}
authParams["awsRoleArn"] = serviceAccount.Annotations[kedav1alpha1.PodIdentityAnnotationEKS]
- } else if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAwsKiam {
+ case kedav1alpha1.PodIdentityProviderAwsKiam:
authParams["awsRoleArn"] = podTemplateSpec.ObjectMeta.Annotations[kedav1alpha1.PodIdentityAnnotationKiam]
+ case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
+ if podIdentity.IdentityID != nil && *podIdentity.IdentityID == "" {
+ return nil, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, fmt.Errorf("IdentityID of PodIdentity should not be empty")
+ }
+ default:
}
return authParams, podIdentity, nil
}
diff --git a/pkg/scaling/resolver/scale_resolvers_test.go b/pkg/scaling/resolver/scale_resolvers_test.go
index 859b5a7c701..7db11297362 100644
--- a/pkg/scaling/resolver/scale_resolvers_test.go
+++ b/pkg/scaling/resolver/scale_resolvers_test.go
@@ -422,7 +422,7 @@ func TestResolveAuthRef(t *testing.T) {
t.Errorf("Returned authParams are different: %s", diff)
}
if gotPodIdentity != test.expectedPodIdentity {
- t.Errorf("Unexpected podidentity, wanted: %q got: %q", test.expectedPodIdentity, gotPodIdentity)
+ t.Errorf("Unexpected podidentity, wanted: %q got: %q", test.expectedPodIdentity.Provider, gotPodIdentity.Provider)
}
})
}
diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go
index 649d19fd1a7..4999bd01eb6 100644
--- a/pkg/scaling/scale_handler.go
+++ b/pkg/scaling/scale_handler.go
@@ -34,6 +34,7 @@ import (
logf "sigs.k8s.io/controller-runtime/pkg/log"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ "github.com/kedacore/keda/v2/pkg/common/message"
"github.com/kedacore/keda/v2/pkg/eventreason"
"github.com/kedacore/keda/v2/pkg/fallback"
"github.com/kedacore/keda/v2/pkg/prommetrics"
@@ -109,7 +110,7 @@ func (h *scaleHandler) HandleScalableObject(ctx context.Context, scalableObject
}
h.scaleLoopContexts.Store(key, cancel)
} else {
- h.recorder.Event(withTriggers, corev1.EventTypeNormal, eventreason.KEDAScalersStarted, "Started scalers watch")
+ h.recorder.Event(withTriggers, corev1.EventTypeNormal, eventreason.KEDAScalersStarted, message.ScalerStartMsg)
}
// a mutex is used to synchronize scale requests per scalableObject
diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go
index 7a4a53b24e5..ed442e47a5b 100644
--- a/pkg/scaling/scalers_builder.go
+++ b/pkg/scaling/scalers_builder.go
@@ -24,6 +24,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
+ "github.com/kedacore/keda/v2/pkg/common/message"
"github.com/kedacore/keda/v2/pkg/eventreason"
"github.com/kedacore/keda/v2/pkg/scalers"
"github.com/kedacore/keda/v2/pkg/scaling/cache"
@@ -87,6 +88,8 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp
}
return nil, err
}
+ msg := fmt.Sprintf(message.ScalerIsBuiltMsg, trigger.Type)
+ h.recorder.Event(withTriggers, corev1.EventTypeNormal, eventreason.KEDAScalersStarted, msg)
result = append(result, cache.ScalerBuilder{
Scaler: scaler,
diff --git a/pkg/util/conver_types.go b/pkg/util/conver_types.go
new file mode 100644
index 00000000000..6d7559a8ee9
--- /dev/null
+++ b/pkg/util/conver_types.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The KEDA Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+// StringPointer returns a pointer to the string value passed in.
+func StringPointer(v string) *string {
+ return &v
+}
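
Usage note for the new helper: Go forbids taking the address of a literal (`&"client-id"` does not compile), which is why the pointer-typed `IdentityID` fields in the updated test fixtures need `StringPointer`:

```go
package main

import "fmt"

// StringPointer returns a pointer to the string value passed in
// (mirrors pkg/util/conver_types.go above).
func StringPointer(v string) *string {
	return &v
}

func main() {
	id := StringPointer("client-id") // &"client-id" would be a compile error
	fmt.Println(*id)                 // client-id
}
```
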
diff --git a/tests/README.md b/tests/README.md
index bd478733369..76510581db5 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -16,6 +16,13 @@ go test -v -tags e2e ./scalers/...
go test -v -tags e2e ./utils/cleanup_test.go # Skip if you want to keep testing.
```
+> **Note**
+> By default, `go test -v -tags e2e ./utils/setup_test.go` deploys KEDA from upstream's main branch.
+> If you are adding an e2e test for your own changes, that is not what you want, since you need your own build.
+> As with [building and deploying your own image](../BUILD.md#custom-keda-as-an-image), you can use
+> the Makefile environment variables to customize the KEDA deployment,
+> e.g. `IMAGE_REGISTRY=docker.io IMAGE_REPO=johndoe go test -v -tags e2e ./utils/setup_test.go`
+
### Specific test
```bash
diff --git a/tests/internals/events/events_test.go b/tests/internals/events/events_test.go
new file mode 100644
index 00000000000..90a81aa28cc
--- /dev/null
+++ b/tests/internals/events/events_test.go
@@ -0,0 +1,230 @@
+//go:build e2e
+// +build e2e
+
+package events_test
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/client-go/kubernetes"
+
+ "github.com/kedacore/keda/v2/pkg/common/message"
+ "github.com/kedacore/keda/v2/pkg/eventreason"
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+const (
+ testName = "events-test"
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ monitoredDeploymentName = fmt.Sprintf("%s-monitor-deployment", testName)
+ deploymentName = fmt.Sprintf("%s-deployment", testName)
+ daemonsetName = fmt.Sprintf("%s-daemonset", testName)
+ scaledObjectName = fmt.Sprintf("%s-so", testName)
+ scaledObjectTargetNotFoundName = fmt.Sprintf("%s-so-target-error", testName)
+ scaledObjectTargetNoSubresourceName = fmt.Sprintf("%s-so-target-no-subresource", testName)
+)
+
+type templateData struct {
+ TestNamespace string
+ ScaledObjectName string
+ ScaledObjectTargetNotFoundName string
+ ScaledObjectTargetNoSubresourceName string
+ DeploymentName string
+ MonitoredDeploymentName string
+ DaemonsetName string
+}
+
+const (
+ scaledObjectTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectName}}
+ namespace: {{.TestNamespace}}
+spec:
+ scaleTargetRef:
+ name: {{.DeploymentName}}
+ triggers:
+ - type: kubernetes-workload
+ metadata:
+ podSelector: 'app={{.MonitoredDeploymentName}}'
+ value: '1'
+`
+ monitoredDeploymentTemplate = `apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.MonitoredDeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.MonitoredDeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.MonitoredDeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.MonitoredDeploymentName}}
+ spec:
+ containers:
+ - name: nginx
+ image: 'nginxinc/nginx-unprivileged'
+`
+
+ deploymentTemplate = `
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{.DeploymentName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DeploymentName}}
+spec:
+ replicas: 0
+ selector:
+ matchLabels:
+ app: {{.DeploymentName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DeploymentName}}
+ spec:
+ containers:
+ - name: {{.DeploymentName}}
+ image: nginxinc/nginx-unprivileged:alpine-slim
+`
+
+ scaledObjectTargetErrTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectTargetNotFoundName}}
+ namespace: {{.TestNamespace}}
+spec:
+ scaleTargetRef:
+ name: no-exist
+ triggers:
+ - type: kubernetes-workload
+ metadata:
+ podSelector: 'app={{.DeploymentName}}'
+ value: '1'
+`
+
+ daemonSetTemplate = `
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{.DaemonsetName}}
+ namespace: {{.TestNamespace}}
+ labels:
+ app: {{.DaemonsetName}}
+spec:
+ selector:
+ matchLabels:
+ app: {{.DaemonsetName}}
+ template:
+ metadata:
+ labels:
+ app: {{.DaemonsetName}}
+ spec:
+ containers:
+ - name: {{.DaemonsetName}}
+ image: nginxinc/nginx-unprivileged:alpine-slim
+`
+
+ scaledObjectTargetNotSupportTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ScaledObject
+metadata:
+ name: {{.ScaledObjectTargetNoSubresourceName}}
+ namespace: {{.TestNamespace}}
+spec:
+ scaleTargetRef:
+ name: {{.DaemonsetName}}
+ kind: DaemonSet
+ triggers:
+ - type: kubernetes-workload
+ metadata:
+ podSelector: 'app={{.DeploymentName}}'
+ value: '1'
+`
+)
+
+func TestEvents(t *testing.T) {
+ // setup
+ t.Log("--- setting up ---")
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ // test scaling
+ testNormalEvent(t, kc, data)
+ testTargetNotFoundErr(t, kc, data)
+ testTargetNotSupportEventErr(t, kc, data)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ DeploymentName: deploymentName,
+ MonitoredDeploymentName: monitoredDeploymentName,
+ DaemonsetName: daemonsetName,
+ ScaledObjectName: scaledObjectName,
+ ScaledObjectTargetNotFoundName: scaledObjectTargetNotFoundName,
+ ScaledObjectTargetNoSubresourceName: scaledObjectTargetNoSubresourceName,
+ }, []Template{}
+}
+
+// checkingEvent asserts that the event at the given index on the object has
+// the expected reason and message; events are sorted oldest-first, and a
+// negative index counts back from the most recent event.
+func checkingEvent(t *testing.T, scaledObject string, index int, reason string, msg string) {
+ result, err := ExecuteCommand(fmt.Sprintf("kubectl get events -n %s --field-selector involvedObject.name=%s --sort-by=.metadata.creationTimestamp -o jsonpath=\"{.items[%d].reason}:{.items[%d].message}\"", testNamespace, scaledObject, index, index))
+
+ assert.NoError(t, err)
+ lastEventMessage := strings.Trim(string(result), "\"")
+ assert.Equal(t, reason+":"+msg, lastEventMessage)
+}
+
+func testNormalEvent(t *testing.T, kc *kubernetes.Clientset, data templateData) {
+ t.Log("--- testing normal event ---")
+
+ KubectlApplyWithTemplate(t, data, "deploymentTemplate", deploymentTemplate)
+ KubectlApplyWithTemplate(t, data, "monitoredDeploymentName", monitoredDeploymentTemplate)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTemplate", scaledObjectTemplate)
+
+ KubernetesScaleDeployment(t, kc, monitoredDeploymentName, 2, testNamespace)
+ assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 1),
+ "replica count should be 2 after 1 minute")
+ checkingEvent(t, scaledObjectName, 0, eventreason.KEDAScalersStarted, fmt.Sprintf(message.ScalerIsBuiltMsg, "kubernetes-workload"))
+ checkingEvent(t, scaledObjectName, 1, eventreason.KEDAScalersStarted, message.ScalerStartMsg)
+ checkingEvent(t, scaledObjectName, 2, eventreason.ScaledObjectReady, message.ScalerReadyMsg)
+}
+
+func testTargetNotFoundErr(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- testing target not found error event ---")
+
+ KubectlApplyWithTemplate(t, data, "scaledObjectTargetErrTemplate", scaledObjectTargetErrTemplate)
+ checkingEvent(t, scaledObjectTargetNotFoundName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNotFoundMsg)
+ checkingEvent(t, scaledObjectTargetNotFoundName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg)
+}
+
+func testTargetNotSupportEventErr(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- testing target not support error event ---")
+
+ KubectlApplyWithTemplate(t, data, "daemonSetTemplate", daemonSetTemplate)
+ KubectlApplyWithTemplate(t, data, "scaledObjectTargetNotSupportTemplate", scaledObjectTargetNotSupportTemplate)
+ checkingEvent(t, scaledObjectTargetNoSubresourceName, -2, eventreason.ScaledObjectCheckFailed, message.ScaleTargetNoSubresourceMsg)
+ checkingEvent(t, scaledObjectTargetNoSubresourceName, -1, eventreason.ScaledObjectCheckFailed, message.ScaleTargetErrMsg)
+}
diff --git a/tests/internals/trigger_authentication_validation/trigger_authentication_validation_test.go b/tests/internals/trigger_authentication_validation/trigger_authentication_validation_test.go
new file mode 100644
index 00000000000..f15099cfd7b
--- /dev/null
+++ b/tests/internals/trigger_authentication_validation/trigger_authentication_validation_test.go
@@ -0,0 +1,224 @@
+//go:build e2e
+// +build e2e
+
+package trigger_authentication_validation_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/joho/godotenv"
+ "github.com/stretchr/testify/assert"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+
+ . "github.com/kedacore/keda/v2/tests/helper"
+)
+
+// Load environment variables from .env file
+var _ = godotenv.Load("../../../.env")
+
+const (
+ testName = "azure-aad-pod-identity-test"
+)
+
+var (
+ testNamespace = fmt.Sprintf("%s-ns", testName)
+ triggerAuthEmptyIDName = fmt.Sprintf("%s-ta-empty", testName)
+ triggerAuthNilIDName = fmt.Sprintf("%s-ta-nil", testName)
+ clusterTriggerAuthEmptyIDName = fmt.Sprintf("%s-cta-empty", testName)
+ clusterTriggerAuthNilIDName = fmt.Sprintf("%s-cta-nil", testName)
+ triggerAuthWorkloadEmptyIDName = fmt.Sprintf("%s-ta-workload-empty", testName)
+ triggerAuthWorkloadNilIDName = fmt.Sprintf("%s-ta-workload-nil", testName)
+ clusterTriggerAuthWorkloadEmptyIDName = fmt.Sprintf("%s-cta-workload-empty", testName)
+ clusterTriggerAuthWorkloadNilIDName = fmt.Sprintf("%s-cta-workload-nil", testName)
+)
+
+type templateData struct {
+ TestNamespace string
+ TriggerAuthEmptyIDName string
+ TriggerAuthNilIDName string
+ ClusterTriggerAuthEmptyIDName string
+ ClusterTriggerAuthNilIDName string
+ TriggerAuthWorkloadEmptyIDName string
+ TriggerAuthWorkloadNilIDName string
+ ClusterTriggerAuthWorkloadEmptyIDName string
+ ClusterTriggerAuthWorkloadNilIDName string
+}
+
+const (
+ triggerAuthEmptyIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthEmptyIDName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: azure
+ identityId: ""
+`
+
+ triggerAuthNilIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthNilIDName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: azure
+`
+ clusterTriggerAuthEmptyIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ClusterTriggerAuthentication
+metadata:
+ name: {{.ClusterTriggerAuthEmptyIDName}}
+spec:
+ podIdentity:
+ provider: azure
+ identityId: ""
+`
+
+ clusterTriggerAuthNilIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ClusterTriggerAuthentication
+metadata:
+ name: {{.ClusterTriggerAuthNilIDName}}
+spec:
+ podIdentity:
+ provider: azure
+`
+
+ triggerAuthWorkloadEmptyIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthWorkloadEmptyIDName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: azure-workload
+ identityId: ""
+`
+
+ triggerAuthWorkloadNilIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: TriggerAuthentication
+metadata:
+ name: {{.TriggerAuthWorkloadNilIDName}}
+ namespace: {{.TestNamespace}}
+spec:
+ podIdentity:
+ provider: azure-workload
+`
+ clusterTriggerAuthWorkloadEmptyIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ClusterTriggerAuthentication
+metadata:
+ name: {{.ClusterTriggerAuthWorkloadEmptyIDName}}
+spec:
+ podIdentity:
+ provider: azure-workload
+ identityId: ""
+`
+
+ clusterTriggerAuthWorkloadNilIDTemplate = `
+apiVersion: keda.sh/v1alpha1
+kind: ClusterTriggerAuthentication
+metadata:
+ name: {{.ClusterTriggerAuthWorkloadNilIDName}}
+spec:
+ podIdentity:
+ provider: azure-workload
+`
+)
+
+func TestScaler(t *testing.T) {
+ // setup
+ t.Log("--- setting up ---")
+
+ // Create kubernetes resources
+ kc := GetKubernetesClient(t)
+ data, templates := getTemplateData()
+
+ CreateKubernetesResources(t, kc, testNamespace, data, templates)
+
+ // test auth
+ testTriggerAuthenticationWithEmptyID(t, kc, data)
+ testTriggerAuthenticationWithNilID(t, kc, data)
+ testClusterTriggerAuthenticationWithEmptyID(t, kc, data)
+ testClusterTriggerAuthenticationWithNilID(t, kc, data)
+
+ // cleanup
+ DeleteKubernetesResources(t, testNamespace, data, templates)
+}
+
+func getTemplateData() (templateData, []Template) {
+ return templateData{
+ TestNamespace: testNamespace,
+ TriggerAuthEmptyIDName: triggerAuthEmptyIDName,
+ TriggerAuthNilIDName: triggerAuthNilIDName,
+ ClusterTriggerAuthEmptyIDName: clusterTriggerAuthEmptyIDName,
+ ClusterTriggerAuthNilIDName: clusterTriggerAuthNilIDName,
+ TriggerAuthWorkloadEmptyIDName: triggerAuthWorkloadEmptyIDName,
+ TriggerAuthWorkloadNilIDName: triggerAuthWorkloadNilIDName,
+ ClusterTriggerAuthWorkloadEmptyIDName: clusterTriggerAuthWorkloadEmptyIDName,
+ ClusterTriggerAuthWorkloadNilIDName: clusterTriggerAuthWorkloadNilIDName,
+ }, []Template{}
+}
+
+// expect that a TriggerAuthentication with an empty identityId is rejected
+func testTriggerAuthenticationWithEmptyID(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- create triggerauthentication with empty identity id ---")
+
+ err := KubectlApplyWithErrors(t, data, "triggerAuthEmptyIDTemplate", triggerAuthEmptyIDTemplate)
+ assert.Errorf(t, err, "can deploy TriggerAuthtication - %s", err)
+
+ err = KubectlApplyWithErrors(t, data, "triggerAuthWorkloadEmptyIDTemplate", triggerAuthWorkloadEmptyIDTemplate)
+ assert.Errorf(t, err, "can deploy TriggerAuthtication with azureworkload - %s", err)
+}
+
+// expect that a TriggerAuthentication without the identityId property is accepted
+func testTriggerAuthenticationWithNilID(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- create triggerauthentication with nil identity id ---")
+
+ kedaKc := GetKedaKubernetesClient(t)
+ KubectlApplyWithTemplate(t, data, "triggerAuthNilITemplate", triggerAuthNilIDTemplate)
+
+ triggerauthentication, _ := kedaKc.TriggerAuthentications(testNamespace).Get(context.Background(), triggerAuthNilIDName, v1.GetOptions{})
+ assert.NotNil(t, triggerauthentication)
+
+ KubectlApplyWithTemplate(t, data, "triggerAuthWorkloadNilITemplate", triggerAuthWorkloadNilIDTemplate)
+
+ triggerauthentication, _ = kedaKc.TriggerAuthentications(testNamespace).Get(context.Background(), triggerAuthWorkloadNilIDName, v1.GetOptions{})
+ assert.NotNil(t, triggerauthentication)
+}
+
+// expect that a ClusterTriggerAuthentication with an empty identityId is rejected
+func testClusterTriggerAuthenticationWithEmptyID(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- create clustertriggerauthentication with empty identity id ---")
+
+ err := KubectlApplyWithErrors(t, data, "clusterTriggerAuthEmptyIDTemplate", clusterTriggerAuthEmptyIDTemplate)
+ assert.Errorf(t, err, "can deploy ClusterTriggerAuthtication - %s", err)
+
+ err = KubectlApplyWithErrors(t, data, "clusterTriggerAuthWorkloadEmptyIDTemplate", clusterTriggerAuthWorkloadEmptyIDTemplate)
+ assert.Errorf(t, err, "can deploy ClusterTriggerAuthtication with azureworkload - %s", err)
+}
+
+// expect that a ClusterTriggerAuthentication without the identityId property is accepted
+func testClusterTriggerAuthenticationWithNilID(t *testing.T, _ *kubernetes.Clientset, data templateData) {
+ t.Log("--- create clustertriggerauthentication with nil identity id ---")
+
+ kedaKc := GetKedaKubernetesClient(t)
+ KubectlApplyWithTemplate(t, data, "clusterTriggerAuthNilIDTemplate", clusterTriggerAuthNilIDTemplate)
+
+ clustertriggerauthentication, _ := kedaKc.ClusterTriggerAuthentications().Get(context.Background(), clusterTriggerAuthNilIDName, v1.GetOptions{})
+ assert.NotNil(t, clustertriggerauthentication)
+
+ KubectlApplyWithTemplate(t, data, "clusterTriggerAuthWorkloadNilIDTemplate", clusterTriggerAuthWorkloadNilIDTemplate)
+
+ clustertriggerauthentication, _ = kedaKc.ClusterTriggerAuthentications().Get(context.Background(), clusterTriggerAuthWorkloadNilIDName, v1.GetOptions{})
+ assert.NotNil(t, clustertriggerauthentication)
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 00000000000..16686a65523
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+
+package empty
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/empty.proto.
+
+type Empty = emptypb.Empty
+
+var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
+ 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
+ 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
+func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
+}