diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 9df7e2dfc06..1d3809acb92 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -3,7 +3,7 @@ # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. #------------------------------------------------------------------------------------------------------------- -FROM golang:1.21.9 +FROM golang:1.21.12 # Avoid warnings by switching to noninteractive ENV DEBIAN_FRONTEND=noninteractive @@ -16,8 +16,6 @@ ARG USERNAME=vscode ARG USER_UID=1000 ARG USER_GID=$USER_UID -ENV GO111MODULE=auto - # Configure apt, install packages and tools RUN apt-get update \ && apt-get -y install --no-install-recommends apt-utils dialog unzip 2>&1 \ @@ -25,32 +23,25 @@ RUN apt-get update \ # Verify git, process tools, lsb-release (common in install instructions for CLIs) installed && apt-get -y install git iproute2 procps lsb-release \ # - # Install gocode-gomod - && go get -x -d github.com/stamblerre/gocode 2>&1 \ - && go build -o gocode-gomod github.com/stamblerre/gocode \ - && mv gocode-gomod $GOPATH/bin/ \ - # # Install Go tools - && go get -u -v \ - github.com/mdempsky/gocode \ - github.com/uudashr/gopkgs/cmd/gopkgs \ - github.com/ramya-rao-a/go-outline \ - github.com/acroca/go-symbols \ - github.com/godoctor/godoctor \ - golang.org/x/tools/cmd/gorename \ - github.com/rogpeppe/godef \ - github.com/zmb3/gogetdoc \ - github.com/haya14busa/goplay/cmd/goplay \ - github.com/sqs/goreturns \ - github.com/josharian/impl \ - github.com/davidrjenni/reftools/cmd/fillstruct \ - github.com/fatih/gomodifytags \ - github.com/cweill/gotests/... 
\ - golang.org/x/tools/cmd/goimports \ - golang.org/x/lint/golint \ - github.com/alecthomas/gometalinter 2>&1 \ - github.com/mgechev/revive \ - github.com/derekparker/delve/cmd/dlv 2>&1 \ + && go install github.com/uudashr/gopkgs/v2/cmd/gopkgs@latest \ + && go install github.com/ramya-rao-a/go-outline@latest \ + && go install github.com/acroca/go-symbols@latest \ + && go install github.com/godoctor/godoctor@latest \ + && go install golang.org/x/tools/cmd/gorename@latest \ + && go install github.com/rogpeppe/godef@latest \ + && go install github.com/zmb3/gogetdoc@latest \ + && go install github.com/haya14busa/goplay/cmd/goplay@latest \ + && go install github.com/sqs/goreturns@latest \ + && go install github.com/josharian/impl@latest \ + && go install github.com/davidrjenni/reftools/cmd/fillstruct@latest \ + && go install github.com/fatih/gomodifytags@latest \ + && go install github.com/cweill/gotests/...@latest \ + && go install golang.org/x/tools/cmd/goimports@latest \ + && go install golang.org/x/lint/golint@latest \ + && go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest \ + && go install github.com/mgechev/revive@latest \ + && go install github.com/go-delve/delve/cmd/dlv@latest \ && go install honnef.co/go/tools/cmd/staticcheck@latest \ && go install golang.org/x/tools/gopls@latest \ # Protocol Buffer Compiler @@ -61,8 +52,6 @@ RUN apt-get update \ && mv $HOME/.local/bin/protoc /usr/local/bin/protoc \ && mv $HOME/.local/include/ /usr/local/bin/include/ \ && protoc --version \ - # Install golangci-lint - && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2 \ # # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
&& groupadd --gid $USER_GID $USERNAME \ @@ -91,9 +80,6 @@ RUN apt-get update \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* -# Enable go modules -ENV GO111MODULE=on - ENV OPERATOR_RELEASE_VERSION=v1.26.0 RUN ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac) \ && OS=$(uname | awk '{print tolower($0)}') \ diff --git a/.github/workflows/main-build.yml b/.github/workflows/main-build.yml index 1ea7db77226..6cf0a8221a4 100644 --- a/.github/workflows/main-build.yml +++ b/.github/workflows/main-build.yml @@ -13,7 +13,7 @@ jobs: id-token: write # needed for signing the images with GitHub OIDC Token **not production ready** # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 steps: - name: Check out code uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 diff --git a/.github/workflows/pr-e2e.yml b/.github/workflows/pr-e2e.yml index 4af1a16a8b7..2d7bc9c090f 100644 --- a/.github/workflows/pr-e2e.yml +++ b/.github/workflows/pr-e2e.yml @@ -68,7 +68,7 @@ jobs: needs: triage runs-on: ubuntu-latest name: Build images - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress @@ -93,6 +93,42 @@ jobs: gh pr checkout ${{ needs.triage.outputs.pr_num }} git checkout ${{ needs.triage.outputs.commit_sha }} + - name: Run regex checks + id: regex-validation + continue-on-error: true + env: + COMMENT_BODY: ${{ github.event.comment.body }} + run: | + MESSAGE="$COMMENT_BODY" + REGEX='/run-e2e (.+)' + if [[ "$MESSAGE" =~ $REGEX ]] + then + export E2E_TEST_REGEX="$(echo ${BASH_REMATCH[1]} | head -1)" + fi + make e2e-regex-check + + - name: React to comment with failure + uses: dkershner6/reaction-action@v2 + if: steps.regex-validation.outcome != 'success' + with: + token: ${{ 
secrets.GITHUB_TOKEN }} + commentId: ${{ github.event.comment.id }} + reaction: "-1" + + - name: Set status failure + uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2 + if: steps.regex-validation.outcome != 'success' + with: + token: ${{ secrets.GITHUB_TOKEN }} + sha: ${{ needs.triage.outputs.commit_sha }} + name: ${{ env.E2E_CHECK_NAME }} + conclusion: failure + details_url: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}} + + - name: Exit on failure + if: steps.regex-validation.outcome != 'success' + run: exit 1 + - name: Login to GitHub Container Registry uses: docker/login-action@v3 with: @@ -112,7 +148,7 @@ jobs: needs: [triage, build-test-images] runs-on: e2e name: Execute e2e tests - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 if: needs.triage.outputs.run-e2e == 'true' steps: - name: Set status in-progress diff --git a/.github/workflows/pr-validation.yml b/.github/workflows/pr-validation.yml index 845ed4e08f1..dd9aac82c20 100644 --- a/.github/workflows/pr-validation.yml +++ b/.github/workflows/pr-validation.yml @@ -10,7 +10,7 @@ jobs: validate: name: validate - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 strategy: matrix: include: @@ -73,9 +73,9 @@ jobs: validate-dockerfiles: name: validate-dockerfiles - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 strategy: - matrix: + matrix: include: - runner: ARM64 name: arm64 @@ -104,9 +104,9 @@ jobs: validate-dev-container: name: Validate dev-container - ${{ matrix.name }} runs-on: ${{ matrix.runner }} - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 strategy: - matrix: + matrix: include: - runner: ARM64 name: arm64 diff --git a/.github/workflows/release-build.yml 
b/.github/workflows/release-build.yml index 10cb3d4d3e4..55c00a71023 100644 --- a/.github/workflows/release-build.yml +++ b/.github/workflows/release-build.yml @@ -13,7 +13,7 @@ jobs: id-token: write # needed for signing the images with GitHub OIDC Token **not production ready** # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 steps: - name: Check out code uses: actions/checkout@1d96c772d19495a3b5c517cd2bc0cb401ea0529f # v4 diff --git a/.github/workflows/static-analysis-codeql.yml b/.github/workflows/static-analysis-codeql.yml index c3558780f3a..ef3e58edfb7 100644 --- a/.github/workflows/static-analysis-codeql.yml +++ b/.github/workflows/static-analysis-codeql.yml @@ -13,7 +13,7 @@ jobs: codeQl: name: Analyze CodeQL Go runs-on: ubuntu-latest - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 if: (github.actor != 'dependabot[bot]') steps: - name: Checkout repository diff --git a/.github/workflows/template-main-e2e-test.yml b/.github/workflows/template-main-e2e-test.yml index 24da132fa59..78d4d3f9aa1 100644 --- a/.github/workflows/template-main-e2e-test.yml +++ b/.github/workflows/template-main-e2e-test.yml @@ -8,7 +8,7 @@ jobs: name: Run e2e test runs-on: ARM64 # keda-tools is built from github.com/test-tools/tools/Dockerfile - container: ghcr.io/kedacore/keda-tools:1.21.9 + container: ghcr.io/kedacore/keda-tools:1.21.12 concurrency: e2e-tests steps: - name: Check out code diff --git a/CHANGELOG.md b/CHANGELOG.md index bd671c7516b..997c82d7a29 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## History - [Unreleased](#unreleased) +- [v2.14.1](#v2141) - [v2.14.0](#v2140) - [v2.13.1](#v2131) - [v2.13.0](#v2130) @@ -50,41 +51,18 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio - 
[v1.1.0](#v110) - [v1.0.0](#v100) -## Unreleased - -### New - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -#### Experimental - -Here is an overview of all new **experimental** features: - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Improvements - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) +## v2.14.1 ### Fixes -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Deprecations - -You can find all deprecations in [this overview](https://github.com/kedacore/keda/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abreaking-change) and [join the discussion here](https://github.com/kedacore/keda/discussions/categories/deprecations). - -New deprecation(s): - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Breaking Changes - -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) - -### Other +- **General**: Do not delete running Jobs on KEDA restart ([#5656](https://github.com/kedacore/keda/issues/5656)) +- **General**: Fix CVE-2024-24790, CVE-2024-24789, and CVE-2024-24791 in stdlib. 
([#5971](https://github.com/kedacore/keda/pull/5971)) +- **General**: Fix CVE-2024-35255 in github.com/Azure/azure-sdk-for-go/sdk/azidentity ([#5971](https://github.com/kedacore/keda/pull/5971)) +- **General**: Fix CVE-2024-6104 in github.com/hashicorp/go-retryablehttp ([#5971](https://github.com/kedacore/keda/pull/5971)) +- **General**: Fix ScaledJob ignores failing trigger(s) error ([#5922](https://github.com/kedacore/keda/issues/5922)) +- **General**: Scalers are properly closed after being refreshed ([#5806](https://github.com/kedacore/keda/issues/5806)) +- **GitHub Scaler**: Fixed pagination, fetching repository list ([#5738](https://github.com/kedacore/keda/issues/5738)) +- **MongoDB Scaler**: MongoDB url parses correctly `+srv` scheme ([#5760](https://github.com/kedacore/keda/issues/5760)) -- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) ## v2.14.0 diff --git a/Dockerfile b/Dockerfile index 07c0e63fb58..81b8947b5e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.12 AS builder ARG BUILD_VERSION=main ARG GIT_COMMIT=HEAD diff --git a/Dockerfile.adapter b/Dockerfile.adapter index ba961577589..a0a71e5e9ca 100644 --- a/Dockerfile.adapter +++ b/Dockerfile.adapter @@ -1,5 +1,5 @@ # Build the adapter binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.12 AS builder ARG BUILD_VERSION=main ARG GIT_COMMIT=HEAD diff --git a/Dockerfile.webhooks b/Dockerfile.webhooks index c2a4c172ea0..f4a47473138 100644 --- a/Dockerfile.webhooks +++ b/Dockerfile.webhooks @@ -1,5 +1,5 @@ # Build the manager binary -FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.9 AS builder +FROM --platform=$BUILDPLATFORM ghcr.io/kedacore/keda-tools:1.21.12 AS builder ARG BUILD_VERSION=main ARG 
GIT_COMMIT=HEAD diff --git a/Makefile b/Makefile index 1b5cbbce40b..0453cb3f31a 100644 --- a/Makefile +++ b/Makefile @@ -98,6 +98,10 @@ scale-node-pool: az-login ## Scale nodepool. --resource-group $(TF_AZURE_RESOURCE_GROUP) \ --node-count $(NODE_POOL_SIZE) +.PHONY: e2e-regex-check +e2e-regex-check: + go run -tags e2e ./tests/run-all.go regex-check + .PHONY: e2e-test e2e-test: get-cluster-context ## Run e2e tests against Azure cluster. TERMINFO=/etc/terminfo @@ -268,7 +272,7 @@ uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/config. cd config/manager && \ $(KUSTOMIZE) edit set image ghcr.io/kedacore/keda=${IMAGE_CONTROLLER} && \ - if [ "$(AZURE_RUN_AAD_POD_IDENTITY_TESTS)" = true ]; then \ + if [ "$(AZURE_RUN_WORKLOAD_IDENTITY_TESTS)" = true ]; then \ $(KUSTOMIZE) edit add label --force aadpodidbinding:keda; \ fi && \ if [ "$(AZURE_RUN_WORKLOAD_IDENTITY_TESTS)" = true ]; then \ @@ -276,7 +280,7 @@ deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/con fi cd config/metrics-server && \ $(KUSTOMIZE) edit set image ghcr.io/kedacore/keda-metrics-apiserver=${IMAGE_ADAPTER} && \ - if [ "$(AZURE_RUN_AAD_POD_IDENTITY_TESTS)" = true ]; then \ + if [ "$(AZURE_RUN_WORKLOAD_IDENTITY_TESTS)" = true ]; then \ $(KUSTOMIZE) edit add label --force aadpodidbinding:keda; \ fi if [ "$(AZURE_RUN_WORKLOAD_IDENTITY_TESTS)" = true ]; then \ diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 98c1ce87cc8..845ba5aca90 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -279,22 +279,36 @@ func (r *ScaledJobReconciler) deletePreviousVersionScaleJobs(ctx context.Context return "Cannot get list of Jobs owned by this scaledJob", err } - if len(jobs.Items) > 0 { - logger.Info("RolloutStrategy: immediate, Deleting jobs owned by the previous version of the 
scaledJob", "numJobsToDelete", len(jobs.Items)) + jobIndexes := make([]int, 0, len(jobs.Items)) + scaledJobGeneration := strconv.FormatInt(scaledJob.Generation, 10) + for i, job := range jobs.Items { + if jobGen, ok := job.Annotations["scaledjob.keda.sh/generation"]; !ok { + // delete Jobs that don't have the generation annotation + jobIndexes = append(jobIndexes, i) + } else if jobGen != scaledJobGeneration { + // delete Jobs that have a different generation annotation + jobIndexes = append(jobIndexes, i) + } } - for _, job := range jobs.Items { - job := job - propagationPolicy := metav1.DeletePropagationBackground - if scaledJob.Spec.Rollout.PropagationPolicy == "foreground" { - propagationPolicy = metav1.DeletePropagationForeground - } - err = r.Client.Delete(ctx, &job, client.PropagationPolicy(propagationPolicy)) - if err != nil { - return "Not able to delete job: " + job.Name, err + if len(jobIndexes) == 0 { + logger.Info("RolloutStrategy: immediate, No jobs owned by the previous version of the scaledJob") + } else { + logger.Info("RolloutStrategy: immediate, Deleting jobs owned by the previous version of the scaledJob", "numJobsToDelete", len(jobIndexes)) + for _, index := range jobIndexes { + job := jobs.Items[index] + + propagationPolicy := metav1.DeletePropagationBackground + if scaledJob.Spec.Rollout.PropagationPolicy == "foreground" { + propagationPolicy = metav1.DeletePropagationForeground + } + err = r.Client.Delete(ctx, &job, client.PropagationPolicy(propagationPolicy)) + if err != nil { + return "Not able to delete job: " + job.Name, err + } } + return fmt.Sprintf("RolloutStrategy: immediate, deleted jobs owned by the previous version of the scaleJob: %d jobs deleted", len(jobIndexes)), nil } - return fmt.Sprintf("RolloutStrategy: immediate, deleted jobs owned by the previous version of the scaleJob: %d jobs deleted", len(jobs.Items)), nil } return fmt.Sprintf("RolloutStrategy: %s", scaledJob.Spec.RolloutStrategy), nil } diff --git a/go.mod b/go.mod 
index 69c37dbb8c6..926a2222897 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/Azure/azure-kusto-go v0.15.2 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/azure-storage-queue-go v0.0.0-20230927153703-648530c9aaf2 @@ -97,12 +97,12 @@ require ( google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.29.2 - k8s.io/apimachinery v0.29.2 - k8s.io/apiserver v0.29.2 + k8s.io/api v0.29.4 + k8s.io/apimachinery v0.29.4 + k8s.io/apiserver v0.29.4 k8s.io/client-go v1.5.2 - k8s.io/code-generator v0.29.2 - k8s.io/component-base v0.29.2 + k8s.io/code-generator v0.29.4 + k8s.io/component-base v0.29.4 k8s.io/klog/v2 v2.120.1 k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 k8s.io/metrics v0.28.9 @@ -115,6 +115,14 @@ require ( sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 ) +require ( + filippo.io/edwards25519 v1.1.0 // indirect + nhooyr.io/websocket v1.8.11 // indirect +) + +// Remove this when they merge the PR and cut a release https://github.com/open-policy-agent/cert-controller/pull/202 +replace github.com/open-policy-agent/cert-controller => github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 + replace ( // pin k8s.io to v0.28.9 github.com/google/cel-go => github.com/google/cel-go v0.16.1 @@ -164,9 +172,8 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/iam v1.1.7 // indirect code.cloudfoundry.org/clock v1.1.0 // indirect - filippo.io/edwards25519 v1.1.0 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 github.com/Azure/go-amqp v1.0.5 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -342,13 +349,13 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.22.0 - golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 + golang.org/x/crypto v0.24.0 + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.19.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.20.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect @@ -361,9 +368,9 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 - k8s.io/apiextensions-apiserver v0.29.2 // indirect + k8s.io/apiextensions-apiserver v0.29.4 // indirect k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect - k8s.io/kms v0.29.2 // indirect + k8s.io/kms v0.29.4 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.1 // indirect diff --git a/go.sum b/go.sum index c0d4dc45484..debdcfc6b32 100644 --- a/go.sum +++ b/go.sum @@ -1347,11 +1347,11 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbL github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0 h1:d7S13DPk63SvBJfSUiMJJ26tRsvrBumkLPEfQEAarGk= github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventgrid v0.4.0/go.mod h1:7e/gsXp4INB4k/vg0h3UOkYpDK6oZqctxr+L05FGybg= github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 h1:QISzMrspEvZj4zrrN2wlNwfum5RmnKQhQNiSujwH7oU= @@ -1583,7 +1583,6 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod 
h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -2010,6 +2009,8 @@ github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwA github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7 h1:RmuyuIEdGQx2M7gJ72PRwmTPgel2VEzoiu+CmpFkjzc= +github.com/jorturfer/cert-controller v0.0.0-20240427003941-363ba56751d7/go.mod h1:jRjiFw5OzNzEtyB76Lw6Fxy1avWw7GWKRqKABeoDcJQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -2150,10 +2151,8 @@ github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3Hig github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE= github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= -github.com/open-policy-agent/cert-controller v0.10.1 h1:RXSYoyn8FdCenWecRP//UV5nbVfmstNpj4kHQFkvPK4= -github.com/open-policy-agent/cert-controller v0.10.1/go.mod h1:4uRbBLY5DsPOog+a9pqk3JLxuuhrWsbUedQW65HcLTI= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20230822235116-f0b62fe1e4c4 h1:5dum5SLEz+95JDLkMls7Z7IDPjvSq3UhJSFe4f5einQ= -github.com/open-policy-agent/frameworks/constraint v0.0.0-20230822235116-f0b62fe1e4c4/go.mod h1:54/KzLMvA5ndBVpm7B1OjLeV0cUtTLTz2bZ2OtydLpU= 
+github.com/open-policy-agent/frameworks/constraint v0.0.0-20240411024313-c2efb00269a8 h1:+3lwaywVgMn4XfcYASBJs2V19XjsKlsRmUEne+Zn8eY= +github.com/open-policy-agent/frameworks/constraint v0.0.0-20240411024313-c2efb00269a8/go.mod h1:6olMPE+rOIu3A1fNk9FaMAe18fTlJbElZUDz+Oi+MkU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= @@ -2216,8 +2215,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= @@ -2462,8 +2461,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod 
h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2667,8 +2666,9 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= @@ -3148,8 +3148,8 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.28.9 h1:E7VEXXCAlSrp+08zq4zgd+ko6Ttu0Mw+XoXlIkDTVW0= k8s.io/api v0.28.9/go.mod h1:AnCsDYf3SHjfa8mPG5LGYf+iF4mie+3peLQR51MMCgw= 
-k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= +k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= k8s.io/apimachinery v0.28.9 h1:aXz4Zxsw+Pk4KhBerAtKRxNN1uSMWKfciL/iOdBfXvA= k8s.io/apimachinery v0.28.9/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= k8s.io/apiserver v0.28.9 h1:koPXvgSXRBDxKJQjJGdZNgPsT9lQv6scJJFipd1m86E= @@ -3165,10 +3165,10 @@ k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= -k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= -k8s.io/kube-aggregator v0.28.1 h1:rvG4llYnQKHjj6YjjoBPEJxfD1uH0DJwkrJTNKGAaCs= -k8s.io/kube-aggregator v0.28.1/go.mod h1:JaLizMe+AECSpO2OmrWVsvnG0V3dX1RpW+Wq/QHbu18= +k8s.io/kms v0.29.4 h1:cFGEoCLwoXk/eqYZppLZxybCdmEWeRKMCbm9f13IdRQ= +k8s.io/kms v0.29.4/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= +k8s.io/kube-aggregator v0.29.4 h1:yT7vYtwIag4G8HNrktYZ3qz6p6oHKronMAXOw4eQ2WQ= +k8s.io/kube-aggregator v0.29.4/go.mod h1:zBfe4iXXmw5HinNgN0JoAu5rpXdyCUvRfG99+FVOd68= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/metrics v0.28.9 h1:3TAJhF1GzYK89bE1RLqDinTXAlCnI8UgciwfpKHzKfg= @@ -3237,8 +3237,8 @@ modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/token v1.1.0/go.mod 
h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/mock/mock_scaling/mock_executor/mock_interface.go b/pkg/mock/mock_scaling/mock_executor/mock_interface.go index 53836a597fb..eef9c5fc4bd 100644 --- a/pkg/mock/mock_scaling/mock_executor/mock_interface.go +++ b/pkg/mock/mock_scaling/mock_executor/mock_interface.go @@ -42,15 +42,15 @@ func (m *MockScaleExecutor) EXPECT() *MockScaleExecutorMockRecorder { } // RequestJobScale mocks base method. -func (m *MockScaleExecutor) RequestJobScale(ctx context.Context, scaledJob *v1alpha1.ScaledJob, isActive bool, scaleTo, maxScale int64) { +func (m *MockScaleExecutor) RequestJobScale(ctx context.Context, scaledJob *v1alpha1.ScaledJob, isActive, isError bool, scaleTo, maxScale int64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RequestJobScale", ctx, scaledJob, isActive, scaleTo, maxScale) + m.ctrl.Call(m, "RequestJobScale", ctx, scaledJob, isActive, isError, scaleTo, maxScale) } // RequestJobScale indicates an expected call of RequestJobScale. 
-func (mr *MockScaleExecutorMockRecorder) RequestJobScale(ctx, scaledJob, isActive, scaleTo, maxScale any) *gomock.Call { +func (mr *MockScaleExecutorMockRecorder) RequestJobScale(ctx, scaledJob, isActive, isError, scaleTo, maxScale any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestJobScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestJobScale), ctx, scaledJob, isActive, scaleTo, maxScale) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestJobScale", reflect.TypeOf((*MockScaleExecutor)(nil).RequestJobScale), ctx, scaledJob, isActive, isError, scaleTo, maxScale) } // RequestScale mocks base method. diff --git a/pkg/scalers/github_runner_scaler.go b/pkg/scalers/github_runner_scaler.go index 9c430341272..d53cd21bd2e 100644 --- a/pkg/scalers/github_runner_scaler.go +++ b/pkg/scalers/github_runner_scaler.go @@ -470,31 +470,44 @@ func (s *githubRunnerScaler) getRepositories(ctx context.Context) ([]string, err return s.metadata.repos, nil } - var url string - switch s.metadata.runnerScope { - case ORG: - url = fmt.Sprintf("%s/orgs/%s/repos", s.metadata.githubAPIURL, s.metadata.owner) - case REPO: - url = fmt.Sprintf("%s/users/%s/repos", s.metadata.githubAPIURL, s.metadata.owner) - case ENT: - url = fmt.Sprintf("%s/orgs/%s/repos", s.metadata.githubAPIURL, s.metadata.owner) - default: - return nil, fmt.Errorf("runnerScope %s not supported", s.metadata.runnerScope) - } - body, _, err := getGithubRequest(ctx, url, s.metadata, s.httpClient) - if err != nil { - return nil, err - } + page := 1 + var repoList []string - var repos []Repo - err = json.Unmarshal(body, &repos) - if err != nil { - return nil, err - } + for { + var url string + switch s.metadata.runnerScope { + case ORG: + url = fmt.Sprintf("%s/orgs/%s/repos?page=%s", s.metadata.githubAPIURL, s.metadata.owner, strconv.Itoa(page)) + case REPO: + url = fmt.Sprintf("%s/users/%s/repos?page=%s", s.metadata.githubAPIURL, s.metadata.owner, 
strconv.Itoa(page)) + case ENT: + url = fmt.Sprintf("%s/orgs/%s/repos?page=%s", s.metadata.githubAPIURL, s.metadata.owner, strconv.Itoa(page)) + default: + return nil, fmt.Errorf("runnerScope %s not supported", s.metadata.runnerScope) + } - var repoList []string - for _, repo := range repos { - repoList = append(repoList, repo.Name) + body, _, err := getGithubRequest(ctx, url, s.metadata, s.httpClient) + if err != nil { + return nil, err + } + + var repos []Repo + + err = json.Unmarshal(body, &repos) + if err != nil { + return nil, err + } + + for _, repo := range repos { + repoList = append(repoList, repo.Name) + } + + // GitHub returned less than 30 repos per page, so consider no repos left + if len(repos) < 30 { + break + } + + page++ } return repoList, nil diff --git a/pkg/scalers/github_runner_scaler_test.go b/pkg/scalers/github_runner_scaler_test.go index 2d03e81ecb1..fc1babdddc2 100644 --- a/pkg/scalers/github_runner_scaler_test.go +++ b/pkg/scalers/github_runner_scaler_test.go @@ -2,7 +2,11 @@ package scalers import ( "context" + "crypto/rand" + "encoding/json" "fmt" + "html/template" + "math/big" "net/http" "net/http/httptest" "strings" @@ -155,7 +159,22 @@ func buildQueueJSON() []byte { return []byte(output) } -func apiStubHandler(hasRateLeft bool) *httptest.Server { +func generateResponseExceed30Repos() []byte { + var repos []Repo + + for i := 0; i < 30; i++ { + var repository Repo + id, _ := rand.Int(rand.Reader, big.NewInt(100000)) + repository.ID = int(id.Int64()) + repository.Name = "BadRepo" + repos = append(repos, repository) + } + + result, _ := json.Marshal(repos) + return result +} + +func apiStubHandler(hasRateLeft bool, exceeds30Repos bool) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { futureReset := time.Now() futureReset = futureReset.Add(time.Minute * 30) @@ -178,9 +197,24 @@ func apiStubHandler(hasRateLeft bool) *httptest.Server { w.WriteHeader(http.StatusOK) } } - if 
strings.HasSuffix(r.URL.String(), "repos") { - _, _ = w.Write([]byte(testGhUserReposResponse)) - w.WriteHeader(http.StatusOK) + if strings.Contains(r.URL.String(), "repos?page") { + if exceeds30Repos && strings.HasSuffix(r.URL.String(), "?page=1") { + repos := generateResponseExceed30Repos() + tmpl, err := template.New("repos").Parse(string(repos)) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + err = tmpl.Execute(w, nil) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + } else { + _, _ = w.Write([]byte(testGhUserReposResponse)) + w.WriteHeader(http.StatusOK) + } } })) } @@ -193,7 +227,7 @@ func apiStubHandler404() *httptest.Server { } func TestNewGitHubRunnerScaler_QueueLength_NoRateLeft(t *testing.T) { - var apiStub = apiStubHandler(false) + var apiStub = apiStubHandler(false, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -217,7 +251,7 @@ func TestNewGitHubRunnerScaler_QueueLength_NoRateLeft(t *testing.T) { } func TestNewGitHubRunnerScaler_QueueLength_SingleRepo(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -241,7 +275,7 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo(t *testing.T) { } func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_ExtraRunnerLabels(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -265,7 +299,7 @@ func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_ExtraRunnerLabels(t *testi } func TestNewGitHubRunnerScaler_QueueLength_SingleRepo_LessRunnerLabels(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -358,7 +392,7 @@ func TestNewGitHubRunnerScaler_BadURL(t *testing.T) { } func TestNewGitHubRunnerScaler_QueueLength_NoRunnerLabels(t *testing.T) { - var 
apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -382,7 +416,7 @@ func TestNewGitHubRunnerScaler_QueueLength_NoRunnerLabels(t *testing.T) { } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -410,7 +444,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned(t *testing.T) { } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned_OneBad(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -438,7 +472,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_Assigned_OneBad(t *testing. } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledUserRepos(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -461,8 +495,31 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledUserRepos(t *testing. 
} } +func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledUserRepos_Exceeds30Entries(t *testing.T) { + var apiStub = apiStubHandler(true, true) + + meta := getGitHubTestMetaData(apiStub.URL) + + mockGitHubRunnerScaler := githubRunnerScaler{ + metadata: meta, + httpClient: http.DefaultClient, + } + + mockGitHubRunnerScaler.metadata.labels = []string{"foo", "bar"} + + queueLen, err := mockGitHubRunnerScaler.GetWorkflowQueueLength(context.TODO()) + if err != nil { + fmt.Println(err) + t.Fail() + } + + if queueLen != 2 { + t.Fail() + } +} + func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledOrgRepos(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -487,7 +544,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledOrgRepos(t *testing.T } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledEntRepos(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -512,7 +569,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledEntRepos(t *testing.T } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledBadRepos(t *testing.T) { - var apiStub = apiStubHandler(true) + var apiStub = apiStubHandler(true, false) meta := getGitHubTestMetaData(apiStub.URL) @@ -535,7 +592,7 @@ func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledBadRepos(t *testing.T } func TestNewGitHubRunnerScaler_QueueLength_MultiRepo_PulledRepos_NoRate(t *testing.T) { - var apiStub = apiStubHandler(false) + var apiStub = apiStubHandler(false, false) meta := getGitHubTestMetaData(apiStub.URL) diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go index f7871c8567e..f30b8fb97ec 100644 --- a/pkg/scalers/mongo_scaler.go +++ b/pkg/scalers/mongo_scaler.go @@ -7,6 +7,7 @@ import ( "net" "net/url" "strconv" + "strings" "time" "github.com/go-logr/logr" @@ -178,11 +179,13 @@ func 
parseMongoDBMetadata(config *scalersconfig.ScalerConfig) (*mongoDBMetadata, } meta.host = host - port, err := GetFromAuthOrMeta(config, "port") - if err != nil { - return nil, "", err + if !strings.Contains(scheme, "mongodb+srv") { + port, err := GetFromAuthOrMeta(config, "port") + if err != nil { + return nil, "", err + } + meta.port = port } - meta.port = port username, err := GetFromAuthOrMeta(config, "username") if err != nil { @@ -200,14 +203,18 @@ func parseMongoDBMetadata(config *scalersconfig.ScalerConfig) (*mongoDBMetadata, } } - if meta.connectionString != "" { + switch { + case meta.connectionString != "": connStr = meta.connectionString - } else { - // Build connection str + case meta.scheme == "mongodb+srv": + // nosemgrep: db-connection-string + connStr = fmt.Sprintf("%s://%s:%s@%s/%s", meta.scheme, url.QueryEscape(meta.username), url.QueryEscape(meta.password), meta.host, meta.dbName) + default: addr := net.JoinHostPort(meta.host, meta.port) // nosemgrep: db-connection-string connStr = fmt.Sprintf("%s://%s:%s@%s/%s", meta.scheme, url.QueryEscape(meta.username), url.QueryEscape(meta.password), addr, meta.dbName) } + meta.triggerIndex = config.TriggerIndex return &meta, connStr, nil } diff --git a/pkg/scalers/mongo_scaler_test.go b/pkg/scalers/mongo_scaler_test.go index 02f1e9479ef..fd9f54f8337 100644 --- a/pkg/scalers/mongo_scaler_test.go +++ b/pkg/scalers/mongo_scaler_test.go @@ -73,7 +73,7 @@ var testMONGODBMetadata = []parseMongoDBMetadataTestData{ // mongodb srv support { metadata: map[string]string{"query": `{"name":"John"}`, "collection": "demo", "queryValue": "12"}, - authParams: map[string]string{"dbName": "test", "scheme": "mongodb+srv", "host": "localhost", "port": "1234", "username": "sample", "password": "sec@ure"}, + authParams: map[string]string{"dbName": "test", "scheme": "mongodb+srv", "host": "localhost", "port": "", "username": "sample", "password": "sec@ure"}, resolvedEnv: testMongoDBResolvedEnv, raisesError: false, }, @@ -90,7 
+90,7 @@ var mongoDBConnectionStringTestDatas = []mongoDBConnectionStringTestData{ {metadataTestData: &testMONGODBMetadata[2], connectionString: "mongodb://mongodb0.example.com:27017"}, {metadataTestData: &testMONGODBMetadata[3], connectionString: "mongodb://sample:test%40password@localhost:1234/test"}, {metadataTestData: &testMONGODBMetadata[4], connectionString: "mongodb://sample:sec%40ure@localhost:1234/test"}, - {metadataTestData: &testMONGODBMetadata[5], connectionString: "mongodb+srv://sample:sec%40ure@localhost:1234/test"}, + {metadataTestData: &testMONGODBMetadata[5], connectionString: "mongodb+srv://sample:sec%40ure@localhost/test"}, } var mongoDBMetricIdentifiers = []mongoDBMetricIdentifier{ diff --git a/pkg/scaling/executor/scale_executor.go b/pkg/scaling/executor/scale_executor.go index ee5f15aa171..b28061a495e 100644 --- a/pkg/scaling/executor/scale_executor.go +++ b/pkg/scaling/executor/scale_executor.go @@ -39,7 +39,7 @@ const ( // ScaleExecutor contains methods RequestJobScale and RequestScale type ScaleExecutor interface { - RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) + RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, isError bool, scaleTo int64, maxScale int64) RequestScale(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, isActive bool, isError bool, options *ScaleExecutorOptions) } diff --git a/pkg/scaling/executor/scale_jobs.go b/pkg/scaling/executor/scale_jobs.go index 52f7ea37fdc..958b952e05b 100644 --- a/pkg/scaling/executor/scale_jobs.go +++ b/pkg/scaling/executor/scale_jobs.go @@ -38,7 +38,7 @@ const ( defaultFailedJobsHistoryLimit = int32(100) ) -func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive bool, scaleTo int64, maxScale int64) { +func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob, isActive, isError bool, scaleTo 
int64, maxScale int64) { logger := e.logger.WithValues("scaledJob.Name", scaledJob.Name, "scaledJob.Namespace", scaledJob.Namespace) runningJobCount := e.getRunningJobCount(ctx, scaledJob) @@ -65,6 +65,19 @@ func (e *scaleExecutor) RequestJobScale(ctx context.Context, scaledJob *kedav1al logger.V(1).Info("No change in activity") } + if isError { + // some triggers responded with error + // Set ScaledJob.Status.ReadyCondition to Unknown + readyCondition := scaledJob.Status.Conditions.GetReadyCondition() + msg := "Some triggers defined in ScaledJob are not working correctly" + logger.V(1).Info(msg) + if !readyCondition.IsUnknown() { + if err := e.setReadyCondition(ctx, logger, scaledJob, metav1.ConditionUnknown, "PartialTriggerError", msg); err != nil { + logger.Error(err, "error setting ready condition") + } + } + } + condition := scaledJob.Status.Conditions.GetActiveCondition() if condition.IsUnknown() || condition.IsTrue() != isActive { if isActive { @@ -101,6 +114,10 @@ func (e *scaleExecutor) getScalingDecision(scaledJob *kedav1alpha1.ScaledJob, ru } func (e *scaleExecutor) createJobs(ctx context.Context, logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob, scaleTo int64, maxScale int64) { + if maxScale <= 0 { + logger.Info("No need to create jobs - all requested jobs already exist", "jobs", maxScale) + return + } logger.Info("Creating jobs", "Effective number of max jobs", maxScale) if scaleTo > maxScale { scaleTo = maxScale @@ -137,6 +154,13 @@ func (e *scaleExecutor) generateJobs(logger logr.Logger, scaledJob *kedav1alpha1 labels[key] = value } + annotations := map[string]string{ + "scaledjob.keda.sh/generation": strconv.FormatInt(scaledJob.Generation, 10), + } + for key, value := range scaledJob.ObjectMeta.Annotations { + annotations[key] = value + } + jobs := make([]*batchv1.Job, int(scaleTo)) for i := 0; i < int(scaleTo); i++ { job := &batchv1.Job{ @@ -144,7 +168,7 @@ func (e *scaleExecutor) generateJobs(logger logr.Logger, scaledJob *kedav1alpha1 
GenerateName: scaledJob.GetName() + "-", Namespace: scaledJob.GetNamespace(), Labels: labels, - Annotations: scaledJob.ObjectMeta.Annotations, + Annotations: annotations, }, Spec: *scaledJob.Spec.JobTargetRef.DeepCopy(), } diff --git a/pkg/scaling/executor/scale_jobs_test.go b/pkg/scaling/executor/scale_jobs_test.go index 7542c1acb58..545a26583c4 100644 --- a/pkg/scaling/executor/scale_jobs_test.go +++ b/pkg/scaling/executor/scale_jobs_test.go @@ -316,8 +316,11 @@ func TestCreateJobs(t *testing.T) { func TestGenerateJobs(t *testing.T) { var ( - expectedAnnotations = map[string]string{"test": "test"} - expectedLabels = map[string]string{ + expectedAnnotations = map[string]string{ + "test": "test", + "scaledjob.keda.sh/generation": "0", + } + expectedLabels = map[string]string{ "app.kubernetes.io/managed-by": "keda-operator", "app.kubernetes.io/name": "test", "app.kubernetes.io/part-of": "test", diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 52ff3c3c742..5a955e48e66 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -259,8 +259,8 @@ func (h *scaleHandler) checkScalers(ctx context.Context, scalableObject interfac return } - isActive, scaleTo, maxScale := h.isScaledJobActive(ctx, obj) - h.scaleExecutor.RequestJobScale(ctx, obj, isActive, scaleTo, maxScale) + isActive, isError, scaleTo, maxScale := h.isScaledJobActive(ctx, obj) + h.scaleExecutor.RequestJobScale(ctx, obj, isActive, isError, scaleTo, maxScale) } } @@ -293,7 +293,7 @@ func (h *scaleHandler) getScalersCacheForScaledObject(ctx context.Context, scale // performGetScalersCache returns cache for input scalableObject, it is common code used by GetScalersCache() and getScalersCacheForScaledObject() methods func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, scalableObject interface{}, scalableObjectGeneration *int64, scalableObjectKind, scalableObjectNamespace, scalableObjectName string) (*cache.ScalersCache, error) { 
h.scalerCachesLock.RLock() - regenerateCache := false + if cache, ok := h.scalerCaches[key]; ok { // generation was specified -> let's include it in the check as well if scalableObjectGeneration != nil { @@ -301,15 +301,12 @@ func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, s h.scalerCachesLock.RUnlock() return cache, nil } - // object was found in cache, but the generation is not correct, - // we'll need to close scalers in the cache and - // proceed further to recreate the cache - regenerateCache = false } else { h.scalerCachesLock.RUnlock() return cache, nil } } + h.scalerCachesLock.RUnlock() if scalableObject == nil { @@ -379,17 +376,17 @@ func (h *scaleHandler) performGetScalersCache(ctx context.Context, key string, s default: } - // Scalers Close() could be impacted by timeouts, blocking the mutex - // until the timeout happens. Instead of locking the mutex, we take - // the old cache item and we close it in another goroutine, not locking - // the cache: https://github.com/kedacore/keda/issues/5083 - if regenerateCache { - oldCache := h.scalerCaches[key] + h.scalerCachesLock.Lock() + defer h.scalerCachesLock.Unlock() + + if oldCache, ok := h.scalerCaches[key]; ok { + // Scalers Close() could be impacted by timeouts, blocking the mutex + // until the timeout happens. Instead of locking the mutex, we take + // the old cache item and we close it in another goroutine, not locking + // the cache: https://github.com/kedacore/keda/issues/5083 go oldCache.Close(ctx) } - h.scalerCachesLock.Lock() - defer h.scalerCachesLock.Unlock() h.scalerCaches[key] = newCache return h.scalerCaches[key], nil } @@ -816,15 +813,16 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, // getScaledJobMetrics returns metrics for specified metric name for a ScaledJob identified by its name and namespace. // It could either query the metric value directly from the scaler or from a cache, that's being stored for the scaler. 
-func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) []scaledjob.ScalerMetrics { +func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) ([]scaledjob.ScalerMetrics, bool) { logger := log.WithValues("scaledJob.Namespace", scaledJob.Namespace, "scaledJob.Name", scaledJob.Name) cache, err := h.GetScalersCache(ctx, scaledJob) metricscollector.RecordScaledJobError(scaledJob.Namespace, scaledJob.Name, err) if err != nil { log.Error(err, "error getting scalers cache", "scaledJob.Namespace", scaledJob.Namespace, "scaledJob.Name", scaledJob.Name) - return nil + return nil, true } + var isError bool var scalersMetrics []scaledjob.ScalerMetrics scalers, scalerConfigs := cache.GetScalers() for scalerIndex, scaler := range scalers { @@ -852,8 +850,9 @@ func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav metricscollector.RecordScalerLatency(scaledJob.Namespace, scaledJob.Name, scalerName, scalerIndex, metricName, false, latency) } if err != nil { - scalerLogger.V(1).Info("Error getting scaler metrics and activity, but continue", "error", err) + scalerLogger.Error(err, "Error getting scaler metrics and activity, but continue") cache.Recorder.Event(scaledJob, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) + isError = true continue } if isTriggerActive { @@ -886,21 +885,21 @@ func (h *scaleHandler) getScaledJobMetrics(ctx context.Context, scaledJob *kedav metricscollector.RecordScalerActive(scaledJob.Namespace, scaledJob.Name, scalerName, scalerIndex, metricName, false, isTriggerActive) } } - return scalersMetrics + return scalersMetrics, isError } // isScaledJobActive returns whether the input ScaledJob: // is active as the first return value, // the second and the third return values indicate queueLength and maxValue for scale -func (h *scaleHandler) isScaledJobActive(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) (bool, int64, 
int64) { +func (h *scaleHandler) isScaledJobActive(ctx context.Context, scaledJob *kedav1alpha1.ScaledJob) (bool, bool, int64, int64) { logger := logf.Log.WithName("scalemetrics") - scalersMetrics := h.getScaledJobMetrics(ctx, scaledJob) + scalersMetrics, isError := h.getScaledJobMetrics(ctx, scaledJob) isActive, queueLength, maxValue, maxFloatValue := scaledjob.IsScaledJobActive(scalersMetrics, scaledJob.Spec.ScalingStrategy.MultipleScalersCalculation, scaledJob.MinReplicaCount(), scaledJob.MaxReplicaCount()) logger.V(1).WithValues("scaledJob.Name", scaledJob.Name).Info("Checking if ScaleJob Scalers are active", "isActive", isActive, "maxValue", maxFloatValue, "MultipleScalersCalculation", scaledJob.Spec.ScalingStrategy.MultipleScalersCalculation) - return isActive, queueLength, maxValue + return isActive, isError, queueLength, maxValue } // getTrueMetricArray is a help function made for composite scaler to determine diff --git a/pkg/scaling/scale_handler_test.go b/pkg/scaling/scale_handler_test.go index 7a822c163a8..23180308841 100644 --- a/pkg/scaling/scale_handler_test.go +++ b/pkg/scaling/scale_handler_test.go @@ -661,19 +661,21 @@ func TestIsScaledJobActive(t *testing.T) { scalerCachesLock: &sync.RWMutex{}, scaledObjectsMetricCache: metricscache.NewMetricsCache(), } - isActive, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) assert.Equal(t, true, isActive) + assert.Equal(t, false, isError) assert.Equal(t, int64(20), queueLength) assert.Equal(t, int64(10), maxValue) scalerCache.Close(context.Background()) // Test the valiation scalerTestDatam := []scalerTestData{ - newScalerTestData("s0-queueLength", 100, "max", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 20, 20), - newScalerTestData("queueLength", 100, "min", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 5, 2), - 
newScalerTestData("messageCount", 100, "avg", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 12, 9), - newScalerTestData("s3-messageCount", 100, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 35, 27), - newScalerTestData("s10-messageCount", 25, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, 35, 25), + newScalerTestData("s0-queueLength", 100, "max", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 20, 20), + newScalerTestData("queueLength", 100, "min", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 5, 2), + newScalerTestData("messageCount", 100, "avg", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 12, 9), + newScalerTestData("s3-messageCount", 100, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 35, 27), + newScalerTestData("s10-messageCount", 25, "sum", 20, 1, true, 10, 2, true, 5, 3, true, 7, 4, false, true, false, 35, 25), } for index, scalerTestData := range scalerTestDatam { @@ -717,9 +719,11 @@ func TestIsScaledJobActive(t *testing.T) { scaledObjectsMetricCache: metricscache.NewMetricsCache(), } fmt.Printf("index: %d", index) - isActive, queueLength, maxValue = sh.isScaledJobActive(context.TODO(), scaledJob) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue = sh.isScaledJobActive(context.TODO(), scaledJob) // assert.Equal(t, 5, index) assert.Equal(t, scalerTestData.ResultIsActive, isActive) + assert.Equal(t, scalerTestData.ResultIsError, isError) assert.Equal(t, scalerTestData.ResultQueueLength, queueLength) assert.Equal(t, scalerTestData.ResultMaxValue, maxValue) scalerCache.Close(context.Background()) @@ -757,8 +761,10 @@ func TestIsScaledJobActiveIfQueueEmptyButMinReplicaCountGreaterZero(t *testing.T scaledObjectsMetricCache: metricscache.NewMetricsCache(), } - isActive, queueLength, maxValue := sh.isScaledJobActive(context.TODO(), scaledJobSingle) + // nosemgrep: context-todo + isActive, isError, queueLength, maxValue 
:= sh.isScaledJobActive(context.TODO(), scaledJobSingle) assert.Equal(t, true, isActive) + assert.Equal(t, false, isError) assert.Equal(t, int64(0), queueLength) assert.Equal(t, int64(0), maxValue) scalerCache.Close(context.Background()) @@ -781,6 +787,7 @@ func newScalerTestData( scaler4AverageValue int, //nolint:golint,unparam scaler4IsActive bool, //nolint:golint,unparam resultIsActive bool, //nolint:golint,unparam + resultIsError bool, //nolint:golint,unparam resultQueueLength, resultMaxLength int) scalerTestData { return scalerTestData{ @@ -800,6 +807,7 @@ func newScalerTestData( Scaler4AverageValue: int64(scaler4AverageValue), Scaler4IsActive: scaler4IsActive, ResultIsActive: resultIsActive, + ResultIsError: resultIsError, ResultQueueLength: int64(resultQueueLength), ResultMaxValue: int64(resultMaxLength), } @@ -822,6 +830,7 @@ type scalerTestData struct { Scaler4AverageValue int64 Scaler4IsActive bool ResultIsActive bool + ResultIsError bool ResultQueueLength int64 ResultMaxValue int64 MinReplicaCount int32 diff --git a/tests/helper/helper.go b/tests/helper/helper.go index bbb3b8124b1..e1ff632951b 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -77,7 +77,6 @@ var ( AzureADMsiID = os.Getenv("TF_AZURE_IDENTITY_1_APP_FULL_ID") AzureADMsiClientID = os.Getenv("TF_AZURE_IDENTITY_1_APP_ID") AzureADTenantID = os.Getenv("TF_AZURE_SP_TENANT") - AzureRunAadPodIdentityTests = os.Getenv("AZURE_RUN_AAD_POD_IDENTITY_TESTS") AzureRunWorkloadIdentityTests = os.Getenv("AZURE_RUN_WORKLOAD_IDENTITY_TESTS") AwsIdentityTests = os.Getenv("AWS_RUN_IDENTITY_TESTS") GcpIdentityTests = os.Getenv("GCP_RUN_IDENTITY_TESTS") @@ -85,6 +84,9 @@ var ( InstallCertManager = AwsIdentityTests == StringTrue || GcpIdentityTests == StringTrue InstallKeda = os.Getenv("E2E_INSTALL_KEDA") InstallKafka = os.Getenv("E2E_INSTALL_KAFKA") + // As this isn't supported anymore after 2.15, we need to skip the check on v2.14 + // to execute the installation + AzureRunAadPodIdentityTests = 
"true" ) var ( diff --git a/tests/run-all.go b/tests/run-all.go index bf9029c5ff4..76134d55e32 100644 --- a/tests/run-all.go +++ b/tests/run-all.go @@ -41,11 +41,25 @@ type TestResult struct { func main() { ctx := context.Background() + // + // Detect test cases + // e2eRegex := os.Getenv("E2E_TEST_REGEX") if e2eRegex == "" { e2eRegex = ".*_test.go" } + regularTestFiles := getRegularTestFiles(e2eRegex) + sequentialTestFiles := getSequentialTestFiles(e2eRegex) + if len(regularTestFiles) == 0 && len(sequentialTestFiles) == 0 { + fmt.Printf("No test has been executed, please review your regex: '%s'\n", e2eRegex) + os.Exit(1) + } + + if len(os.Args) > 1 && os.Args[1] == "regex-check" { + return + } + // // Install KEDA // @@ -57,17 +71,6 @@ func main() { os.Exit(1) } - // - // Detect test cases - // - regularTestFiles := getRegularTestFiles(e2eRegex) - sequentialTestFiles := getSequentialTestFiles(e2eRegex) - if len(regularTestFiles) == 0 && len(sequentialTestFiles) == 0 { - uninstallKeda(ctx) - fmt.Printf("No test has been executed, please review your regex: '%s'\n", e2eRegex) - os.Exit(1) - } - // // Execute regular tests // diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore new file mode 100644 index 00000000000..8cdb9103650 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore @@ -0,0 +1,4 @@ +# live test artifacts +Dockerfile +k8s.yaml +sshkey* diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index f6749c03059..a8c2feb6d47 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,59 @@ # Release History +## 1.7.0 (2024-06-20) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipelines service 
connection with + workload identity federation + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.7.0-beta.1 +* Removed the persistent token caching API. It will return in v1.8.0-beta.1 + +## 1.7.0-beta.1 (2024-06-10) + +### Features Added +* Restored `AzurePipelinesCredential` and persistent token caching API + +## Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Values which `NewAzurePipelinesCredential` read from environment variables in + prior versions are now parameters +* Renamed `AzurePipelinesServiceConnectionCredentialOptions` to `AzurePipelinesCredentialOptions` + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0 (2024-06-10) + +### Features Added +* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential + that authenticates with client assertions such as federated credentials + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.4 +* Removed `AzurePipelinesCredential` and the persistent token caching API. + They will return in v1.7.0-beta.1 + +### Bugs Fixed +* Managed identity bug fixes + +## 1.6.0-beta.4 (2024-05-14) + +### Features Added +* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with + workload identity federation + +## 1.6.0-beta.3 (2024-04-09) + +### Breaking Changes +* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity + environments to avoid excessive retry delays when the IMDS endpoint is not available. This + should improve credential chain resolution for local development scenarios. 
+ +### Bugs Fixed +* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances + ## 1.5.2 (2024-04-09) ### Bugs Fixed @@ -9,6 +63,28 @@ * Restored v1.4.0 error behavior for empty tenant IDs * Upgraded dependencies +## 1.6.0-beta.2 (2024-02-06) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.6.0-beta.1 +* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct + type that carries the `TokenRequestOptions` passed to the `GetToken` call which + returned the error. + +### Bugs Fixed +* Fixed more cases in which credential chains like `DefaultAzureCredential` + should try their next credential after attempting managed identity + authentication in a Docker Desktop container + +### Other Changes +* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration + +## 1.6.0-beta.1 (2024-01-17) + +### Features Added +* Restored persistent token caching API first added in v1.5.0-beta.1 +* Added `AzureCLICredentialOptions.Subscription` + ## 1.5.1 (2024-01-17) ### Bugs Fixed @@ -135,7 +211,7 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for - [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). + [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation). This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". 
* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 1a649202303..4404be82449 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview). 
## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index b6ad2d39f84..7e201ea2fdb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. +The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -30,7 +30,7 @@ When debugging and executing code locally, developers typically use their own ac #### Authenticating via the Azure CLI `DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user -signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. 
+signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user. When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. @@ -69,14 +69,14 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ## Managed Identity `DefaultAzureCredential` and `ManagedIdentityCredential` support -[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) +[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview) in any hosting environment which supports managed identities, such as (this list is not exhaustive): -* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity) -* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) -* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization) -* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity) -* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity) -* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token) +* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity) +* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication) +* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization) +* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity) +* [Azure Service 
Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity) +* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token) ## Examples @@ -140,6 +140,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) |Credential|Usage |-|- +|[AzurePipelinesCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzurePipelinesCredential)|Authenticate an Azure Pipelines [service connection](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) |[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion |[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate |[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret @@ -207,7 +208,7 @@ For more details, see the [token caching documentation](https://aka.ms/azsdk/go/ Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot). -For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes). +For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes). 
### Logging diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index c0d6601469c..fbaa2922048 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -45,7 +45,7 @@ With persistent disk token caching enabled, the library first determines if a va #### Example code -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data. +See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. ### Credentials supporting token caching @@ -57,6 +57,7 @@ The following table indicates the state of in-memory and persistent caching in e |--------------------------------|---------------------------------------------------------------------|--------------------------| | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `AzurePipelinesCredential` | Supported | Supported | | `ClientAssertionCredential` | Supported | Supported | | `ClientCertificateCredential` | Supported | Supported | | `ClientSecretCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 832c599eb90..54016a07098 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -10,6 +10,7 @@ This troubleshooting guide covers failure investigation techniques, 
common error - [Enable and configure logging](#enable-and-configure-logging) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) +- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -58,7 +59,7 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. 
[Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. - __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. @@ -97,17 +98,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. 
Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -123,20 +124,20 @@ azlog.SetEvents(azidentity.EventAuthentication) |Host Environment| | | |---|---|---| -|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| -|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| +|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)| +|Azure App Service and Azure 
Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)| |Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)| -|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| -|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)|| +|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)|| +|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)|| ### Azure Virtual Machine managed identity | Error Message |Description| Mitigation | |---|---|---| -|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| +|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.

If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).| |The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`| -|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|

| -|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|| +|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|| +|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|| #### Verify IMDS is available on the VM @@ -152,7 +153,7 @@ curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://man | Error Message |Description| Mitigation | |---|---|---| -|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|| +|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|| #### Verify the App Service managed identity endpoint is available @@ -177,8 +178,8 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio | Error Message |Description| Mitigation | |---|---|---| -|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|| -|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|| +|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|| +|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|| #### Verify the Azure CLI can obtain tokens @@ -226,6 +227,15 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul |---|---|---| |no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.