diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml new file mode 100755 index 00000000..54dfd96d --- /dev/null +++ b/.buildkite/pipeline.yml @@ -0,0 +1,105 @@ +agents: + queue: k8s-builders + +steps: + - label: lint + key: lint + command: make lint + plugins: + - docker#v5.11.0: + image: "golang:1.22" + environment: + - BUILDKITE=true + - label: unit + key: unit + command: make unit + plugins: + - docker#v5.11.0: + image: "golang:1.22" + depends_on: + - lint + + - label: test_network + key: test_network + if: build.pull_request.labels includes 'ci-ready' + command: make test_network + plugins: + - seek-oss/aws-sm#v2.3.2: + json-to-env: + - secret-id: sdlc/prod/buildkite/integration_provider_secret_redpanda_client + - docker#v5.11.0: + image: "golang:1.22" + environment: + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET + depends_on: + - lint + - unit + - label: test_cluster_aws + key: test_cluster_aws + if: build.pull_request.labels includes 'ci-ready' + command: make test_cluster_aws + plugins: + - seek-oss/aws-sm#v2.3.2: + json-to-env: + - secret-id: sdlc/prod/buildkite/integration_provider_secret_redpanda_client + - docker#v5.11.0: + image: "golang:1.22" + environment: + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET + depends_on: + - lint + - unit + - test_network + - label: test_cluster_azure + key: test_cluster_azure + if: build.pull_request.labels includes 'ci-ready' + command: make test_cluster_azure + plugins: + - seek-oss/aws-sm#v2.3.2: + json-to-env: + - secret-id: sdlc/prod/buildkite/integration_provider_secret_redpanda_client + - docker#v5.11.0: + image: "golang:1.22" + environment: + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET + depends_on: + - lint + - unit + - test_network + - label: test_cluster_gcp + key: test_cluster_gcp + if: build.pull_request.labels includes 
'ci-ready' + command: make test_cluster_gcp + plugins: + - seek-oss/aws-sm#v2.3.2: + json-to-env: + - secret-id: sdlc/prod/buildkite/integration_provider_secret_redpanda_client + - docker#v5.11.0: + image: "golang:1.22" + environment: + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET + depends_on: + - lint + - unit + - test_network + - label: test_serverless_cluster + key: test_serverless_cluster + if: build.pull_request.labels includes 'ci-ready' + command: make test_serverless_cluster + plugins: + - seek-oss/aws-sm#v2.3.2: + json-to-env: + - secret-id: sdlc/prod/buildkite/integration_provider_secret_redpanda_client + - docker#v5.11.0: + image: "golang:1.22" + environment: + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID + - INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET + depends_on: + - lint + - unit + - test_network \ No newline at end of file diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a1e7045f..656089a8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -11,10 +11,10 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.head_ref }} - + - name: Run make command run: make doc diff --git a/.github/workflows/lint-golang.yml b/.github/workflows/lint-golang.yml deleted file mode 100644 index 56b12f09..00000000 --- a/.github/workflows/lint-golang.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Lint golang - -on: [ push, pull_request ] - -jobs: - go: - name: Lint go files - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Setup go - uses: actions/setup-go@v3 - with: - go-version: stable - - - name: Run golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - version: v1.55.2 - args: --timeout 5m - - - name: Install gofumpt - env: - GOFUMPT_VER: 0.5.0 - run: | - mkdir -v -p "$HOME/.local/bin" - wget -O "$HOME/.local/bin/gofumpt" 
"https://github.com/mvdan/gofumpt/releases/download/v${GOFUMPT_VER}/gofumpt_v${GOFUMPT_VER}_linux_amd64" - chmod 0700 "$HOME/.local/bin/gofumpt" - - - name: Run gofumpt - run: | - find . -type f -name '*.go' ! -name '*.pb.go' | xargs -n1 "$HOME/.local/bin/gofumpt" -w -lang=1.21 - git diff --exit-code - - - name: go mod tidy - run: | - go mod tidy - git diff --exit-code -- go.mod go.sum \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8d5d933b..262f891d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,15 +1,10 @@ # Terraform Provider release workflow. name: Release - -# This GitHub action creates a release when a tag that matches the pattern -# "v*" (e.g. v0.1.0) is created. on: push: tags: - 'v*' -# Releases need permissions to read and write the repository contents. -# GitHub considers creating releases and uploading assets as writing contents. permissions: contents: write @@ -29,22 +24,22 @@ jobs: secret-ids: | ,sdlc/prod/github/tf_provider_rp parse-json-secrets: true - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@v4 with: # Allow goreleaser to access older tag information. 
fetch-depth: 0 - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@v5 with: go-version-file: 'go.mod' cache: true - name: Import GPG key - uses: crazy-max/ghaction-import-gpg@82a020f1f7f605c65dd2449b392a52c3fcfef7ef # v6.0.0 + uses: crazy-max/ghaction-import-gpg@v6 id: import_gpg with: gpg_private_key: ${{ env.TF_PROVIDER_RP_GPG_PRIVATE_KEY }} passphrase: ${{ env.TF_PROVIDER_RP_PASSPHRASE }} - name: Run GoReleaser - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0 + uses: goreleaser/goreleaser-action@v6 with: args: release --clean env: diff --git a/.gitignore b/.gitignore index 463e5ed1..e4670eee 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,11 @@ terraform-provider-redpanda tools/tfplugindocs .idea/** + +**.terraform.d** +**.terraform** +**terraform.tfstate** +**terraform.tfstate.backup** +terraform-provider-hashicorp +**/.tmp_* +.cluster_info* diff --git a/.golangci.yml b/.golangci.yml index 6959b3f4..80f03d8b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,5 @@ run: allow-parallel-runners: true - skip-dirs: - - proto # golangci-lint by default ignores some staticcheck and vet raised issues that # are actually important to catch. The following ensures that we do not ignore @@ -9,18 +7,6 @@ run: issues: exclude-use-default: false exclude-rules: - - linters: - - goconst - text: "string `UNSPECIFIED` has" - path: redpanda/utils/utils.go - - linters: - - goconst - text: "string `ANY` has" - path: redpanda/utils/utils.go - - linters: - - goconst - text: "string `UNKNOWN` has" - path: redpanda/utils/utils.go - path: _test\.go linters: - gocognit @@ -93,12 +79,6 @@ linters-settings: require-explanation: true require-specific: true - # If gofumpt is run outside a module, it assumes Go 1.0 rather than the - # latest Go. We always want the latest formatting. 
- # - # https://github.com/mvdan/gofumpt/issues/137 - gofumpt: - lang-version: "1.21" cyclop: max-complexity: 20 gocyclo: @@ -106,7 +86,6 @@ linters-settings: gocognit: min-complexity: 30 gci: - no-prefix-comments: true # no leading comment; we allow inline for nolint sections: - standard # stdlib - default # everything not std, not within cloudv2 @@ -127,9 +106,6 @@ linters-settings: - importShadow # disabled due to 1.18 failures - hugeParam - - rangeValCopy - - typeDefFirst - - paramTypeCombine # Revive is yet another metalinter with a lot of useful lints. # The below opts in to all the ones we would like to use. revive: @@ -137,8 +113,6 @@ linters-settings: enable-all-rules: true severity: warning confidence: 0.7 - error-code: 1 - warning-code: 1 rules: - name: add-constant disabled: true diff --git a/Makefile b/Makefile index 12f65cc2..136df5cf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,3 @@ -.PHONY: all doc int unit test lint linter tfplugindocs_install generate_docs integration_tests unit_tests install_gofumpt install_lint build - GOBIN := $(PWD)/tools TFPLUGINDOCSCMD := $(GOBIN)/tfplugindocs GOCMD=go @@ -7,64 +5,56 @@ BUFCMD=buf GOFUMPTCMD=gofumpt GOLANGCILINTCMD=golangci-lint -all: doc lint test - +.PHONY: doc doc: tfplugindocs_install generate_docs -int: integration_tests +.PHONY: int +int: all_integration_tests +.PHONY: unit unit: unit_tests -test: unit_tests integration_tests - +.PHONY: lint lint: install_gofumpt install_lint linter +.PHONY: ready ready: doc lint tidy +# Task to both clean and generate mocks +.PHONY: mock +mock: clean-mocks generate-mocks + +.PHONY: tidy tidy: @echo "running go mod tidy..." @$(GOCMD) mod tidy +.PHONY: tfplugindocs_install tfplugindocs_install: @echo "installing tfplugindocs..." @GOBIN=$(GOBIN) $(GOCMD) install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@latest +.PHONY: generate_docs generate_docs: tfplugindocs_install @echo "generating provider_documentation..." 
@$(TFPLUGINDOCSCMD) -integration_tests: +REDPANDA_CLIENT_ID ?= $(INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_ID) +REDPANDA_CLIENT_SECRET ?= $(INTEGRATION_PROVIDER_SECRET_REDPANDA_CLIENT_SECRET) +REDPANDA_CLOUD_ENVIRONMENT ?= "ign" +.PHONY: all_integration_tests +all_integration_tests: @echo "running integration tests..." @DEBUG=true \ - REDPANDA_CLIENT_ID=$${REDPANDA_CLIENT_ID} \ - REDPANDA_CLIENT_SECRET=$${REDPANDA_CLIENT_SECRET} \ + REDPANDA_CLIENT_ID=$(REDPANDA_CLIENT_ID) \ + REDPANDA_CLIENT_SECRET=$(REDPANDA_CLIENT_SECRET) \ RUN_CLUSTER_TESTS=true \ TF_ACC=true \ TF_LOG=DEBUG \ VERSION=ign \ $(GOCMD) test -v -parallel=5 -timeout=0 ./redpanda/tests -bulk_tests_data: - @echo "running bulk tests..." - @DEBUG=true \ - REDPANDA_CLIENT_ID=$${REDPANDA_CLIENT_ID} \ - REDPANDA_CLIENT_SECRET=$${REDPANDA_CLIENT_SECRET} \ - BULK_CLUSTER_ID=$${BULK_CLUSTER_ID} \ - RUN_BULK_TESTS=true \ - TF_ACC=true \ - VERSION=ign \ - $(GOCMD) test -v -parallel=5 -timeout=0 -run TestAccResourcesBulkData ./redpanda/tests - -bulk_tests_res: - @echo "running bulk tests..." - @DEBUG=true \ - REDPANDA_CLIENT_ID=$${REDPANDA_CLIENT_ID} \ - REDPANDA_CLIENT_SECRET=$${REDPANDA_CLIENT_SECRET} \ - RUN_BULK_TESTS=true \ - TF_ACC=true \ - VERSION=ign \ - $(GOCMD) test -v -parallel=5 -timeout=0 -run TestAccResourcesBulkRes ./redpanda/tests - +.PHONY: unit_tests unit_tests: @echo "running unit tests..." @DEBUG=true \ @@ -73,65 +63,278 @@ unit_tests: RUN_CLUSTER_TESTS=false \ $(GOCMD) test -short ./... +.PHONY: install_gofumpt install_gofumpt: @echo "installing gofumpt..." @$(GOCMD) install mvdan.cc/gofumpt@v0.6.0 +.PHONY: install_lint install_lint: @echo "installing linter..." @$(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2 +.PHONY: linter linter: - @echo "running gofumpt..." - @$(GOFUMPTCMD) -w -d . - @echo "running linter..." 
- @$(GOLANGCILINTCMD) run --config=.golangci.yml + @if [ "$$BUILDKITE" = "true" ]; then \ + GOFLAGS="-buildvcs=false" $(GOLANGCILINTCMD) run; \ + else \ + $(GOLANGCILINTCMD) run; \ + fi +.PHONY: fix-lint +fix-lint: + @echo "running gofumpt..." + @$(GOLANGCILINTCMD) run --fix -# Allow overriding these variables from the environment OS ?= $(shell go env GOOS) ARCH ?= $(shell go env GOARCH) -PROVIDER_VERSION ?= 0.5.2 +PROVIDER_VERSION ?= 0.7.1 +PROVIDER_NAMESPACE ?= hashicorp PROVIDER_NAME ?= redpanda -PROVIDER_NAMESPACE ?= redpanda-data CONTENT_ROOT ?= $(PWD) -PROVIDER_DIR := $(CONTENT_ROOT)/.terraform.d/plugins/registry.terraform.io/$(PROVIDER_NAMESPACE)/$(PROVIDER_NAME)/$(PROVIDER_VERSION)/$(OS)_$(ARCH) - -# Path to the built provider binary +CLOUD_PROVIDER ?= aws +TEST_TYPE ?= cluster +DATASOURCE_TEST_DIR ?= standard +TF_CONFIG_DIR ?= examples/$(TEST_TYPE)/$(CLOUD_PROVIDER) +PROVIDER_DIR := .terraform.d/plugins/registry.terraform.io/$(PROVIDER_NAMESPACE)/$(PROVIDER_NAME)/$(PROVIDER_VERSION)/$(OS)_$(ARCH) +TF_LOG ?= WARN +# path to the built binary PROVIDER_BINARY := $(PWD)/terraform-provider-$(PROVIDER_NAME) -build: +.PHONY: build-provider +build-provider: @echo "building terraform provider..." @$(GOCMD) build -o $(PROVIDER_BINARY) .PHONY: move-provider -move-provider: build +move-provider: @echo "moving provider binary to content root..." 
- @mkdir -p $(PROVIDER_DIR) - @cp $(PROVIDER_BINARY) $(PROVIDER_DIR)/terraform-provider-$(PROVIDER_NAME)_v$(PROVIDER_VERSION) + @echo "PROVIDER_DIR: $(PROVIDER_DIR)" + @mkdir -p $(call GET_TF_CONFIG_DIR)/$(PROVIDER_DIR) + @cp $(PROVIDER_BINARY) $(call GET_TF_CONFIG_DIR)/$(PROVIDER_DIR)/terraform-provider-$(PROVIDER_NAME)_v$(PROVIDER_VERSION) + +REDPANDA_CLOUD_ENVIRONMENT ?= "ign" + +.PHONY: apply +apply: build-provider move-provider test-create + +.PHONY: teardown +teardown: test-destroy +PREFIX ?= tfrp-local +CLOUD_PROVIDER ?= aws +CLUSTER_INFO_FILE := .cluster_info_$(CLOUD_PROVIDER).json + +define GET_OR_CREATE_CLUSTER_INFO +$(shell \ + if [ -f $(CLUSTER_INFO_FILE) ]; then \ + cat $(CLUSTER_INFO_FILE); \ + else \ + CLUSTER_NAME="$(PREFIX)-$$(LC_ALL=C tr -dc 'A-Za-z0-9' < /dev/urandom | head -c 4)"; \ + echo '{"name":"'$$CLUSTER_NAME'","id":""}' | tee $(CLUSTER_INFO_FILE); \ + fi \ +) +endef + +# Function to determine TF_CONFIG_DIR +define GET_TF_CONFIG_DIR +$(shell \ + if [ "$(TEST_TYPE)" = "cluster" ]; then \ + echo "examples/$(TEST_TYPE)/$(CLOUD_PROVIDER)"; \ + elif [ "$(TEST_TYPE)" = "datasource" ]; then \ + echo "examples/$(TEST_TYPE)/$(DATASOURCE_TEST_DIR)"; \ + else \ + echo "Error: Invalid TEST_TYPE" >&2; \ + exit 1; \ + fi \ +) +endef + +define UPDATE_CLUSTER_ID +$(shell \ + CLUSTER_INFO='$(1)' \ + CLUSTER_ID='$(2)' \ + NEW_INFO=$$(echo $$CLUSTER_INFO | jq --arg id "$$CLUSTER_ID" '.id = $$id') \ + echo $$NEW_INFO > $(CLUSTER_INFO_FILE) \ +) +endef -.PHONY: test-actual -test-actual: build test-create test-destroy +define GET_CLUSTER_NAME +$(shell \ + CLUSTER_INFO='$(call GET_OR_CREATE_CLUSTER_INFO)' \ + echo $$CLUSTER_INFO | jq -r '.name' \ +) +endef + +define GET_CLUSTER_ID +$(shell \ + CLUSTER_INFO='$(call GET_OR_CREATE_CLUSTER_INFO)' \ + echo $$CLUSTER_INFO | jq -r '.id' \ +) +endef -TF_CONFIG_DIR ?= examples/bulk-res .PHONY: test-create -test-create: - @echo "Applying Terraform configuration..." 
- @cd $(TF_CONFIG_DIR) && \ - REDPANDA_CLIENT_ID="$${REDPANDA_CLIENT_ID}" \ - REDPANDA_CLIENT_SECRET="$${REDPANDA_CLIENT_SECRET}" \ - REDPANDA_CLOUD_ENVIRONMENT="$${REDPANDA_CLOUD_ENVIRONMENT}" \ - TF_LOG=DEBUG \ +test-create: tf-init tf-apply update-cluster-info + +.PHONY: tf-init +tf-init: + @echo "Initializing Terraform..." + @cd $(call GET_TF_CONFIG_DIR) && \ + ls -ltrah && \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + REDPANDA_CLOUD_ENVIRONMENT="$(REDPANDA_CLOUD_ENVIRONMENT)" \ + TF_LOG=$(TF_LOG) \ TF_INSECURE_SKIP_PROVIDER_VERIFICATION=true \ - TF_PLUGIN_DIR=$(PROVIDER_DIR) - terraform init && \ - terraform apply -parallelism 10 -auto-approve + TF_PLUGIN_CACHE_DIR=.terraform.d/plugins_cache \ + terraform init -plugin-dir=.terraform.d/plugins + +.PHONY: tf-apply +tf-apply: + @echo "Constructing Terraform apply command..." + @(cd $(call GET_TF_CONFIG_DIR) && \ + CLUSTER_INFO='$(GET_OR_CREATE_CLUSTER_INFO)' \ + CLUSTER_NAME=$$(echo '$(GET_OR_CREATE_CLUSTER_INFO)' | jq -r '.name') \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + REDPANDA_CLOUD_ENVIRONMENT="$(REDPANDA_CLOUD_ENVIRONMENT)" \ + TF_LOG=$(TF_LOG) \ + TF_INSECURE_SKIP_PROVIDER_VERIFICATION=true \ + TF_PLUGIN_CACHE_DIR=.terraform.d/plugins_cache \ + bash -c 'if grep -q "resource \"redpanda_cluster\"" *.tf; then \ + terraform apply -auto-approve \ + -var="resource_group_name=$$CLUSTER_NAME" \ + -var="network_name=$$CLUSTER_NAME" \ + -var="cluster_name=$$CLUSTER_NAME"; \ + elif grep -q "resource \"redpanda_serverless_cluster\"" *.tf; then \ + terraform apply -auto-approve \ + -var="resource_group_name=$$CLUSTER_NAME" \ + -var="cluster_name=$$CLUSTER_NAME"; \ + elif grep -q "data \"redpanda_cluster\"" *.tf; then \ + CLUSTER_ID=$$(echo "$$CLUSTER_INFO" | jq -r ".id"); \ + terraform apply -auto-approve -var="cluster_id=$$CLUSTER_ID"; \ + else \ + echo "Error: No supported Redpanda cluster 
configuration found in Terraform files."; \ + exit 1; \ + fi') + +.PHONY: update-cluster-info +update-cluster-info: + @echo "Updating cluster info..." + @cd $(call GET_TF_CONFIG_DIR) && \ + CLUSTER_INFO='$(GET_OR_CREATE_CLUSTER_INFO)' \ + CLUSTER_ID=$$(terraform show -json | jq -r '.values.root_module.resources[] | select(.type == "redpanda_cluster" or .type == "redpanda_serverless_cluster") | .values.id') && \ + if [ -n "$$CLUSTER_ID" ]; then \ + NEW_CLUSTER_INFO=$$(echo "$$CLUSTER_INFO" | jq --arg id "$$CLUSTER_ID" '.id = $$id'); \ + echo "$$NEW_CLUSTER_INFO" > $(CURDIR)/$(CLUSTER_INFO_FILE); \ + echo "Updated cluster info: $$NEW_CLUSTER_INFO"; \ + else \ + echo "No cluster ID found. Cluster info not updated."; \ + fi .PHONY: test-destroy test-destroy: @echo "Destroying Terraform configuration..." - @cd $(TF_CONFIG_DIR) && \ - REDPANDA_CLIENT_ID="$${REDPANDA_CLIENT_ID}" && \ - TF_LOG=DEBUG && \ - terraform init && \ - terraform destroy -auto-approve \ No newline at end of file + @(cd $(TF_CONFIG_DIR) && \ + CLUSTER_INFO='$(call GET_OR_CREATE_CLUSTER_INFO)' \ + CLUSTER_NAME=$$(echo "$$CLUSTER_INFO" | jq -r '.name') \ + CLUSTER_ID=$$(echo "$$CLUSTER_INFO" | jq -r '.id') \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + REDPANDA_CLOUD_ENVIRONMENT="$(REDPANDA_CLOUD_ENVIRONMENT)" \ + TF_LOG=$(TF_LOG) \ + TF_INSECURE_SKIP_PROVIDER_VERIFICATION=true \ + TF_PLUGIN_CACHE_DIR=.terraform.d/plugins_cache \ + bash -c 'terraform init -plugin-dir=.terraform.d/plugins && \ + if grep -q "resource \"redpanda_cluster\"" *.tf; then \ + terraform destroy -auto-approve \ + -var="resource_group_name=$$CLUSTER_NAME" \ + -var="network_name=$$CLUSTER_NAME" \ + -var="cluster_name=$$CLUSTER_NAME"; \ + elif grep -q "resource \"redpanda_serverless_cluster\"" *.tf; then \ + terraform destroy -auto-approve \ + -var="resource_group_name=$$CLUSTER_NAME" \ + -var="cluster_name=$$CLUSTER_NAME"; \ + elif grep -q "data \"redpanda_cluster\"" 
*.tf; then \ + terraform destroy -auto-approve \ + -var="cluster_id=$$CLUSTER_ID"; \ + else \ + echo "Error: No supported Redpanda cluster configuration found in Terraform files."; \ + exit 1; \ + fi') +# Define the directory where the mocks are located +MOCKS_DIR := redpanda/mocks + +# Task to generate all mocks +.PHONY: generate-mocks +generate-mocks: + @echo "Generating mocks..." + @cd $(MOCKS_DIR) && go generate + @echo "Mocks generated successfully." + +# Task to clean generated mocks +.PHONY: clean-mocks +clean-mocks: + @echo "Cleaning generated mocks..." + @rm -f $(MOCKS_DIR)/mock_*.go + @echo "Mocks cleaned successfully." + +.PHONY: test_network +test_network: + @echo "Running TestAccResourcesNetwork..." + @DEBUG=true \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + TF_ACC=true \ + TF_LOG=$(TF_LOG) \ + VERSION=ign \ + $(GOCMD) test -v -timeout=1h ./redpanda/tests -run TestAccResourcesNetwork + +TIMEOUT ?= 6h +.PHONY: test_cluster_aws +test_cluster_aws: + @echo "Running TestAccResourcesClusterAWS..." + @DEBUG=true \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + RUN_CLUSTER_TESTS=true \ + TF_ACC=true \ + TF_LOG=$(TF_LOG) \ + VERSION=ign \ + $(GOCMD) test -v -timeout=$(TIMEOUT) ./redpanda/tests -run TestAccResourcesClusterAWS + +.PHONY: test_cluster_azure +test_cluster_azure: + @echo "Running TestAccResourcesClusterAzure..." + @DEBUG=true \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + RUN_CLUSTER_TESTS=true \ + TF_ACC=true \ + TF_LOG=$(TF_LOG) \ + VERSION=ign \ + $(GOCMD) test -v -timeout=$(TIMEOUT) ./redpanda/tests -run TestAccResourcesClusterAzure + +.PHONY: test_cluster_gcp +test_cluster_gcp: + @echo "Running TestAccResourcesClusterGCP..." 
+ @DEBUG=true \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + REDPANDA_VERSION="24.2.20240809182625" \ + RUN_CLUSTER_TESTS=true \ + TF_ACC=true \ + TF_LOG=$(TF_LOG) \ + VERSION=ign \ + $(GOCMD) test -v -timeout=$(TIMEOUT) ./redpanda/tests -run TestAccResourcesClusterGCP + +.PHONY: test_serverless_cluster +test_serverless_cluster: + @echo "Running TestAccResourcesStrippedDownServerlessCluster..." + @DEBUG=true \ + REDPANDA_CLIENT_ID="$(REDPANDA_CLIENT_ID)" \ + REDPANDA_CLIENT_SECRET="$(REDPANDA_CLIENT_SECRET)" \ + RUN_SERVERLESS_TESTS=true \ + TF_ACC=true \ + TF_LOG=$(TF_LOG) \ + VERSION=ign \ + $(GOCMD) test -v -timeout=$(TIMEOUT) ./redpanda/tests -run TestAccResourcesStrippedDownServerlessCluster diff --git a/README.md b/README.md index 46422690..a540fef4 100644 --- a/README.md +++ b/README.md @@ -1,95 +1,178 @@ # Redpanda Terraform Provider -!!! THIS IS AN ALPHA RELEASE !!! +![Alpha Release](https://img.shields.io/badge/release-alpha-red.svg) -Please be aware that all features are subject to change and may not be fully supported at this time. +> **Warning**: This is an alpha release. All features are subject to change and may not be fully supported at this time. -The Redpanda terraform provider is a [Terraform](https://www.terraform.io/) plugin that allows you to create -and manage resources on [Redpanda Cloud.](https://redpanda.com/redpanda-cloud) +The Redpanda Terraform Provider is a [Terraform](https://www.terraform.io/) plugin that allows you to create and manage +resources on [Redpanda Cloud](https://redpanda.com/redpanda-cloud). 
-## Getting started +## Table of Contents -To add the Redpanda provider: +- [Getting Started](#getting-started) +- [Contributing](#contributing) + - [Pull Request Process](#pull-request-process) +- [Makefile Commands - Developer Guide](#makefile-commands---developer-guide) + - [Prerequisites](#prerequisites) + - [Cluster Management Commands](#cluster-management-commands) + - [Development Commands](#development-commands) + - [Best Practices](#best-practices) +- [Contributing](#contributing) +- [Releasing a Version](#releasing-a-version) +- [Support](#support) -```hcl -terraform { - required_providers { - redpanda = { - source = "redpanda-data/redpanda" - } - } -} -``` +## Getting Started -### Authentication +User documentation on the Terraform provider is available at +the [Terraform Registry](https://registry.terraform.io/providers/redpanda-data/redpanda/latest/docs). -Client credentials for authentication can be provided as: +## Contributing -**Static credentials:** +When contributing to this project, please ensure you've run `make ready` and all tests pass before submitting a pull +request. If you've added new functionality, consider adding appropriate unit and integration tests. -```terraform -provider "redpanda" { - client_id = "" - client_secret = "" -} -``` +### Pull Request Process -or +* (optional) Use the label docs to generate documentation +* Use the label ci-ready to run integration tests -**Environment variables:** +## Makefile Commands - Developer Guide -``` -REDPANDA_CLIENT_ID= -REDPANDA_CLIENT_SECRET= +This guide provides an overview of the key Makefile commands used in the development and testing of the Redpanda +Terraform Provider. These commands help streamline the development process, manage Redpanda clusters for testing, and +ensure code quality. 
+ +### Prerequisites + +Before using these commands, ensure you have the following: + +- Go installed on your system +- Terraform CLI installed +- Access to a Redpanda Cloud account with appropriate permissions +- Required environment variables set (REDPANDA_CLIENT_ID, REDPANDA_CLIENT_SECRET) + +### Cluster Management Commands + +These commands are used to create and manage Redpanda clusters for testing purposes. + +#### apply + +Creates and sets up a Redpanda cluster using Terraform. This is intended for use in manual testing and development. It +should not be active when running the integration tests or you will lose the cluster ID and name. + +Here's an example usage + +```shell +# Test type defaults to cluster +# Cloud provider defaults to aws +make apply +# make changes to examples/cluster/aws/main.tf +# rerun apply to review +make apply + +# switch to datasource to test accessing the cluster with datasource and creating resources +# this is convenient for manual testing of changes to dataplane resources +# make changes in examples/datasource/standard/main.tf +export TEST_TYPE=datasource +make apply + +# switch to GCP to validate cluster against GCP. 
+# Note that you won't lose your AWS state or cluster when doing this +export TEST_TYPE=cluster +export CLOUD_PROVIDER=gcp + +make apply + +# clean up by tearing down the GCP cluster +make teardown + +# switch back to AWS and cleanup +export CLOUD_PROVIDER=aws +make teardown ``` -## Developing the provider +Command: `make apply` -### Requirements +**Key Variables:** -- [Go](https://go.dev/) -- [Terraform](https://www.terraform.io/) +- `REDPANDA_CLIENT_ID`: Redpanda Cloud client ID +- `REDPANDA_CLIENT_SECRET`: Redpanda Cloud client secret +- `REDPANDA_CLOUD_ENVIRONMENT`: Redpanda Cloud environment (ign or prod) +- `TF_CONFIG_DIR`: Terraform configuration directory (auto-generated) +- `CLOUD_PROVIDER`: Cloud provider (e.g., aws, azure, gcp) +- `TEST_TYPE`: Type of test (e.g., cluster, datasource) -### Building the provider +The `TF_CONFIG_DIR` is dynamically constructed based on the `TEST_TYPE` and `CLOUD_PROVIDER`: -After building the provider (`go build`), you may override the plugin with your -locally built provider. +For cluster tests: `TF_CONFIG_DIR := examples/$(TEST_TYPE)/$(CLOUD_PROVIDER)` +For datasource tests: `TF_CONFIG_DIR := examples/$(TEST_TYPE)/$(DATASOURCE_TEST_DIR)` -Follow [Terraform documentation](https://developer.hashicorp.com/terraform/cli/config/config-file#development-overrides-for-provider-developers) -on dev overrides for provider developers. +This is done to enable persisting the name and id of a cluster created by apply while still allowing for name +randomization. Names and IDs are persisted by cloud provider, so you can switch between providers without losing them. +You can also switch from the cluster test to the datasource test and the correct cluster will be reused depending on the +cloud provider you have set. -### Running Acceptance Test +#### teardown -The following environment variables are required to run the acceptance tests: +Destroys the current Redpanda cluster and associated infrastructure managed by Terraform. 
-```yaml -# For acceptance test -TF_ACC=true +Command: `make teardown` - # For long-running cluster tests -RUN_CLUSTER_TESTS=true +This command uses the same `TF_CONFIG_DIR` as the `apply` command to ensure it targets the correct resources. - # For datasource tests, against existing cluster -TEST_AGAINST_EXISTING_CLUSTER=true -CLUSTER_ID= -``` +### Development Commands -### Releasing a Version +These commands assist in code development, testing, and maintenance. -Do not change the Terraform Registry Manifest version! This is the version of the protocol, not the provider +#### ready -## Generating Docs +Prepares the project by generating documentation, running linters, and tidying dependencies. -To generate docs run the following commands +Command: `make ready` -```shell -go install github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs@latest -tfplugindocs -``` +This command is useful to run before committing changes to ensure code quality and up-to-date documentation. + +#### unit + +Runs unit tests for the project. + +Command: `make unit` + +**Note:** This command uses dummy credentials and does not run cluster tests. + +#### int + +Runs integration tests for the project. + +Command: `make int` + +**Important:** This command requires valid Redpanda credentials and will create actual resources in your Redpanda Cloud +account. + +#### mock + +Cleans and regenerates mock files used in testing. + +Command: `make mock` + +Mocks are generated using mockgen from specific interfaces as defined in redpanda/mocks/mocks.go. Once you have tagged +them with go generate, you can run this command to generate the mocks. + +### Best Practices + +1. Always run `make ready` before committing changes to ensure code quality and documentation accuracy. +2. Use `make unit` for quick, local testing that doesn't require Redpanda credentials. +3. Use `apply` and `teardown` for more complex manual testing during development +4. 
Run the integration tests by tagging your PR with `ci-ready` to ensure all tests pass before merging. + +## Releasing a Version + +Do not change the Terraform Registry Manifest version! This is the version of the protocol, not the provider. To release +a version cut a release in GitHub. Goreleaser will handle things from there. ## Support To raise issues, questions, or interact with the community: -- [Github Issues ](https://github.com/redpanda-data/terraform-provider-redpanda/issues) -- [Slack](https://redpanda.com/slack) +- [Github Issues](https://github.com/redpanda-data/terraform-provider-redpanda/issues) +- [Slack](https://redpanda.com/slack) \ No newline at end of file diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index 1a54d31c..2527059a 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -91,7 +91,7 @@ Read-Only: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. -- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. @@ -109,7 +109,7 @@ Read-Only: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. -- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. @@ -127,7 +127,7 @@ Read-Only: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. 
-- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. ## Usage @@ -186,7 +186,7 @@ variable "topic_config" { } variable "user_name" { - default = "test-username" + default = "data-test-username" } variable "user_pw" { @@ -198,7 +198,7 @@ variable "mechanism" { } variable "topic_name" { - default = "test-topic" + default = "data-test-topic" } variable "partition_count" { diff --git a/docs/index.md b/docs/index.md index 61bfc548..beda7fa5 100644 --- a/docs/index.md +++ b/docs/index.md @@ -73,9 +73,14 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { + tags = { "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } } variable "resource_group_name" { @@ -91,11 +96,11 @@ variable "cluster_name" { } variable "region" { - default = "us-east-1" + default = "us-east-2" } variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] + default = ["use2-az1", "use2-az2", "use2-az3"] } variable "cloud_provider" { @@ -105,6 +110,58 @@ variable "cloud_provider" { variable "throughput_tier" { default = "tier-1-aws-v2-arm" } + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name 
+ resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} ``` ### Example Usage for a GCP Dedicated Cluster @@ -136,9 +193,20 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { - "key" = "value" - } + ## This is a reference for GCP tags + # tags = { + # "key" = "value" + # } + ## This is a reference for GCP Private Service Connect + # gcp_private_service_connect = { + # enabled = true + # global_access_enabled = true + # consumer_accept_list = [ + # { + # source = "projects/123456789012" + # } + # ] + # } } variable "cluster_name" { @@ -166,7 +234,60 @@ variable "cloud_provider" { } variable "throughput_tier" { - default = "tier-1-gcp-um4g" + default = "tier-1-gcp-v2-x86" +} + + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = 
"test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 } ``` @@ -219,7 +340,7 @@ variable "topic_config" { } variable "user_name" { - default = "test-username" + default = "data-test-username" } variable "user_pw" { @@ -231,7 +352,7 @@ variable "mechanism" { } variable "topic_name" { - default = "test-topic" + default = "data-test-topic" } variable "partition_count" { diff --git a/docs/resources/acl.md b/docs/resources/acl.md index 40470f39..23e4b899 100644 --- a/docs/resources/acl.md +++ b/docs/resources/acl.md @@ -47,7 +47,6 @@ resource "redpanda_network" "test" { cidr_block = "10.0.0.0/20" } - resource "redpanda_cluster" "test" { name = var.cluster_name resource_group_id = redpanda_resource_group.test.id @@ -60,9 +59,41 @@ resource "redpanda_cluster" "test" { zones = var.zones allow_deletion = true tags = { - // not actually used as API does not consume it yet but we keep it in state for when it does "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } +} + +variable "resource_group_name" { + default = "testname" +} + +variable "network_name" { + default = "testname" +} + +variable "cluster_name" { + default = "testname" +} + +variable "region" { + default = "us-east-2" +} + +variable "zones" { + default = ["use2-az1", "use2-az2", "use2-az3"] +} + +variable "cloud_provider" { + default = "aws" +} + +variable "throughput_tier" { + default = "tier-1-aws-v2-arm" } resource "redpanda_user" "test" { @@ -92,33 +123,6 @@ resource "redpanda_acl" "test" { cluster_api_url = redpanda_cluster.test.cluster_api_url } -variable "resource_group_name" { - default = "testname" -} - -variable "network_name" { - default = "testname" -} - -variable "cluster_name" { - 
default = "testname" -} - -variable "region" { - default = "us-east-1" -} - -variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} variable "user_name" { default = "test-username" diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index e559e800..22b468e0 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -96,7 +96,7 @@ Required: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. -- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. @@ -114,7 +114,7 @@ Required: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. -- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. @@ -132,7 +132,7 @@ Required: - `ca_certificates_pem` (List of String) CA certificate in PEM format. - `enabled` (Boolean) Whether mTLS is enabled. -- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication. +- `principal_mapping_rules` (List of String) Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication. 
## Usage @@ -165,9 +165,14 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { + tags = { "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } } variable "resource_group_name" { @@ -183,11 +188,11 @@ variable "cluster_name" { } variable "region" { - default = "us-east-1" + default = "us-east-2" } variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] + default = ["use2-az1", "use2-az2", "use2-az3"] } variable "cloud_provider" { @@ -197,6 +202,58 @@ variable "cloud_provider" { variable "throughput_tier" { default = "tier-1-aws-v2-arm" } + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} ``` ### On GCP @@ -228,9 +285,20 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { - "key" = "value" - } + ## This is a reference for GCP tags + # tags = 
{ + # "key" = "value" + # } + ## This is a reference for GCP Private Service Connect + # gcp_private_service_connect = { + # enabled = true + # global_access_enabled = true + # consumer_accept_list = [ + # { + # source = "projects/123456789012" + # } + # ] + # } } variable "cluster_name" { @@ -258,7 +326,60 @@ variable "cloud_provider" { } variable "throughput_tier" { - default = "tier-1-gcp-um4g" + default = "tier-1-gcp-v2-x86" +} + + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 } ``` @@ -318,7 +439,7 @@ variable "topic_config" { } variable "user_name" { - default = "test-username" + default = "data-test-username" } variable "user_pw" { @@ -330,7 +451,7 @@ variable "mechanism" { } variable "topic_name" { - default = "test-topic" + default = "data-test-topic" } variable "partition_count" { diff --git a/docs/resources/resource_group.md b/docs/resources/resource_group.md index 25e3fc90..785762fd 100644 --- a/docs/resources/resource_group.md +++ b/docs/resources/resource_group.md @@ -31,9 
+31,115 @@ resource "redpanda_resource_group" "test" { name = var.resource_group_name } +resource "redpanda_network" "test" { + name = var.network_name + resource_group_id = redpanda_resource_group.test.id + cloud_provider = var.cloud_provider + region = var.region + cluster_type = "dedicated" + cidr_block = "10.0.0.0/20" +} + +resource "redpanda_cluster" "test" { + name = var.cluster_name + resource_group_id = redpanda_resource_group.test.id + network_id = redpanda_network.test.id + cloud_provider = var.cloud_provider + region = var.region + cluster_type = "dedicated" + connection_type = "public" + throughput_tier = var.throughput_tier + zones = var.zones + allow_deletion = true + tags = { + "key" = "value" + } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } +} + variable "resource_group_name" { default = "testname" } + +variable "network_name" { + default = "testname" +} + +variable "cluster_name" { + default = "testname" +} + +variable "region" { + default = "us-east-2" +} + +variable "zones" { + default = ["use2-az1", "use2-az2", "use2-az3"] +} + +variable "cloud_provider" { + default = "aws" +} + +variable "throughput_tier" { + default = "tier-1-aws-v2-arm" +} + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable 
"user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} ``` ## Import diff --git a/docs/resources/topic.md b/docs/resources/topic.md index a25a3847..394ee838 100644 --- a/docs/resources/topic.md +++ b/docs/resources/topic.md @@ -48,7 +48,6 @@ resource "redpanda_network" "test" { cidr_block = "10.0.0.0/20" } - resource "redpanda_cluster" "test" { name = var.cluster_name resource_group_id = redpanda_resource_group.test.id @@ -61,9 +60,41 @@ resource "redpanda_cluster" "test" { zones = var.zones allow_deletion = true tags = { - // not actually used as API does not consume it yet but we keep it in state for when it does "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } +} + +variable "resource_group_name" { + default = "testname" +} + +variable "network_name" { + default = "testname" +} + +variable "cluster_name" { + default = "testname" +} + +variable "region" { + default = "us-east-2" +} + +variable "zones" { + default = ["use2-az1", "use2-az2", "use2-az3"] +} + +variable "cloud_provider" { + default = "aws" +} + +variable "throughput_tier" { + default = "tier-1-aws-v2-arm" } resource "redpanda_user" "test" { @@ -93,33 +124,6 @@ resource "redpanda_acl" "test" { cluster_api_url = redpanda_cluster.test.cluster_api_url } -variable "resource_group_name" { - default = "testname" -} - -variable "network_name" { - default = "testname" -} - -variable "cluster_name" { - default = "testname" -} - -variable "region" { - default = "us-east-1" -} - -variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} 
variable "user_name" { default = "test-username" diff --git a/docs/resources/user.md b/docs/resources/user.md index 6bde4c28..91b4c651 100644 --- a/docs/resources/user.md +++ b/docs/resources/user.md @@ -46,7 +46,6 @@ resource "redpanda_network" "test" { cidr_block = "10.0.0.0/20" } - resource "redpanda_cluster" "test" { name = var.cluster_name resource_group_id = redpanda_resource_group.test.id @@ -59,9 +58,41 @@ resource "redpanda_cluster" "test" { zones = var.zones allow_deletion = true tags = { - // not actually used as API does not consume it yet but we keep it in state for when it does "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } +} + +variable "resource_group_name" { + default = "testname" +} + +variable "network_name" { + default = "testname" +} + +variable "cluster_name" { + default = "testname" +} + +variable "region" { + default = "us-east-2" +} + +variable "zones" { + default = ["use2-az1", "use2-az2", "use2-az3"] +} + +variable "cloud_provider" { + default = "aws" +} + +variable "throughput_tier" { + default = "tier-1-aws-v2-arm" } resource "redpanda_user" "test" { @@ -91,33 +122,6 @@ resource "redpanda_acl" "test" { cluster_api_url = redpanda_cluster.test.cluster_api_url } -variable "resource_group_name" { - default = "testname" -} - -variable "network_name" { - default = "testname" -} - -variable "cluster_name" { - default = "testname" -} - -variable "region" { - default = "us-east-1" -} - -variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} variable "user_name" { default = "test-username" diff --git a/examples/bulk-res/README.md b/examples/bulk-res/README.md deleted file mode 100644 index 32d62847..00000000 --- a/examples/bulk-res/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Bulk - -Examples in this directory are 
primarily intended to provide test cases for resources and are not particularly informative for actual -usage. \ No newline at end of file diff --git a/examples/bulk-res/main.tf b/examples/bulk-res/main.tf deleted file mode 100644 index a240840e..00000000 --- a/examples/bulk-res/main.tf +++ /dev/null @@ -1,124 +0,0 @@ -terraform { - required_providers { - redpanda = { - source = "redpanda-data/redpanda" - } - } -} - -provider "redpanda" { - # Configuration options for the redpanda provider -} - -resource "redpanda_resource_group" "test" { - name = var.resource_group_name -} - -resource "redpanda_network" "test" { - name = var.network_name - resource_group_id = redpanda_resource_group.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - cidr_block = "10.0.0.0/20" -} - - -resource "redpanda_cluster" "test" { - name = var.cluster_name - resource_group_id = redpanda_resource_group.test.id - network_id = redpanda_network.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - connection_type = "public" - throughput_tier = var.throughput_tier - zones = var.zones - allow_deletion = true - tags = { - // not actually used as API does not consume it yet but we keep it in state for when it does - "key" = "value" - } -} -resource "redpanda_user" "test" { - name = var.user_name - password = var.user_pw - mechanism = var.mechanism - cluster_api_url = redpanda_cluster.test.cluster_api_url -} - -resource "redpanda_topic" "test" { - count = 100 - name = "${var.topic_name}_${count.index + 1}" - partition_count = var.partition_count - replication_factor = var.replication_factor - cluster_api_url = redpanda_cluster.test.cluster_api_url - allow_deletion = true -} - -resource "redpanda_acl" "test" { - count = 100 - resource_type = "TOPIC" - resource_name = redpanda_topic.test[count.index].name - resource_pattern_type = "LITERAL" - principal = "User:${redpanda_user.test.name}" - host = "*" - 
operation = "READ" - permission_type = "ALLOW" - cluster_api_url = redpanda_cluster.test.cluster_api_url -} - -variable "cluster_id" { - default = "testname" -} -variable "resource_group_name" { - default = "tfrp-acc-testbulk-aKtA" -} - -variable "network_name" { - default = "public-network-rate-limit-test" -} - -variable "cluster_name" { - default = "rate-limit-test" -} - -variable "region" { - default = "us-east-2" -} - -variable "zones" { - default = ["use2-az1"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "user_name" { - default = "test-username" -} - -variable "user_pw" { - default = "password" -} - -variable "mechanism" { - default = "scram-sha-256" -} - -variable "topic_name" { - default = "test-topic" -} - -variable "partition_count" { - default = 3 -} - -variable "replication_factor" { - default = 3 -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} \ No newline at end of file diff --git a/examples/cluster/aws/main.tf b/examples/cluster/aws/main.tf index 47016011..96c0f534 100644 --- a/examples/cluster/aws/main.tf +++ b/examples/cluster/aws/main.tf @@ -24,9 +24,14 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { + tags = { "key" = "value" } + aws_private_link = { + enabled = true + connect_console = true + allowed_principals = ["arn:aws:iam::123456789024:root"] + } } variable "resource_group_name" { @@ -42,11 +47,11 @@ variable "cluster_name" { } variable "region" { - default = "us-east-1" + default = "us-east-2" } variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] + default = ["use2-az1", "use2-az2", "use2-az3"] } variable "cloud_provider" { @@ -55,4 +60,56 @@ variable "cloud_provider" { variable "throughput_tier" { default = "tier-1-aws-v2-arm" -} \ No newline at end of file +} + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = 
redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} diff --git a/examples/cluster/azure/main.tf b/examples/cluster/azure/main.tf index 4c7ca7c2..fcdeef01 100644 --- a/examples/cluster/azure/main.tf +++ b/examples/cluster/azure/main.tf @@ -27,6 +27,11 @@ resource "redpanda_cluster" "test" { tags = { "key" = "value" } + azure_private_link = { + enabled = true + connect_console = true + allowed_subscriptions = ["12345678-1234-1234-1234-123456789012"] + } } variable "resource_group_name" { @@ -55,4 +60,57 @@ variable "zones" { variable "throughput_tier" { default = "tier-1-azure-v2-x86" -} \ No newline at end of file +} + + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + 
resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} diff --git a/examples/cluster/gcp/main.tf b/examples/cluster/gcp/main.tf index 1b3cf8f3..a70d1bfc 100644 --- a/examples/cluster/gcp/main.tf +++ b/examples/cluster/gcp/main.tf @@ -24,9 +24,20 @@ resource "redpanda_cluster" "test" { throughput_tier = var.throughput_tier zones = var.zones allow_deletion = true - tags = { - "key" = "value" - } + ## This is a reference for GCP tags + # tags = { + # "key" = "value" + # } + ## This is a reference for GCP Private Service Connect + # gcp_private_service_connect = { + # enabled = true + # global_access_enabled = true + # consumer_accept_list = [ + # { + # source = "projects/123456789012" + # } + # ] + # } } variable "cluster_name" { @@ -54,5 +65,58 @@ variable "cloud_provider" { } variable "throughput_tier" { - default = "tier-1-gcp-um4g" -} \ No newline at end of file + default = "tier-1-gcp-v2-x86" +} + + +resource "redpanda_user" "test" { + name = var.user_name + password = var.user_pw + mechanism = var.mechanism + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + +resource "redpanda_topic" "test" { + name = var.topic_name + partition_count = var.partition_count + replication_factor = var.replication_factor + cluster_api_url = redpanda_cluster.test.cluster_api_url + allow_deletion = true +} + + +resource "redpanda_acl" "test" { + resource_type = "TOPIC" + resource_name = redpanda_topic.test.name + resource_pattern_type = "LITERAL" + principal = "User:${redpanda_user.test.name}" + host = "*" + 
operation = "READ" + permission_type = "ALLOW" + cluster_api_url = redpanda_cluster.test.cluster_api_url +} + + +variable "user_name" { + default = "test-username" +} + +variable "user_pw" { + default = "password" +} + +variable "mechanism" { + default = "scram-sha-256" +} + +variable "topic_name" { + default = "test-topic" +} + +variable "partition_count" { + default = 3 +} + +variable "replication_factor" { + default = 3 +} diff --git a/examples/cluster/private-link/main.tf b/examples/cluster/private-link/main.tf deleted file mode 100644 index 983f6c21..00000000 --- a/examples/cluster/private-link/main.tf +++ /dev/null @@ -1,73 +0,0 @@ -provider "redpanda" {} - -resource "redpanda_resource_group" "test" { - name = var.resource_group_name -} - -resource "redpanda_network" "test" { - name = var.network_name - resource_group_id = redpanda_resource_group.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - cidr_block = "10.0.0.0/20" -} - -resource "redpanda_cluster" "test" { - name = var.cluster_name - resource_group_id = redpanda_resource_group.test.id - network_id = redpanda_network.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - connection_type = "public" - throughput_tier = var.throughput_tier - zones = var.zones - allow_deletion = true - tags = { - "key" = "value" - } - aws_private_link = { - enabled = true - connect_console = true - allowed_principals = ["arn:aws:iam::123456789024:root"] - } - azure_private_link = { - enabled = true - connect_console = true - allowed_principals = ["12345678-1234-1234-1234-123456789012"] - } - gcp_private_link = { - enabled = true - connect_console = true - allowed_principals = ["projects/123456789012"] - } -} - -variable "resource_group_name" { - default = "testname" -} - -variable "network_name" { - default = "testname" -} - -variable "cluster_name" { - default = "testname" -} - -variable "region" { - default = "us-east-1" -} - 
-variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} \ No newline at end of file diff --git a/examples/serverless-cluster/main.tf b/examples/cluster/serverless/main.tf similarity index 100% rename from examples/serverless-cluster/main.tf rename to examples/cluster/serverless/main.tf diff --git a/examples/bulk-data/README.md b/examples/datasource/bulk/README.md similarity index 100% rename from examples/bulk-data/README.md rename to examples/datasource/bulk/README.md diff --git a/examples/bulk-data/main.tf b/examples/datasource/bulk/main.tf similarity index 100% rename from examples/bulk-data/main.tf rename to examples/datasource/bulk/main.tf diff --git a/examples/datasource/main.tf b/examples/datasource/standard/main.tf similarity index 95% rename from examples/datasource/main.tf rename to examples/datasource/standard/main.tf index 3807c4d7..11b57076 100644 --- a/examples/datasource/main.tf +++ b/examples/datasource/standard/main.tf @@ -44,7 +44,7 @@ variable "topic_config" { } variable "user_name" { - default = "test-username" + default = "data-test-username" } variable "user_pw" { @@ -56,7 +56,7 @@ variable "mechanism" { } variable "topic_name" { - default = "test-topic" + default = "data-test-topic" } variable "partition_count" { diff --git a/examples/resourcegroup/main.tf b/examples/resourcegroup/main.tf deleted file mode 100644 index 130e6152..00000000 --- a/examples/resourcegroup/main.tf +++ /dev/null @@ -1,9 +0,0 @@ -provider "redpanda" {} - -resource "redpanda_resource_group" "test" { - name = var.resource_group_name -} - -variable "resource_group_name" { - default = "testname" -} diff --git a/examples/user-acl-topic/main.tf b/examples/user-acl-topic/main.tf deleted file mode 100644 index 7de93fc0..00000000 --- a/examples/user-acl-topic/main.tf +++ /dev/null @@ -1,111 +0,0 @@ -provider "redpanda" {} - -resource 
"redpanda_resource_group" "test" { - name = var.resource_group_name -} - -resource "redpanda_network" "test" { - name = var.network_name - resource_group_id = redpanda_resource_group.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - cidr_block = "10.0.0.0/20" -} - - -resource "redpanda_cluster" "test" { - name = var.cluster_name - resource_group_id = redpanda_resource_group.test.id - network_id = redpanda_network.test.id - cloud_provider = var.cloud_provider - region = var.region - cluster_type = "dedicated" - connection_type = "public" - throughput_tier = var.throughput_tier - zones = var.zones - allow_deletion = true - tags = { - // not actually used as API does not consume it yet but we keep it in state for when it does - "key" = "value" - } -} - -resource "redpanda_user" "test" { - name = var.user_name - password = var.user_pw - mechanism = var.mechanism - cluster_api_url = redpanda_cluster.test.cluster_api_url -} - -resource "redpanda_topic" "test" { - name = var.topic_name - partition_count = var.partition_count - replication_factor = var.replication_factor - cluster_api_url = redpanda_cluster.test.cluster_api_url - allow_deletion = true -} - - -resource "redpanda_acl" "test" { - resource_type = "TOPIC" - resource_name = redpanda_topic.test.name - resource_pattern_type = "LITERAL" - principal = "User:${redpanda_user.test.name}" - host = "*" - operation = "READ" - permission_type = "ALLOW" - cluster_api_url = redpanda_cluster.test.cluster_api_url -} - -variable "resource_group_name" { - default = "testname" -} - -variable "network_name" { - default = "testname" -} - -variable "cluster_name" { - default = "testname" -} - -variable "region" { - default = "us-east-1" -} - -variable "zones" { - default = ["use1-az2", "use1-az4", "use1-az6"] -} - -variable "cloud_provider" { - default = "aws" -} - -variable "throughput_tier" { - default = "tier-1-aws-v2-arm" -} - -variable "user_name" { - default = "test-username" -} 
- -variable "user_pw" { - default = "password" -} - -variable "mechanism" { - default = "scram-sha-256" -} - -variable "topic_name" { - default = "test-topic" -} - -variable "partition_count" { - default = 3 -} - -variable "replication_factor" { - default = 3 -} diff --git a/go.mod b/go.mod index 6222433c..bddf5542 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/redpanda-data/terraform-provider-redpanda -go 1.22.2 - -toolchain go1.22.3 +go 1.22.4 require ( buf.build/gen/go/redpandadata/cloud/grpc/go v1.4.0-20240715174738-f694d610cde4.2 diff --git a/redpanda/cloud/cloud.go b/redpanda/cloud/cloud.go index adcf8bb3..10e91400 100644 --- a/redpanda/cloud/cloud.go +++ b/redpanda/cloud/cloud.go @@ -117,7 +117,7 @@ var rl = newRateLimiter(500) // SpawnConn returns a grpc connection to the given URL, it adds a bearer token // to each request with the given 'authToken'. -func SpawnConn(url string, authToken string) (*grpc.ClientConn, error) { +func SpawnConn(url, authToken string) (*grpc.ClientConn, error) { return grpc.NewClient( url, // Chain the interceptors using grpc_middleware.ChainUnaryClient diff --git a/redpanda/cloud/controlplane.go b/redpanda/cloud/controlplane.go index ead3f078..6a9060c8 100644 --- a/redpanda/cloud/controlplane.go +++ b/redpanda/cloud/controlplane.go @@ -25,6 +25,19 @@ import ( "google.golang.org/grpc" ) +// CpClientSet defines the interface for ControlPlaneClientSet +type CpClientSet interface { + CreateResourceGroup(ctx context.Context, name string) (*controlplanev1beta2.ResourceGroup, error) + ResourceGroupForID(ctx context.Context, id string) (*controlplanev1beta2.ResourceGroup, error) + ResourceGroupForName(ctx context.Context, name string) (*controlplanev1beta2.ResourceGroup, error) + NetworkForID(ctx context.Context, id string) (*controlplanev1beta2.Network, error) + NetworkForName(ctx context.Context, name string) (*controlplanev1beta2.Network, error) + ClusterForID(ctx context.Context, id string) 
(*controlplanev1beta2.Cluster, error) + ClusterForName(ctx context.Context, name string) (*controlplanev1beta2.Cluster, error) + ServerlessClusterForID(ctx context.Context, id string) (*controlplanev1beta2.ServerlessCluster, error) + ServerlessClusterForName(ctx context.Context, name string) (*controlplanev1beta2.ServerlessCluster, error) +} + // ControlPlaneClientSet holds the respective service clients to interact with // the control plane endpoints of the Public API. type ControlPlaneClientSet struct { diff --git a/redpanda/cloud/ratelimiter.go b/redpanda/cloud/ratelimiter.go index a295fda9..2674aebc 100644 --- a/redpanda/cloud/ratelimiter.go +++ b/redpanda/cloud/ratelimiter.go @@ -1,3 +1,18 @@ +// Copyright 2024 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package cloud import ( diff --git a/redpanda/mocks/mock_cp_client_set.go b/redpanda/mocks/mock_cp_client_set.go new file mode 100644 index 00000000..19602b17 --- /dev/null +++ b/redpanda/mocks/mock_cp_client_set.go @@ -0,0 +1,171 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/redpanda-data/terraform-provider-redpanda/redpanda/cloud (interfaces: CpClientSet) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" + gomock "github.com/golang/mock/gomock" +) + +// MockCpClientSet is a mock of CpClientSet interface. +type MockCpClientSet struct { + ctrl *gomock.Controller + recorder *MockCpClientSetMockRecorder +} + +// MockCpClientSetMockRecorder is the mock recorder for MockCpClientSet. +type MockCpClientSetMockRecorder struct { + mock *MockCpClientSet +} + +// NewMockCpClientSet creates a new mock instance. +func NewMockCpClientSet(ctrl *gomock.Controller) *MockCpClientSet { + mock := &MockCpClientSet{ctrl: ctrl} + mock.recorder = &MockCpClientSetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCpClientSet) EXPECT() *MockCpClientSetMockRecorder { + return m.recorder +} + +// ClusterForID mocks base method. +func (m *MockCpClientSet) ClusterForID(arg0 context.Context, arg1 string) (*controlplanev1beta2.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterForID", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClusterForID indicates an expected call of ClusterForID. +func (mr *MockCpClientSetMockRecorder) ClusterForID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterForID", reflect.TypeOf((*MockCpClientSet)(nil).ClusterForID), arg0, arg1) +} + +// ClusterForName mocks base method. +func (m *MockCpClientSet) ClusterForName(arg0 context.Context, arg1 string) (*controlplanev1beta2.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClusterForName", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ClusterForName indicates an expected call of ClusterForName. 
+func (mr *MockCpClientSetMockRecorder) ClusterForName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterForName", reflect.TypeOf((*MockCpClientSet)(nil).ClusterForName), arg0, arg1) +} + +// CreateResourceGroup mocks base method. +func (m *MockCpClientSet) CreateResourceGroup(arg0 context.Context, arg1 string) (*controlplanev1beta2.ResourceGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateResourceGroup", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.ResourceGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateResourceGroup indicates an expected call of CreateResourceGroup. +func (mr *MockCpClientSetMockRecorder) CreateResourceGroup(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateResourceGroup", reflect.TypeOf((*MockCpClientSet)(nil).CreateResourceGroup), arg0, arg1) +} + +// NetworkForID mocks base method. +func (m *MockCpClientSet) NetworkForID(arg0 context.Context, arg1 string) (*controlplanev1beta2.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkForID", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetworkForID indicates an expected call of NetworkForID. +func (mr *MockCpClientSetMockRecorder) NetworkForID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkForID", reflect.TypeOf((*MockCpClientSet)(nil).NetworkForID), arg0, arg1) +} + +// NetworkForName mocks base method. 
+func (m *MockCpClientSet) NetworkForName(arg0 context.Context, arg1 string) (*controlplanev1beta2.Network, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetworkForName", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.Network) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetworkForName indicates an expected call of NetworkForName. +func (mr *MockCpClientSetMockRecorder) NetworkForName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetworkForName", reflect.TypeOf((*MockCpClientSet)(nil).NetworkForName), arg0, arg1) +} + +// ResourceGroupForID mocks base method. +func (m *MockCpClientSet) ResourceGroupForID(arg0 context.Context, arg1 string) (*controlplanev1beta2.ResourceGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceGroupForID", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.ResourceGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResourceGroupForID indicates an expected call of ResourceGroupForID. +func (mr *MockCpClientSetMockRecorder) ResourceGroupForID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroupForID", reflect.TypeOf((*MockCpClientSet)(nil).ResourceGroupForID), arg0, arg1) +} + +// ResourceGroupForName mocks base method. +func (m *MockCpClientSet) ResourceGroupForName(arg0 context.Context, arg1 string) (*controlplanev1beta2.ResourceGroup, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceGroupForName", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.ResourceGroup) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResourceGroupForName indicates an expected call of ResourceGroupForName. 
+func (mr *MockCpClientSetMockRecorder) ResourceGroupForName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceGroupForName", reflect.TypeOf((*MockCpClientSet)(nil).ResourceGroupForName), arg0, arg1) +} + +// ServerlessClusterForID mocks base method. +func (m *MockCpClientSet) ServerlessClusterForID(arg0 context.Context, arg1 string) (*controlplanev1beta2.ServerlessCluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServerlessClusterForID", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.ServerlessCluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ServerlessClusterForID indicates an expected call of ServerlessClusterForID. +func (mr *MockCpClientSetMockRecorder) ServerlessClusterForID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerlessClusterForID", reflect.TypeOf((*MockCpClientSet)(nil).ServerlessClusterForID), arg0, arg1) +} + +// ServerlessClusterForName mocks base method. +func (m *MockCpClientSet) ServerlessClusterForName(arg0 context.Context, arg1 string) (*controlplanev1beta2.ServerlessCluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ServerlessClusterForName", arg0, arg1) + ret0, _ := ret[0].(*controlplanev1beta2.ServerlessCluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ServerlessClusterForName indicates an expected call of ServerlessClusterForName. 
+func (mr *MockCpClientSetMockRecorder) ServerlessClusterForName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerlessClusterForName", reflect.TypeOf((*MockCpClientSet)(nil).ServerlessClusterForName), arg0, arg1) +} diff --git a/redpanda/mocks/operations_mock.go b/redpanda/mocks/mock_operations_service_client.go similarity index 55% rename from redpanda/mocks/operations_mock.go rename to redpanda/mocks/mock_operations_service_client.go index 9f917b3b..0fac18d2 100644 --- a/redpanda/mocks/operations_mock.go +++ b/redpanda/mocks/mock_operations_service_client.go @@ -1,13 +1,16 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: buf.build/gen/go/redpandadata/cloud/grpc/go/redpanda/api/controlplane/v1beta2/controlplanev1beta2grpc (interfaces: OperationServiceClient) + +// Package mocks is a generated GoMock package. package mocks import ( - "context" - "fmt" - "reflect" + context "context" + reflect "reflect" controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" - "github.com/golang/mock/gomock" - "google.golang.org/grpc" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" ) // MockOperationServiceClient is a mock of OperationServiceClient interface. @@ -34,53 +37,41 @@ func (m *MockOperationServiceClient) EXPECT() *MockOperationServiceClientMockRec } // GetOperation mocks base method. 
-func (m *MockOperationServiceClient) GetOperation(ctx context.Context, in *controlplanev1beta2.GetOperationRequest, opts ...grpc.CallOption) (*controlplanev1beta2.GetOperationResponse, error) { +func (m *MockOperationServiceClient) GetOperation(arg0 context.Context, arg1 *controlplanev1beta2.GetOperationRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.GetOperationResponse, error) { m.ctrl.T.Helper() - varargs := []any{ctx, in} - for _, a := range opts { + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetOperation", varargs...) - ret0, ok := ret[0].(*controlplanev1beta2.GetOperationResponse) - if !ok { - fmt.Println("unexpected type") - } - ret1, ok := ret[1].(error) - if !ok { - fmt.Print("unexpected type") - } + ret0, _ := ret[0].(*controlplanev1beta2.GetOperationResponse) + ret1, _ := ret[1].(error) return ret0, ret1 } +// GetOperation indicates an expected call of GetOperation. +func (mr *MockOperationServiceClientMockRecorder) GetOperation(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperation", reflect.TypeOf((*MockOperationServiceClient)(nil).GetOperation), varargs...) +} + // ListOperations mocks base method. -func (m *MockOperationServiceClient) ListOperations(ctx context.Context, in *controlplanev1beta2.ListOperationsRequest, opts ...grpc.CallOption) (*controlplanev1beta2.ListOperationsResponse, error) { +func (m *MockOperationServiceClient) ListOperations(arg0 context.Context, arg1 *controlplanev1beta2.ListOperationsRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.ListOperationsResponse, error) { m.ctrl.T.Helper() - varargs := []any{ctx, in} - for _, a := range opts { + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ListOperations", varargs...) 
- ret0, ok := ret[0].(*controlplanev1beta2.ListOperationsResponse) - if !ok { - fmt.Println("unexpected type") - } - ret1, ok := ret[1].(error) - if !ok { - fmt.Print("unexpected type") - } + ret0, _ := ret[0].(*controlplanev1beta2.ListOperationsResponse) + ret1, _ := ret[1].(error) return ret0, ret1 } -// GetOperation indicates an expected call of GetOperation. -func (mr *MockOperationServiceClientMockRecorder) GetOperation(ctx, in any, opts ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperation", reflect.TypeOf((*MockOperationServiceClient)(nil).GetOperation), varargs...) -} - // ListOperations indicates an expected call of ListOperations. -func (mr *MockOperationServiceClientMockRecorder) ListOperations(ctx, in any, opts ...any) *gomock.Call { +func (mr *MockOperationServiceClientMockRecorder) ListOperations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]any{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOperation", reflect.TypeOf((*MockOperationServiceClient)(nil).ListOperations), varargs...) + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListOperations", reflect.TypeOf((*MockOperationServiceClient)(nil).ListOperations), varargs...) } diff --git a/redpanda/mocks/mock_serverless_cluster_service_client.go b/redpanda/mocks/mock_serverless_cluster_service_client.go new file mode 100644 index 00000000..56d1035f --- /dev/null +++ b/redpanda/mocks/mock_serverless_cluster_service_client.go @@ -0,0 +1,157 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: buf.build/gen/go/redpandadata/cloud/grpc/go/redpanda/api/controlplane/v1beta2/controlplanev1beta2grpc (interfaces: ServerlessClusterServiceClient) + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + context "context" + reflect "reflect" + + controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockServerlessClusterServiceClient is a mock of ServerlessClusterServiceClient interface. +type MockServerlessClusterServiceClient struct { + ctrl *gomock.Controller + recorder *MockServerlessClusterServiceClientMockRecorder +} + +// MockServerlessClusterServiceClientMockRecorder is the mock recorder for MockServerlessClusterServiceClient. +type MockServerlessClusterServiceClientMockRecorder struct { + mock *MockServerlessClusterServiceClient +} + +// NewMockServerlessClusterServiceClient creates a new mock instance. +func NewMockServerlessClusterServiceClient(ctrl *gomock.Controller) *MockServerlessClusterServiceClient { + mock := &MockServerlessClusterServiceClient{ctrl: ctrl} + mock.recorder = &MockServerlessClusterServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockServerlessClusterServiceClient) EXPECT() *MockServerlessClusterServiceClientMockRecorder { + return m.recorder +} + +// CreateServerlessCluster mocks base method. +func (m *MockServerlessClusterServiceClient) CreateServerlessCluster(arg0 context.Context, arg1 *controlplanev1beta2.CreateServerlessClusterRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.CreateServerlessClusterOperation, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateServerlessCluster", varargs...) + ret0, _ := ret[0].(*controlplanev1beta2.CreateServerlessClusterOperation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateServerlessCluster indicates an expected call of CreateServerlessCluster. 
+func (mr *MockServerlessClusterServiceClientMockRecorder) CreateServerlessCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServerlessCluster", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).CreateServerlessCluster), varargs...) +} + +// DeleteServerlessCluster mocks base method. +func (m *MockServerlessClusterServiceClient) DeleteServerlessCluster(arg0 context.Context, arg1 *controlplanev1beta2.DeleteServerlessClusterRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.DeleteServerlessClusterOperation, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteServerlessCluster", varargs...) + ret0, _ := ret[0].(*controlplanev1beta2.DeleteServerlessClusterOperation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteServerlessCluster indicates an expected call of DeleteServerlessCluster. +func (mr *MockServerlessClusterServiceClientMockRecorder) DeleteServerlessCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServerlessCluster", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).DeleteServerlessCluster), varargs...) +} + +// DummyCreateMetadata mocks base method. +func (m *MockServerlessClusterServiceClient) DummyCreateMetadata(arg0 context.Context, arg1 *controlplanev1beta2.CreateServerlessClusterRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.CreateServerlessClusterMetadata, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DummyCreateMetadata", varargs...) 
+ ret0, _ := ret[0].(*controlplanev1beta2.CreateServerlessClusterMetadata) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DummyCreateMetadata indicates an expected call of DummyCreateMetadata. +func (mr *MockServerlessClusterServiceClientMockRecorder) DummyCreateMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DummyCreateMetadata", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).DummyCreateMetadata), varargs...) +} + +// DummyDeleteMetadata mocks base method. +func (m *MockServerlessClusterServiceClient) DummyDeleteMetadata(arg0 context.Context, arg1 *controlplanev1beta2.DeleteServerlessClusterRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.DeleteServerlessClusterMetadata, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DummyDeleteMetadata", varargs...) + ret0, _ := ret[0].(*controlplanev1beta2.DeleteServerlessClusterMetadata) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DummyDeleteMetadata indicates an expected call of DummyDeleteMetadata. +func (mr *MockServerlessClusterServiceClientMockRecorder) DummyDeleteMetadata(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DummyDeleteMetadata", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).DummyDeleteMetadata), varargs...) +} + +// GetServerlessCluster mocks base method. 
+func (m *MockServerlessClusterServiceClient) GetServerlessCluster(arg0 context.Context, arg1 *controlplanev1beta2.GetServerlessClusterRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.GetServerlessClusterResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetServerlessCluster", varargs...) + ret0, _ := ret[0].(*controlplanev1beta2.GetServerlessClusterResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServerlessCluster indicates an expected call of GetServerlessCluster. +func (mr *MockServerlessClusterServiceClientMockRecorder) GetServerlessCluster(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServerlessCluster", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).GetServerlessCluster), varargs...) +} + +// ListServerlessClusters mocks base method. +func (m *MockServerlessClusterServiceClient) ListServerlessClusters(arg0 context.Context, arg1 *controlplanev1beta2.ListServerlessClustersRequest, arg2 ...grpc.CallOption) (*controlplanev1beta2.ListServerlessClustersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListServerlessClusters", varargs...) + ret0, _ := ret[0].(*controlplanev1beta2.ListServerlessClustersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListServerlessClusters indicates an expected call of ListServerlessClusters. +func (mr *MockServerlessClusterServiceClientMockRecorder) ListServerlessClusters(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServerlessClusters", reflect.TypeOf((*MockServerlessClusterServiceClient)(nil).ListServerlessClusters), varargs...) +} diff --git a/redpanda/mocks/mock_topic_service_client.go b/redpanda/mocks/mock_topic_service_client.go new file mode 100644 index 00000000..c482c8c2 --- /dev/null +++ b/redpanda/mocks/mock_topic_service_client.go @@ -0,0 +1,157 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: buf.build/gen/go/redpandadata/dataplane/grpc/go/redpanda/api/dataplane/v1alpha1/dataplanev1alpha1grpc (interfaces: TopicServiceClient) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + dataplanev1alpha1 "buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go/redpanda/api/dataplane/v1alpha1" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockTopicServiceClient is a mock of TopicServiceClient interface. +type MockTopicServiceClient struct { + ctrl *gomock.Controller + recorder *MockTopicServiceClientMockRecorder +} + +// MockTopicServiceClientMockRecorder is the mock recorder for MockTopicServiceClient. +type MockTopicServiceClientMockRecorder struct { + mock *MockTopicServiceClient +} + +// NewMockTopicServiceClient creates a new mock instance. +func NewMockTopicServiceClient(ctrl *gomock.Controller) *MockTopicServiceClient { + mock := &MockTopicServiceClient{ctrl: ctrl} + mock.recorder = &MockTopicServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTopicServiceClient) EXPECT() *MockTopicServiceClientMockRecorder { + return m.recorder +} + +// CreateTopic mocks base method. 
+func (m *MockTopicServiceClient) CreateTopic(arg0 context.Context, arg1 *dataplanev1alpha1.CreateTopicRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.CreateTopicResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateTopic", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.CreateTopicResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTopic indicates an expected call of CreateTopic. +func (mr *MockTopicServiceClientMockRecorder) CreateTopic(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTopic", reflect.TypeOf((*MockTopicServiceClient)(nil).CreateTopic), varargs...) +} + +// DeleteTopic mocks base method. +func (m *MockTopicServiceClient) DeleteTopic(arg0 context.Context, arg1 *dataplanev1alpha1.DeleteTopicRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.DeleteTopicResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteTopic", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.DeleteTopicResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteTopic indicates an expected call of DeleteTopic. +func (mr *MockTopicServiceClientMockRecorder) DeleteTopic(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTopic", reflect.TypeOf((*MockTopicServiceClient)(nil).DeleteTopic), varargs...) +} + +// GetTopicConfigurations mocks base method. 
+func (m *MockTopicServiceClient) GetTopicConfigurations(arg0 context.Context, arg1 *dataplanev1alpha1.GetTopicConfigurationsRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.GetTopicConfigurationsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetTopicConfigurations", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.GetTopicConfigurationsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTopicConfigurations indicates an expected call of GetTopicConfigurations. +func (mr *MockTopicServiceClientMockRecorder) GetTopicConfigurations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopicConfigurations", reflect.TypeOf((*MockTopicServiceClient)(nil).GetTopicConfigurations), varargs...) +} + +// ListTopics mocks base method. +func (m *MockTopicServiceClient) ListTopics(arg0 context.Context, arg1 *dataplanev1alpha1.ListTopicsRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.ListTopicsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListTopics", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.ListTopicsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTopics indicates an expected call of ListTopics. +func (mr *MockTopicServiceClientMockRecorder) ListTopics(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTopics", reflect.TypeOf((*MockTopicServiceClient)(nil).ListTopics), varargs...) +} + +// SetTopicConfigurations mocks base method. 
+func (m *MockTopicServiceClient) SetTopicConfigurations(arg0 context.Context, arg1 *dataplanev1alpha1.SetTopicConfigurationsRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.SetTopicConfigurationsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SetTopicConfigurations", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.SetTopicConfigurationsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetTopicConfigurations indicates an expected call of SetTopicConfigurations. +func (mr *MockTopicServiceClientMockRecorder) SetTopicConfigurations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTopicConfigurations", reflect.TypeOf((*MockTopicServiceClient)(nil).SetTopicConfigurations), varargs...) +} + +// UpdateTopicConfigurations mocks base method. +func (m *MockTopicServiceClient) UpdateTopicConfigurations(arg0 context.Context, arg1 *dataplanev1alpha1.UpdateTopicConfigurationsRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.UpdateTopicConfigurationsResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateTopicConfigurations", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.UpdateTopicConfigurationsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateTopicConfigurations indicates an expected call of UpdateTopicConfigurations. +func (mr *MockTopicServiceClientMockRecorder) UpdateTopicConfigurations(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTopicConfigurations", reflect.TypeOf((*MockTopicServiceClient)(nil).UpdateTopicConfigurations), varargs...) +} diff --git a/redpanda/mocks/mock_user_service_client.go b/redpanda/mocks/mock_user_service_client.go new file mode 100644 index 00000000..780db9c0 --- /dev/null +++ b/redpanda/mocks/mock_user_service_client.go @@ -0,0 +1,117 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: buf.build/gen/go/redpandadata/dataplane/grpc/go/redpanda/api/dataplane/v1alpha1/dataplanev1alpha1grpc (interfaces: UserServiceClient) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + dataplanev1alpha1 "buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go/redpanda/api/dataplane/v1alpha1" + gomock "github.com/golang/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockUserServiceClient is a mock of UserServiceClient interface. +type MockUserServiceClient struct { + ctrl *gomock.Controller + recorder *MockUserServiceClientMockRecorder +} + +// MockUserServiceClientMockRecorder is the mock recorder for MockUserServiceClient. +type MockUserServiceClientMockRecorder struct { + mock *MockUserServiceClient +} + +// NewMockUserServiceClient creates a new mock instance. +func NewMockUserServiceClient(ctrl *gomock.Controller) *MockUserServiceClient { + mock := &MockUserServiceClient{ctrl: ctrl} + mock.recorder = &MockUserServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUserServiceClient) EXPECT() *MockUserServiceClientMockRecorder { + return m.recorder +} + +// CreateUser mocks base method. 
+func (m *MockUserServiceClient) CreateUser(arg0 context.Context, arg1 *dataplanev1alpha1.CreateUserRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.CreateUserResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateUser", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.CreateUserResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateUser indicates an expected call of CreateUser. +func (mr *MockUserServiceClientMockRecorder) CreateUser(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateUser", reflect.TypeOf((*MockUserServiceClient)(nil).CreateUser), varargs...) +} + +// DeleteUser mocks base method. +func (m *MockUserServiceClient) DeleteUser(arg0 context.Context, arg1 *dataplanev1alpha1.DeleteUserRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.DeleteUserResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteUser", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.DeleteUserResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteUser indicates an expected call of DeleteUser. +func (mr *MockUserServiceClientMockRecorder) DeleteUser(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockUserServiceClient)(nil).DeleteUser), varargs...) +} + +// ListUsers mocks base method. 
+func (m *MockUserServiceClient) ListUsers(arg0 context.Context, arg1 *dataplanev1alpha1.ListUsersRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.ListUsersResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListUsers", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.ListUsersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListUsers indicates an expected call of ListUsers. +func (mr *MockUserServiceClientMockRecorder) ListUsers(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockUserServiceClient)(nil).ListUsers), varargs...) +} + +// UpdateUser mocks base method. +func (m *MockUserServiceClient) UpdateUser(arg0 context.Context, arg1 *dataplanev1alpha1.UpdateUserRequest, arg2 ...grpc.CallOption) (*dataplanev1alpha1.UpdateUserResponse, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateUser", varargs...) + ret0, _ := ret[0].(*dataplanev1alpha1.UpdateUserResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateUser indicates an expected call of UpdateUser. +func (mr *MockUserServiceClientMockRecorder) UpdateUser(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUser", reflect.TypeOf((*MockUserServiceClient)(nil).UpdateUser), varargs...) 
+} diff --git a/redpanda/mocks/mocks.go b/redpanda/mocks/mocks.go index a0737008..ce5e91ae 100644 --- a/redpanda/mocks/mocks.go +++ b/redpanda/mocks/mocks.go @@ -13,5 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build generate +// +build generate + // Package mocks provides the mocked resources and its clients for testing. package mocks + +//go:generate mockgen -destination=./mock_topic_service_client.go -package=mocks buf.build/gen/go/redpandadata/dataplane/grpc/go/redpanda/api/dataplane/v1alpha1/dataplanev1alpha1grpc TopicServiceClient +//go:generate mockgen -destination=./mock_user_service_client.go -package=mocks buf.build/gen/go/redpandadata/dataplane/grpc/go/redpanda/api/dataplane/v1alpha1/dataplanev1alpha1grpc UserServiceClient +//go:generate mockgen -destination=./mock_operations_service_client.go -package=mocks buf.build/gen/go/redpandadata/cloud/grpc/go/redpanda/api/controlplane/v1beta2/controlplanev1beta2grpc OperationServiceClient +//go:generate mockgen -destination=./mock_serverless_cluster_service_client.go -package=mocks buf.build/gen/go/redpandadata/cloud/grpc/go/redpanda/api/controlplane/v1beta2/controlplanev1beta2grpc ServerlessClusterServiceClient +//go:generate mockgen -destination=./mock_cp_client_set.go -package=mocks github.com/redpanda-data/terraform-provider-redpanda/redpanda/cloud CpClientSet diff --git a/redpanda/redpanda_test.go b/redpanda/redpanda_test.go index 55752746..cc468a7a 100644 --- a/redpanda/redpanda_test.go +++ b/redpanda/redpanda_test.go @@ -1,3 +1,18 @@ +// Copyright 2023 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package redpanda import ( diff --git a/redpanda/resources/acl/acl.go b/redpanda/resources/acl/acl.go index a3ab8529..1ecae54d 100644 --- a/redpanda/resources/acl/acl.go +++ b/redpanda/resources/acl/acl.go @@ -28,7 +28,7 @@ import ( // stringToEnum converts a string to an enum given a certain map. It prepends // 's' with 'prepend'. -func stringToEnum(s string, prepend string, m map[string]int32) (int32, error) { +func stringToEnum(s, prepend string, m map[string]int32) (int32, error) { if e, ok := m[prepend+s]; ok { return e, nil } diff --git a/redpanda/resources/cluster/cluster.go b/redpanda/resources/cluster/cluster.go new file mode 100644 index 00000000..3de20ada --- /dev/null +++ b/redpanda/resources/cluster/cluster.go @@ -0,0 +1,312 @@ +// Copyright 2024 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cluster + +import ( + "context" + "fmt" + "reflect" + + controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/redpanda-data/terraform-provider-redpanda/redpanda/models" + "github.com/redpanda-data/terraform-provider-redpanda/redpanda/utils" +) + +func gcpConnectConsumerModelToStruct(accept []*models.GcpPrivateServiceConnectConsumer) []*controlplanev1beta2.GCPPrivateServiceConnectConsumer { + var output []*controlplanev1beta2.GCPPrivateServiceConnectConsumer + for _, a := range accept { + output = append(output, &controlplanev1beta2.GCPPrivateServiceConnectConsumer{ + Source: a.Source, + }) + } + return output +} + +func gcpConnectConsumerStructToModel(accept []*controlplanev1beta2.GCPPrivateServiceConnectConsumer) []*models.GcpPrivateServiceConnectConsumer { + var output []*models.GcpPrivateServiceConnectConsumer + for _, a := range accept { + output = append(output, &models.GcpPrivateServiceConnectConsumer{ + Source: a.Source, + }) + } + return output +} + +func toMtlsModel(ctx context.Context, mtls *controlplanev1beta2.MTLSSpec) (*models.Mtls, diag.Diagnostics) { + if isMtlsSpecNil(mtls) { + return nil, nil + } + + capem, err := types.ListValueFrom(ctx, types.StringType, mtls.GetCaCertificatesPem()) + if err != nil { + return nil, err + } + maprules, err := types.ListValueFrom(ctx, types.StringType, mtls.GetPrincipalMappingRules()) + if err != nil { + return nil, err + } + return &models.Mtls{ + Enabled: types.BoolValue(mtls.GetEnabled()), + CaCertificatesPem: capem, + PrincipalMappingRules: maprules, + }, nil +} + +func toMtlsSpec(mtls *models.Mtls) *controlplanev1beta2.MTLSSpec { + if isMtlsStructNil(mtls) { + return &controlplanev1beta2.MTLSSpec{ + Enabled: false, + CaCertificatesPem: make([]string, 
0), + PrincipalMappingRules: make([]string, 0), + } + } + return &controlplanev1beta2.MTLSSpec{ + Enabled: mtls.Enabled.ValueBool(), + CaCertificatesPem: utils.TypeListToStringSlice(mtls.CaCertificatesPem), + PrincipalMappingRules: utils.TypeListToStringSlice(mtls.PrincipalMappingRules), + } +} + +func isMtlsNil(container any) bool { + v := reflect.ValueOf(container) + if v.Kind() != reflect.Struct && v.Kind() != reflect.Ptr { + return true + } + + if !v.IsValid() || v.IsNil() { + return true + } + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return true + } + mtlsField := v.FieldByName("Mtls") + if !mtlsField.IsValid() || mtlsField.IsNil() { + return true + } + return isMtlsStructNil(mtlsField.Interface().(*models.Mtls)) +} + +func isMtlsStructNil(m *models.Mtls) bool { + return m == nil || (m.Enabled.IsNull() && m.CaCertificatesPem.IsNull() && m.PrincipalMappingRules.IsNull()) +} + +func isMtlsSpecNil(m *controlplanev1beta2.MTLSSpec) bool { + return m == nil || (!m.GetEnabled() && len(m.GetCaCertificatesPem()) == 0 && len(m.GetPrincipalMappingRules()) == 0) +} + +func isAwsPrivateLinkStructNil(m *models.AwsPrivateLink) bool { + return m == nil || (m.Enabled.IsNull() && m.ConnectConsole.IsNull() && m.AllowedPrincipals.IsNull()) +} + +func isAwsPrivateLinkSpecNil(m *controlplanev1beta2.AWSPrivateLinkStatus) bool { + return m == nil || (!m.Enabled && !m.ConnectConsole && len(m.AllowedPrincipals) == 0) +} + +func isAzurePrivateLinkStructNil(m *models.AzurePrivateLink) bool { + return m == nil || (m.Enabled.IsNull() && m.AllowedSubscriptions.IsNull() && m.ConnectConsole.IsNull()) +} + +func isAzurePrivateLinkSpecNil(m *controlplanev1beta2.AzurePrivateLinkStatus) bool { + return m == nil || (!m.Enabled && len(m.AllowedSubscriptions) == 0 && !m.ConnectConsole) +} + +func isGcpPrivateServiceConnectStructNil(m *models.GcpPrivateServiceConnect) bool { + return m == nil || (m.Enabled.IsNull() && m.GlobalAccessEnabled.IsNull() && 
len(m.ConsumerAcceptList) == 0) +} + +func isGcpPrivateServiceConnectSpecNil(m *controlplanev1beta2.GCPPrivateServiceConnectStatus) bool { + return m == nil || (!m.Enabled && !m.GlobalAccessEnabled && len(m.ConsumerAcceptList) == 0) +} + +// generateClusterRequest was pulled out to enable unit testing +func generateClusterRequest(model models.Cluster) (*controlplanev1beta2.ClusterCreate, error) { + provider, err := utils.StringToCloudProvider(model.CloudProvider.ValueString()) + if err != nil { + return nil, fmt.Errorf("unable to parse cloud provider: %v", err) + } + clusterType, err := utils.StringToClusterType(model.ClusterType.ValueString()) + if err != nil { + return nil, fmt.Errorf("unable to parse cluster type: %v", err) + } + rpVersion := model.RedpandaVersion.ValueString() + + output := &controlplanev1beta2.ClusterCreate{ + Name: model.Name.ValueString(), + ConnectionType: utils.StringToConnectionType(model.ConnectionType.ValueString()), + CloudProvider: provider, + RedpandaVersion: &rpVersion, + ThroughputTier: model.ThroughputTier.ValueString(), + Region: model.Region.ValueString(), + Zones: utils.TypeListToStringSlice(model.Zones), + ResourceGroupId: model.ResourceGroupID.ValueString(), + NetworkId: model.NetworkID.ValueString(), + Type: clusterType, + CloudProviderTags: utils.TypeMapToStringMap(model.Tags), + } + if !isAwsPrivateLinkStructNil(model.AwsPrivateLink) { + if !model.AwsPrivateLink.AllowedPrincipals.IsNull() { + output.AwsPrivateLink = &controlplanev1beta2.AWSPrivateLinkSpec{ + Enabled: model.AwsPrivateLink.Enabled.ValueBool(), + AllowedPrincipals: utils.TypeListToStringSlice(model.AwsPrivateLink.AllowedPrincipals), + ConnectConsole: model.AwsPrivateLink.ConnectConsole.ValueBool(), + } + } + } + if !isGcpPrivateServiceConnectStructNil(model.GcpPrivateServiceConnect) { + if len(model.GcpPrivateServiceConnect.ConsumerAcceptList) > 0 { + output.GcpPrivateServiceConnect = &controlplanev1beta2.GCPPrivateServiceConnectSpec{ + Enabled: 
model.GcpPrivateServiceConnect.Enabled.ValueBool(), + GlobalAccessEnabled: model.GcpPrivateServiceConnect.GlobalAccessEnabled.ValueBool(), + ConsumerAcceptList: gcpConnectConsumerModelToStruct(model.GcpPrivateServiceConnect.ConsumerAcceptList), + } + } + } + + if !isAzurePrivateLinkStructNil(model.AzurePrivateLink) { + if !model.AzurePrivateLink.AllowedSubscriptions.IsNull() { + output.AzurePrivateLink = &controlplanev1beta2.AzurePrivateLinkSpec{ + Enabled: model.AzurePrivateLink.Enabled.ValueBool(), + AllowedSubscriptions: utils.TypeListToStringSlice(model.AzurePrivateLink.AllowedSubscriptions), + ConnectConsole: model.AzurePrivateLink.ConnectConsole.ValueBool(), + } + } + } + + if model.KafkaAPI != nil { + output.KafkaApi = &controlplanev1beta2.KafkaAPISpec{ + Mtls: toMtlsSpec(model.KafkaAPI.Mtls), + } + } + if model.HTTPProxy != nil { + output.HttpProxy = &controlplanev1beta2.HTTPProxySpec{ + Mtls: toMtlsSpec(model.HTTPProxy.Mtls), + } + } + if model.SchemaRegistry != nil { + output.SchemaRegistry = &controlplanev1beta2.SchemaRegistrySpec{ + Mtls: toMtlsSpec(model.SchemaRegistry.Mtls), + } + } + if !model.ReadReplicaClusterIds.IsNull() { + output.ReadReplicaClusterIds = utils.TypeListToStringSlice(model.ReadReplicaClusterIds) + } + + return output, nil +} + +// generateModel populates the Cluster model to be persisted to state for Create, Read and Update operations. 
It is also indirectly used by Import +func generateModel(ctx context.Context, cfg models.Cluster, cluster *controlplanev1beta2.Cluster) (*models.Cluster, error) { + output := &models.Cluster{ + Name: types.StringValue(cluster.Name), + ConnectionType: types.StringValue(utils.ConnectionTypeToString(cluster.ConnectionType)), + CloudProvider: types.StringValue(utils.CloudProviderToString(cluster.CloudProvider)), + ClusterType: types.StringValue(utils.ClusterTypeToString(cluster.Type)), + RedpandaVersion: cfg.RedpandaVersion, + ThroughputTier: types.StringValue(cluster.ThroughputTier), + Region: types.StringValue(cluster.Region), + AllowDeletion: cfg.AllowDeletion, + Tags: cfg.Tags, + ResourceGroupID: types.StringValue(cluster.ResourceGroupId), + NetworkID: types.StringValue(cluster.NetworkId), + ID: types.StringValue(cluster.Id), + } + + clusterZones, d := types.ListValueFrom(ctx, types.StringType, cluster.Zones) + if d.HasError() { + return nil, fmt.Errorf("failed to parse cluster zones: %v", d) + } + output.Zones = clusterZones + + if cluster.GetDataplaneApi() != nil { + clusterURL, err := utils.SplitSchemeDefPort(cluster.DataplaneApi.Url, "443") + if err != nil { + return nil, fmt.Errorf("unable to parse Cluster API URL: %v", err) + } + output.ClusterAPIURL = basetypes.NewStringValue(clusterURL) + } + + rr, d := types.ListValueFrom(ctx, types.StringType, cluster.ReadReplicaClusterIds) + if d.HasError() { + return nil, fmt.Errorf("failed to parse read replica cluster IDs: %v", d) + } + output.ReadReplicaClusterIds = rr + + if !isAwsPrivateLinkSpecNil(cluster.AwsPrivateLink) { + ap, dg := types.ListValueFrom(ctx, types.StringType, cluster.AwsPrivateLink.AllowedPrincipals) + if dg.HasError() { + return nil, fmt.Errorf("failed to parse AWS Private Link: %v", dg) + } + output.AwsPrivateLink = &models.AwsPrivateLink{ + Enabled: types.BoolValue(cluster.AwsPrivateLink.Enabled), + ConnectConsole: types.BoolValue(cluster.AwsPrivateLink.ConnectConsole), + AllowedPrincipals: 
ap, + } + } + if !isGcpPrivateServiceConnectSpecNil(cluster.GcpPrivateServiceConnect) { + output.GcpPrivateServiceConnect = &models.GcpPrivateServiceConnect{ + Enabled: types.BoolValue(cluster.GcpPrivateServiceConnect.Enabled), + GlobalAccessEnabled: types.BoolValue(cluster.GcpPrivateServiceConnect.GlobalAccessEnabled), + ConsumerAcceptList: gcpConnectConsumerStructToModel(cluster.GcpPrivateServiceConnect.ConsumerAcceptList), + } + } + + if !isAzurePrivateLinkSpecNil(cluster.AzurePrivateLink) { + as, dg := types.ListValueFrom(ctx, types.StringType, cluster.AzurePrivateLink.AllowedSubscriptions) + if dg.HasError() { + return nil, fmt.Errorf("failed to parse Azure Private Link: %v", dg) + } + output.AzurePrivateLink = &models.AzurePrivateLink{ + Enabled: types.BoolValue(cluster.AzurePrivateLink.Enabled), + ConnectConsole: types.BoolValue(cluster.AzurePrivateLink.ConnectConsole), + AllowedSubscriptions: as, + } + } + kAPI, err := toMtlsModel(ctx, cluster.GetKafkaApi().GetMtls()) + if err != nil { + return nil, fmt.Errorf("failed to parse Kafka API MTLS: %v", err) + } + if kAPI != nil { + output.KafkaAPI = &models.KafkaAPI{ + Mtls: kAPI, + } + } + ht, err := toMtlsModel(ctx, cluster.GetHttpProxy().GetMtls()) + if err != nil { + return nil, fmt.Errorf("failed to parse HTTP Proxy MTLS: %v", err) + } + if ht != nil { + output.HTTPProxy = &models.HTTPProxy{ + Mtls: ht, + } + } + sr, err := toMtlsModel(ctx, cluster.GetSchemaRegistry().GetMtls()) + if err != nil { + return nil, fmt.Errorf("failed to parse Schema Registry MTLS: %v", err) + } + if sr != nil { + output.SchemaRegistry = &models.SchemaRegistry{ + Mtls: sr, + } + } + return output, nil +} diff --git a/redpanda/resources/cluster/data_cluster.go b/redpanda/resources/cluster/data_cluster.go index a83f5d58..93c4196d 100644 --- a/redpanda/resources/cluster/data_cluster.go +++ b/redpanda/resources/cluster/data_cluster.go @@ -332,7 +332,7 @@ func datasourceClusterSchema() schema.Schema { "principal_mapping_rules": 
schema.ListAttribute{ ElementType: types.StringType, Computed: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication.", }, }, }, @@ -358,7 +358,7 @@ func datasourceClusterSchema() schema.Schema { "principal_mapping_rules": schema.ListAttribute{ ElementType: types.StringType, Computed: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication.", }, }, }, @@ -384,7 +384,7 @@ func datasourceClusterSchema() schema.Schema { "principal_mapping_rules": schema.ListAttribute{ ElementType: types.StringType, Computed: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. 
See the Redpanda documentation on configuring authentication.", }, }, }, diff --git a/redpanda/resources/cluster/resource_cluster.go b/redpanda/resources/cluster/resource_cluster.go index 44054942..1891cd82 100644 --- a/redpanda/resources/cluster/resource_cluster.go +++ b/redpanda/resources/cluster/resource_cluster.go @@ -20,11 +20,9 @@ package cluster import ( "context" "fmt" - "reflect" "time" controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -32,8 +30,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/redpanda-data/terraform-provider-redpanda/redpanda/cloud" "github.com/redpanda-data/terraform-provider-redpanda/redpanda/config" "github.com/redpanda-data/terraform-provider-redpanda/redpanda/models" @@ -117,6 +115,9 @@ func resourceClusterSchema() schema.Schema { Required: true, Description: "Throughput tier of the cluster.", PlanModifiers: []planmodifier.String{stringplanmodifier.RequiresReplace()}, + Validators: []validator.String{ + validators.ThroughputTierValidator{}, + }, }, "region": schema.StringAttribute{ Optional: true, @@ -177,6 +178,12 @@ func resourceClusterSchema() schema.Schema { Description: "The ARN of the principals that can access the Redpanda AWS PrivateLink Endpoint Service. 
To grant permissions to all principals, use an asterisk (*).", }, }, + Validators: []validator.Object{ + validators.CloudProviderDependentValidator{ + AttributeName: "aws_private_link", + CloudProvider: "aws", + }, + }, }, "azure_private_link": schema.SingleNestedAttribute{ Optional: true, @@ -196,6 +203,12 @@ func resourceClusterSchema() schema.Schema { Description: "Whether Redpanda Azure Private Link Endpoint Service is enabled.", }, }, + Validators: []validator.Object{ + validators.CloudProviderDependentValidator{ + AttributeName: "azure_private_link", + CloudProvider: "azure", + }, + }, }, "gcp_private_service_connect": schema.SingleNestedAttribute{ Optional: true, @@ -222,6 +235,12 @@ func resourceClusterSchema() schema.Schema { }, }, }, + Validators: []validator.Object{ + validators.CloudProviderDependentValidator{ + AttributeName: "gcp_private_service_connect", + CloudProvider: "gcp", + }, + }, }, "kafka_api": schema.SingleNestedAttribute{ Optional: true, @@ -243,7 +262,7 @@ func resourceClusterSchema() schema.Schema { "principal_mapping_rules": schema.ListAttribute{ ElementType: types.StringType, Required: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication.", }, }, }, @@ -269,7 +288,7 @@ func resourceClusterSchema() schema.Schema { "principal_mapping_rules": schema.ListAttribute{ ElementType: types.StringType, Required: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. 
See the Redpanda documentation on configuring authentication.", }, }, }, @@ -295,7 +314,7 @@ func resourceClusterSchema() schema.Schema { "principal_mapping_rules": schema.ListAttribute{ ElementType: types.StringType, Required: true, - Description: "Principal mapping rules for mTLS authentication. Only valid for Kafka API. See the Redpanda documentation on configuring authentication.", + Description: "Principal mapping rules for mTLS authentication. See the Redpanda documentation on configuring authentication.", }, }, }, @@ -316,7 +335,7 @@ func (c *Cluster) Create(ctx context.Context, req resource.CreateRequest, resp * var model models.Cluster resp.Diagnostics.Append(req.Plan.Get(ctx, &model)...) - clusterReq, err := GenerateClusterRequest(model) + clusterReq, err := generateClusterRequest(model) if err != nil { resp.Diagnostics.AddError("unable to parse CreateCluster request", err.Error()) return @@ -341,7 +360,7 @@ func (c *Cluster) Create(ctx context.Context, req resource.CreateRequest, resp * resp.Diagnostics.AddError(fmt.Sprintf("successfully created the cluster with ID %q, but failed to read the cluster configuration: %v", model.ID.ValueString(), err), err.Error()) return } - persist, err := GenerateModel(ctx, model, cluster) + persist, err := generateModel(ctx, model, cluster) if err != nil { resp.Diagnostics.AddError("failed to generate model for state during cluster.Create", err.Error()) return @@ -365,7 +384,7 @@ func (c *Cluster) Read(ctx context.Context, req resource.ReadRequest, resp *reso resp.Diagnostics.AddError(fmt.Sprintf("failed to read cluster %s", model.ID), err.Error()) return } - persist, err := GenerateModel(ctx, model, cluster) + persist, err := generateModel(ctx, model, cluster) if err != nil { resp.Diagnostics.AddError("failed to generate model for state during cluster.Read", err.Error()) return @@ -449,7 +468,7 @@ func (c *Cluster) Update(ctx context.Context, req resource.UpdateRequest, resp * var cfg models.Cluster 
resp.Diagnostics.Append(req.Config.Get(ctx, &cfg)...) - persist, err := GenerateModel(ctx, cfg, cluster) + persist, err := generateModel(ctx, cfg, cluster) if err != nil { resp.Diagnostics.AddError("failed to generate model for state during cluster.Update", err.Error()) return @@ -468,7 +487,7 @@ func (c *Cluster) Delete(ctx context.Context, req resource.DeleteRequest, resp * } // We need to wait for the cluster to be in a running state before we can delete it - _, err := utils.GetClusterUntilRunningState(ctx, 0, 30, model.Name.ValueString(), c.CpCl) + _, err := utils.GetClusterUntilRunningState(ctx, 0, 30, model.Name.ValueString(), 1*time.Minute, c.CpCl) if err != nil { return } @@ -491,285 +510,3 @@ func (c *Cluster) Delete(ctx context.Context, req resource.DeleteRequest, resp * func (*Cluster) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } - -// GenerateClusterRequest was pulled out to enable unit testing -func GenerateClusterRequest(model models.Cluster) (*controlplanev1beta2.ClusterCreate, error) { - provider, err := utils.StringToCloudProvider(model.CloudProvider.ValueString()) - if err != nil { - return nil, fmt.Errorf("unable to parse cloud provider: %v", err) - } - clusterType, err := utils.StringToClusterType(model.ClusterType.ValueString()) - if err != nil { - return nil, fmt.Errorf("unable to parse cluster type: %v", err) - } - rpVersion := model.RedpandaVersion.ValueString() - - output := &controlplanev1beta2.ClusterCreate{ - Name: model.Name.ValueString(), - ConnectionType: utils.StringToConnectionType(model.ConnectionType.ValueString()), - CloudProvider: provider, - RedpandaVersion: &rpVersion, - ThroughputTier: model.ThroughputTier.ValueString(), - Region: model.Region.ValueString(), - Zones: utils.TypeListToStringSlice(model.Zones), - ResourceGroupId: model.ResourceGroupID.ValueString(), - NetworkId: 
model.NetworkID.ValueString(), - Type: clusterType, - CloudProviderTags: utils.TypeMapToStringMap(model.Tags), - } - if !isAwsPrivateLinkStructNil(model.AwsPrivateLink) { - if !model.AwsPrivateLink.AllowedPrincipals.IsNull() { - output.AwsPrivateLink = &controlplanev1beta2.AWSPrivateLinkSpec{ - Enabled: model.AwsPrivateLink.Enabled.ValueBool(), - AllowedPrincipals: utils.TypeListToStringSlice(model.AwsPrivateLink.AllowedPrincipals), - ConnectConsole: model.AwsPrivateLink.ConnectConsole.ValueBool(), - } - } - } - if !isGcpPrivateServiceConnectStructNil(model.GcpPrivateServiceConnect) { - if len(model.GcpPrivateServiceConnect.ConsumerAcceptList) > 0 { - output.GcpPrivateServiceConnect = &controlplanev1beta2.GCPPrivateServiceConnectSpec{ - Enabled: model.GcpPrivateServiceConnect.Enabled.ValueBool(), - GlobalAccessEnabled: model.GcpPrivateServiceConnect.GlobalAccessEnabled.ValueBool(), - ConsumerAcceptList: gcpConnectConsumerModelToStruct(model.GcpPrivateServiceConnect.ConsumerAcceptList), - } - } - } - - if !isAzurePrivateLinkStructNil(model.AzurePrivateLink) { - if !model.AzurePrivateLink.AllowedSubscriptions.IsNull() { - output.AzurePrivateLink = &controlplanev1beta2.AzurePrivateLinkSpec{ - Enabled: model.AzurePrivateLink.Enabled.ValueBool(), - AllowedSubscriptions: utils.TypeListToStringSlice(model.AzurePrivateLink.AllowedSubscriptions), - ConnectConsole: model.AzurePrivateLink.ConnectConsole.ValueBool(), - } - } - } - - if model.KafkaAPI != nil { - output.KafkaApi = &controlplanev1beta2.KafkaAPISpec{ - Mtls: toMtlsSpec(model.KafkaAPI.Mtls), - } - } - if model.HTTPProxy != nil { - output.HttpProxy = &controlplanev1beta2.HTTPProxySpec{ - Mtls: toMtlsSpec(model.HTTPProxy.Mtls), - } - } - if model.SchemaRegistry != nil { - output.SchemaRegistry = &controlplanev1beta2.SchemaRegistrySpec{ - Mtls: toMtlsSpec(model.SchemaRegistry.Mtls), - } - } - if !model.ReadReplicaClusterIds.IsNull() { - output.ReadReplicaClusterIds = 
utils.TypeListToStringSlice(model.ReadReplicaClusterIds) - } - - return output, nil -} - -// GenerateModel populates the Cluster model to be persisted to state for Create, Read and Update operations. It is also indirectly used by Import -func GenerateModel(ctx context.Context, cfg models.Cluster, cluster *controlplanev1beta2.Cluster) (*models.Cluster, error) { - output := &models.Cluster{ - Name: types.StringValue(cluster.Name), - ConnectionType: types.StringValue(utils.ConnectionTypeToString(cluster.ConnectionType)), - CloudProvider: types.StringValue(utils.CloudProviderToString(cluster.CloudProvider)), - ClusterType: types.StringValue(utils.ClusterTypeToString(cluster.Type)), - RedpandaVersion: cfg.RedpandaVersion, - ThroughputTier: types.StringValue(cluster.ThroughputTier), - Region: types.StringValue(cluster.Region), - AllowDeletion: cfg.AllowDeletion, - Tags: cfg.Tags, - ResourceGroupID: types.StringValue(cluster.ResourceGroupId), - NetworkID: types.StringValue(cluster.NetworkId), - ID: types.StringValue(cluster.Id), - } - - clusterZones, d := types.ListValueFrom(ctx, types.StringType, cluster.Zones) - if d.HasError() { - return nil, fmt.Errorf("failed to parse cluster zones: %v", d) - } - output.Zones = clusterZones - - if cluster.GetDataplaneApi() != nil { - clusterURL, err := utils.SplitSchemeDefPort(cluster.DataplaneApi.Url, "443") - if err != nil { - return nil, fmt.Errorf("unable to parse Cluster API URL: %v", err) - } - output.ClusterAPIURL = basetypes.NewStringValue(clusterURL) - } - - rr, d := types.ListValueFrom(ctx, types.StringType, cluster.ReadReplicaClusterIds) - if d.HasError() { - return nil, fmt.Errorf("failed to parse read replica cluster IDs: %v", d) - } - output.ReadReplicaClusterIds = rr - - if !isAwsPrivateLinkSpecNil(cluster.AwsPrivateLink) { - ap, dg := types.ListValueFrom(ctx, types.StringType, cluster.AwsPrivateLink.AllowedPrincipals) - if dg.HasError() { - return nil, fmt.Errorf("failed to parse AWS Private Link: %v", dg) - } - 
output.AwsPrivateLink = &models.AwsPrivateLink{ - Enabled: types.BoolValue(cluster.AwsPrivateLink.Enabled), - ConnectConsole: types.BoolValue(cluster.AwsPrivateLink.ConnectConsole), - AllowedPrincipals: ap, - } - } - if !isGcpPrivateServiceConnectSpecNil(cluster.GcpPrivateServiceConnect) { - output.GcpPrivateServiceConnect = &models.GcpPrivateServiceConnect{ - Enabled: types.BoolValue(cluster.GcpPrivateServiceConnect.Enabled), - GlobalAccessEnabled: types.BoolValue(cluster.GcpPrivateServiceConnect.GlobalAccessEnabled), - ConsumerAcceptList: gcpConnectConsumerStructToModel(cluster.GcpPrivateServiceConnect.ConsumerAcceptList), - } - } - - if !isAzurePrivateLinkSpecNil(cluster.AzurePrivateLink) { - as, dg := types.ListValueFrom(ctx, types.StringType, cluster.AzurePrivateLink.AllowedSubscriptions) - if dg.HasError() { - return nil, fmt.Errorf("failed to parse Azure Private Link: %v", dg) - } - output.AzurePrivateLink = &models.AzurePrivateLink{ - Enabled: types.BoolValue(cluster.AzurePrivateLink.Enabled), - ConnectConsole: types.BoolValue(cluster.AzurePrivateLink.ConnectConsole), - AllowedSubscriptions: as, - } - } - kAPI, err := toMtlsModel(ctx, cluster.GetKafkaApi().GetMtls()) - if err != nil { - return nil, fmt.Errorf("failed to parse Kafka API MTLS: %v", err) - } - if kAPI != nil { - output.KafkaAPI = &models.KafkaAPI{ - Mtls: kAPI, - } - } - ht, err := toMtlsModel(ctx, cluster.GetHttpProxy().GetMtls()) - if err != nil { - return nil, fmt.Errorf("failed to parse HTTP Proxy MTLS: %v", err) - } - if ht != nil { - output.HTTPProxy = &models.HTTPProxy{ - Mtls: ht, - } - } - sr, err := toMtlsModel(ctx, cluster.GetSchemaRegistry().GetMtls()) - if err != nil { - return nil, fmt.Errorf("failed to parse Schema Registry MTLS: %v", err) - } - if sr != nil { - output.SchemaRegistry = &models.SchemaRegistry{ - Mtls: sr, - } - } - return output, nil -} - -func gcpConnectConsumerModelToStruct(accept []*models.GcpPrivateServiceConnectConsumer) 
[]*controlplanev1beta2.GCPPrivateServiceConnectConsumer { - var output []*controlplanev1beta2.GCPPrivateServiceConnectConsumer - for _, a := range accept { - output = append(output, &controlplanev1beta2.GCPPrivateServiceConnectConsumer{ - Source: a.Source, - }) - } - return output -} - -func gcpConnectConsumerStructToModel(accept []*controlplanev1beta2.GCPPrivateServiceConnectConsumer) []*models.GcpPrivateServiceConnectConsumer { - var output []*models.GcpPrivateServiceConnectConsumer - for _, a := range accept { - output = append(output, &models.GcpPrivateServiceConnectConsumer{ - Source: a.Source, - }) - } - return output -} - -func toMtlsModel(ctx context.Context, mtls *controlplanev1beta2.MTLSSpec) (*models.Mtls, diag.Diagnostics) { - if isMtlsSpecNil(mtls) { - return nil, nil - } - - capem, err := types.ListValueFrom(ctx, types.StringType, mtls.GetCaCertificatesPem()) - if err != nil { - return nil, err - } - maprules, err := types.ListValueFrom(ctx, types.StringType, mtls.GetPrincipalMappingRules()) - if err != nil { - return nil, err - } - return &models.Mtls{ - Enabled: types.BoolValue(mtls.GetEnabled()), - CaCertificatesPem: capem, - PrincipalMappingRules: maprules, - }, nil -} - -func toMtlsSpec(mtls *models.Mtls) *controlplanev1beta2.MTLSSpec { - if isMtlsStructNil(mtls) { - return emptyMtlsSpec() - } - return &controlplanev1beta2.MTLSSpec{ - Enabled: mtls.Enabled.ValueBool(), - CaCertificatesPem: utils.TypeListToStringSlice(mtls.CaCertificatesPem), - PrincipalMappingRules: utils.TypeListToStringSlice(mtls.PrincipalMappingRules), - } -} - -func isMtlsNil(container any) bool { - v := reflect.ValueOf(container) - if !v.IsValid() || v.IsNil() { - return true - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if v.Kind() != reflect.Struct { - return true - } - mtlsField := v.FieldByName("Mtls") - if !mtlsField.IsValid() || mtlsField.IsNil() { - return true - } - return isMtlsStructNil(mtlsField.Interface().(*models.Mtls)) -} - -func isMtlsStructNil(m 
*models.Mtls) bool { - return m == nil || (m.Enabled.IsNull() && m.CaCertificatesPem.IsNull() && m.PrincipalMappingRules.IsNull()) -} - -func isMtlsSpecNil(m *controlplanev1beta2.MTLSSpec) bool { - return m == nil || (!m.GetEnabled() && len(m.GetCaCertificatesPem()) == 0 && len(m.GetPrincipalMappingRules()) == 0) -} - -func emptyMtlsSpec() *controlplanev1beta2.MTLSSpec { - return &controlplanev1beta2.MTLSSpec{ - Enabled: false, - CaCertificatesPem: make([]string, 0), - PrincipalMappingRules: make([]string, 0), - } -} - -func isAwsPrivateLinkStructNil(m *models.AwsPrivateLink) bool { - return m == nil || (m.Enabled.IsNull() && m.ConnectConsole.IsNull() && m.AllowedPrincipals.IsNull()) -} - -func isAwsPrivateLinkSpecNil(m *controlplanev1beta2.AWSPrivateLinkStatus) bool { - return m == nil || (!m.Enabled && !m.ConnectConsole && len(m.AllowedPrincipals) == 0) -} - -func isAzurePrivateLinkStructNil(m *models.AzurePrivateLink) bool { - return m == nil || (m.Enabled.IsNull() && m.AllowedSubscriptions.IsNull() && m.ConnectConsole.IsNull()) -} - -func isAzurePrivateLinkSpecNil(m *controlplanev1beta2.AzurePrivateLinkStatus) bool { - return m == nil || (!m.Enabled && len(m.AllowedSubscriptions) == 0 && !m.ConnectConsole) -} - -func isGcpPrivateServiceConnectStructNil(m *models.GcpPrivateServiceConnect) bool { - return m == nil || (m.Enabled.IsNull() && m.GlobalAccessEnabled.IsNull() && len(m.ConsumerAcceptList) == 0) -} - -func isGcpPrivateServiceConnectSpecNil(m *controlplanev1beta2.GCPPrivateServiceConnectStatus) bool { - return m == nil || (!m.Enabled && !m.GlobalAccessEnabled && len(m.ConsumerAcceptList) == 0) -} diff --git a/redpanda/resources/cluster/resource_cluster_test.go b/redpanda/resources/cluster/resource_cluster_test.go index fe8b6459..5efc6c5a 100644 --- a/redpanda/resources/cluster/resource_cluster_test.go +++ b/redpanda/resources/cluster/resource_cluster_test.go @@ -58,12 +58,12 @@ func TestGenerateClusterRequest(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - if got, _ := GenerateClusterRequest(tt.args.model); !reflect.DeepEqual(got, tt.want) { + if got, _ := generateClusterRequest(tt.args.model); !reflect.DeepEqual(got, tt.want) { fmt.Println("got") spew.Dump(got) fmt.Println("want") spew.Dump(tt.want) - t.Errorf("GenerateClusterRequest() = %v, want %v", got, tt.want) + t.Errorf("generateClusterRequest() = %v, want %v", got, tt.want) } }) } @@ -293,6 +293,7 @@ func TestGenerateModel(t *testing.T) { AwsPrivateLink: &models.AwsPrivateLink{ Enabled: types.BoolValue(true), AllowedPrincipals: utils.TestingOnlyStringSliceToTypeList([]string{"arn:aws:iam::123456789012:root"}), + ConnectConsole: types.BoolValue(false), }, }, wantErr: false, @@ -360,7 +361,7 @@ func TestGenerateModel(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - result, err := GenerateModel(ctx, tc.cfg, tc.cluster) + result, err := generateModel(ctx, tc.cfg, tc.cluster) if tc.wantErr { assert.Error(t, err) @@ -371,3 +372,66 @@ func TestGenerateModel(t *testing.T) { }) } } + +func TestIsMtlsNil(t *testing.T) { + tests := []struct { + name string + container any + want bool + }{ + { + name: "KafkaAPI with nil Mtls", + container: &models.KafkaAPI{Mtls: nil}, + want: true, + }, + { + name: "KafkaAPI with non-nil Mtls", + container: &models.KafkaAPI{Mtls: &models.Mtls{ + Enabled: types.BoolValue(true), + }}, + want: false, + }, + { + name: "HTTPProxy with nil Mtls", + container: &models.HTTPProxy{Mtls: nil}, + want: true, + }, + { + name: "HTTPProxy with non-nil Mtls", + container: &models.HTTPProxy{Mtls: &models.Mtls{ + Enabled: types.BoolValue(true), + }}, + want: false, + }, + { + name: "SchemaRegistry with nil Mtls", + container: &models.SchemaRegistry{Mtls: nil}, + want: true, + }, + { + name: "SchemaRegistry with non-nil Mtls", + container: &models.SchemaRegistry{Mtls: &models.Mtls{ + Enabled: types.BoolValue(true), + }}, + want: false, + }, + { + name: "Nil container", + container: 
nil, + want: true, + }, + { + name: "Non-struct container", + container: "not a struct", + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isMtlsNil(tt.container); got != tt.want { + t.Errorf("isMtlsNil() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/redpanda/tests/acceptance_test.go b/redpanda/tests/acceptance_test.go index b3375158..3ad81244 100644 --- a/redpanda/tests/acceptance_test.go +++ b/redpanda/tests/acceptance_test.go @@ -17,24 +17,19 @@ import ( ) const ( - awsDedicatedClusterFile = "../../examples/cluster/aws/main.tf" - azureDedicatedClusterFile = "../../examples/cluster/azure/main.tf" - awsDedicatedPrivateLinkClusterFile = "../../examples/cluster/private-link/main.tf" - gcpDedicatedClusterFile = "../../examples/cluster/gcp/main.tf" - dedicatedResourceGroupFile = "../../examples/resourcegroup/main.tf" - dedicatedNetworkFile = "../../examples/network/main.tf" - dedicatedUserACLsTopicFile = "../../examples/user-acl-topic/main.tf" - dataSourcesTest = "../../examples/datasource/main.tf" - serverlessClusterFile = "../../examples/serverless-cluster/main.tf" - bulkDataCreateFile = "../../examples/bulk-data/main.tf" - bulkResCreateFile = "../../examples/bulk-res/main.tf" + awsDedicatedClusterFile = "../../examples/cluster/aws/main.tf" + azureDedicatedClusterFile = "../../examples/cluster/azure/main.tf" + gcpDedicatedClusterFile = "../../examples/cluster/gcp/main.tf" + serverlessClusterFile = "../../examples/cluster/serverless/main.tf" + dedicatedNetworkFile = "../../examples/network/main.tf" + dataSourcesTest = "../../examples/datasource/standard/main.tf" + bulkDataCreateFile = "../../examples/datasource/bulk/main.tf" // These are the resource names as named in the TF files. 
resourceGroupName = "redpanda_resource_group.test" networkResourceName = "redpanda_network.test" clusterResourceName = "redpanda_cluster.test" userResourceName = "redpanda_user.test" topicResourceName = "redpanda_topic.test" - aclResourceName = "redpanda_acl.test" serverlessResourceName = "redpanda_serverless_cluster.test" ) @@ -46,351 +41,12 @@ var ( clientID = os.Getenv(redpanda.ClientIDEnv) clientSecret = os.Getenv(redpanda.ClientSecretEnv) testAgainstExistingCluster = os.Getenv("TEST_AGAINST_EXISTING_CLUSTER") + redpandaVersion = os.Getenv("REDPANDA_VERSION") testaws = "testaws" testawsRename = "testaws-rename" testazure = "testazure" ) -func TestAccResourceGroup(t *testing.T) { - ctx := context.Background() - name := generateRandomName(accNamePrepend + "testrg") - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + "testrg-rename") - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["resource_group_name"] = config.StringVariable(rename) - - var c *cloud.ControlPlaneClientSet - var err error - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if c == nil { - c, err = newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - } - }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(dedicatedResourceGroupFile), - ConfigVariables: origTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - ), - }, - { - ConfigFile: config.StaticFile(dedicatedResourceGroupFile), - ConfigVariables: updateTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: 
resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", rename), - ), - }, - { - ResourceName: resourceGroupName, - ConfigFile: config.StaticFile(dedicatedResourceGroupFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", rename), - ), - }, - { - ConfigFile: config.StaticFile(dedicatedResourceGroupFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupRenameSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepResourceGroup{ - ResourceGroupName: rename, - Client: c, - }.SweepResourceGroup, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - -func TestAccResourcesBulkData(t *testing.T) { - if !strings.Contains(runBulkTests, "true") { - t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + "testbulk") - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - origTestCaseVars["cluster_id"] = config.StringVariable(os.Getenv("BULK_CLUSTER_ID")) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Steps: []resource.TestStep{ - { - ConfigFile: 
config.StaticFile(bulkDataCreateFile), - ConfigVariables: origTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(bulkDataCreateFile), - ConfigVariables: origTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - -func TestAccResourcesBulkRes(t *testing.T) { - if !strings.Contains(runBulkTests, "true") { - t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + "testbulk") - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - origTestCaseVars["cluster_id"] = config.StringVariable(os.Getenv("BULK_CLUSTER_ID")) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(bulkResCreateFile), - ConfigVariables: origTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(bulkResCreateFile), - ConfigVariables: origTestCaseVars, - Destroy: true, - 
ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - -func TestAccResourcesStrippedDownClusterAWS(t *testing.T) { - if !strings.Contains(runClusterTests, "true") { - t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + testaws) - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + testawsRename) - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["cluster_name"] = config.StringVariable(rename) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: origTestCaseVars, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - ), 
- ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - resource.AddTestSweepers(generateRandomName("renameClusterSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepCluster{ - ClusterName: rename, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - -func TestAccResourcesPrivateLinkStrippedDownClusterAWS(t *testing.T) { - if !strings.Contains(runClusterTests, "true") { - t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + testaws) - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + testawsRename) - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["cluster_name"] = config.StringVariable(rename) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - 
Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(awsDedicatedPrivateLinkClusterFile), - ConfigVariables: origTestCaseVars, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - ), - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(awsDedicatedPrivateLinkClusterFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - resource.AddTestSweepers(generateRandomName("renameClusterSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepCluster{ - ClusterName: rename, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - func TestAccResourcesNetwork(t *testing.T) { ctx := context.Background() @@ -400,272 +56,79 @@ func TestAccResourcesNetwork(t *testing.T) { origTestCaseVars["resource_group_name"] = config.StringVariable(name) origTestCaseVars["network_name"] = config.StringVariable(name) - rename := generateRandomName(accNamePrepend + "testnet-rename") - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["network_name"] = config.StringVariable(rename) - - var c *cloud.ControlPlaneClientSet - var err error - 
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - if c == nil { - c, err = newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - } - }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(dedicatedNetworkFile), - ConfigVariables: origTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - func(_ *terraform.State) error { - n, err := c.NetworkForName(ctx, name) - if err != nil { - return err - } - if n == nil { - return fmt.Errorf("unable to find network %q after creation", name) - } - t.Logf("Successfully created network %v", name) - return nil - }, - ), - }, - { - ConfigFile: config.StaticFile(dedicatedNetworkFile), - ConfigVariables: updateTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", rename), - ), - }, - { - ResourceName: networkResourceName, - ConfigFile: config.StaticFile(dedicatedNetworkFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", rename), - ), - }, - { - ConfigFile: config.StaticFile(dedicatedNetworkFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }) - resource.AddTestSweepers(generateRandomName("renamedNetworkSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepNetwork{ - NetworkName: rename, - 
Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) -} - -func TestAccResourcesClusterAWS(t *testing.T) { - if !strings.Contains(runClusterTests, "true") { - t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + testaws) - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + testawsRename) - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["cluster_name"] = config.StringVariable(rename) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: origTestCaseVars, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - ), - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - 
Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), - ), - }, - { - ResourceName: clusterResourceName, - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - // These two only matter on apply; On apply the user will be - // getting Plan, not State, and have correct values for both. - ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), - ), - }, - { - ConfigFile: config.StaticFile(awsDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepCluster{ - ClusterName: rename, - Client: c, - }.SweepCluster, - }) -} - -func TestAccResourcesClusterAzure(t *testing.T) { - if !strings.Contains(runClusterTests, "true") { - 
t.Skip("skipping cluster tests") - } - ctx := context.Background() - - name := generateRandomName(accNamePrepend + testazure) - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + testawsRename) + rename := generateRandomName(accNamePrepend + "testnet-rename") updateTestCaseVars := make(map[string]config.Variable) maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["cluster_name"] = config.StringVariable(rename) + updateTestCaseVars["network_name"] = config.StringVariable(rename) - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) - } + var c *cloud.ControlPlaneClientSet + var err error resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { + testAccPreCheck(t) + if c == nil { + c, err = newTestClients(ctx, clientID, clientSecret, "ign") + if err != nil { + t.Fatal(err) + } + } + }, Steps: []resource.TestStep{ { - ConfigFile: config.StaticFile(azureDedicatedClusterFile), - ConfigVariables: origTestCaseVars, + ConfigFile: config.StaticFile(dedicatedNetworkFile), + ConfigVariables: origTestCaseVars, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceGroupName, "name", name), resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), + func(_ *terraform.State) error { + n, err := c.NetworkForName(ctx, name) + if err != nil { + return err + } + if n == nil { + return fmt.Errorf("unable to find network %q after creation", name) + } + t.Logf("Successfully created network %v", name) + 
return nil + }, ), - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, }, { - ConfigFile: config.StaticFile(azureDedicatedClusterFile), + ConfigFile: config.StaticFile(dedicatedNetworkFile), ConfigVariables: updateTestCaseVars, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), + resource.TestCheckResourceAttr(networkResourceName, "name", rename), ), }, { - ResourceName: clusterResourceName, - ConfigFile: config.StaticFile(azureDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - // These two only matter on apply; On apply the user will be - // getting Plan, not State, and have correct values for both. - ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, + ResourceName: networkResourceName, + ConfigFile: config.StaticFile(dedicatedNetworkFile), + ConfigVariables: updateTestCaseVars, + ImportState: true, + ImportStateVerify: true, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), + resource.TestCheckResourceAttr(networkResourceName, "name", rename), ), }, { - ConfigFile: config.StaticFile(azureDedicatedClusterFile), + ConfigFile: config.StaticFile(dedicatedNetworkFile), ConfigVariables: updateTestCaseVars, Destroy: true, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, }, }, - }, - ) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - 
}.SweepResourceGroup, + }) + resource.AddTestSweepers(generateRandomName("renamedNetworkSweeper"), &resource.Sweeper{ + Name: rename, + F: sweepNetwork{ + NetworkName: rename, + Client: c, + }.SweepNetworks, }) resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ Name: name, @@ -674,39 +137,29 @@ func TestAccResourcesClusterAzure(t *testing.T) { Client: c, }.SweepNetworks, }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ + resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepCluster{ - ClusterName: rename, - Client: c, - }.SweepCluster, + F: sweepResourceGroup{ + ResourceGroupName: name, + Client: c, + }.SweepResourceGroup, }) } -func TestAccUpdatePrivateLinkClusterAWS(t *testing.T) { - if !strings.Contains(runClusterTests, "true") { +func TestAccResourcesBulk(t *testing.T) { + if !strings.Contains(runBulkTests, "true") { t.Skip("skipping cluster tests") } ctx := context.Background() - name := generateRandomName(accNamePrepend + testaws) + name := generateRandomName(accNamePrepend + "testbulk") origTestCaseVars := make(map[string]config.Variable) maps.Copy(origTestCaseVars, providerCfgIDSecretVars) origTestCaseVars["resource_group_name"] = config.StringVariable(name) origTestCaseVars["network_name"] = config.StringVariable(name) origTestCaseVars["cluster_name"] = config.StringVariable(name) - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["aws_private_link.allowed_principals"] = config.ListVariable( - config.StringVariable("arn:aws:iam::123456789012:root"), - ) + origTestCaseVars["cluster_id"] = config.StringVariable(os.Getenv("BULK_CLUSTER_ID")) + c, err := newTestClients(ctx, clientID, 
clientSecret, "ign") if err != nil { t.Fatal(err) @@ -715,35 +168,12 @@ func TestAccUpdatePrivateLinkClusterAWS(t *testing.T) { PreCheck: func() { testAccPreCheck(t) }, Steps: []resource.TestStep{ { - ConfigFile: config.StaticFile(awsDedicatedPrivateLinkClusterFile), + ConfigFile: config.StaticFile(bulkDataCreateFile), ConfigVariables: origTestCaseVars, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "aws_private_link.enabled", "true"), - ), - }, - { - ResourceName: clusterResourceName, - ConfigFile: config.StaticFile(awsDedicatedPrivateLinkClusterFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - // These two only matter on apply; On apply the user will be - // getting Plan, not State, and have correct values for both. 
- ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "aws_private_link.enabled", "true"), - ), }, { - ConfigFile: config.StaticFile(awsDedicatedPrivateLinkClusterFile), + ConfigFile: config.StaticFile(bulkDataCreateFile), ConfigVariables: origTestCaseVars, Destroy: true, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, @@ -751,12 +181,12 @@ func TestAccUpdatePrivateLinkClusterAWS(t *testing.T) { }, }, ) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ + resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, + F: sweepCluster{ + ClusterName: name, + Client: c, + }.SweepCluster, }) resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ Name: name, @@ -765,129 +195,65 @@ func TestAccUpdatePrivateLinkClusterAWS(t *testing.T) { Client: c, }.SweepNetworks, }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ + resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, + F: sweepResourceGroup{ + ResourceGroupName: name, + Client: c, + }.SweepResourceGroup, }) } -func TestAccResourcesClusterGCP(t *testing.T) { +func TestAccResourcesClusterAWS(t *testing.T) { if !strings.Contains(runClusterTests, "true") { t.Skip("skipping cluster tests") } ctx := context.Background() + name := generateRandomName(accNamePrepend + testaws) + rename := 
generateRandomName(accNamePrepend + testawsRename) + testRunner(ctx, name, rename, "", awsDedicatedClusterFile, t) +} - name := generateRandomName(accNamePrepend + "testgcp") - origTestCaseVars := make(map[string]config.Variable) - maps.Copy(origTestCaseVars, providerCfgIDSecretVars) - origTestCaseVars["resource_group_name"] = config.StringVariable(name) - origTestCaseVars["network_name"] = config.StringVariable(name) - origTestCaseVars["cluster_name"] = config.StringVariable(name) - - rename := generateRandomName(accNamePrepend + "testgcp-rename") - updateTestCaseVars := make(map[string]config.Variable) - maps.Copy(updateTestCaseVars, origTestCaseVars) - updateTestCaseVars["cluster_name"] = config.StringVariable(rename) - - c, err := newTestClients(ctx, clientID, clientSecret, "ign") - if err != nil { - t.Fatal(err) +func TestAccResourcesClusterAzure(t *testing.T) { + if !strings.Contains(runClusterTests, "true") { + t.Skip("skipping cluster tests") } - resource.ParallelTest( - t, - resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Steps: []resource.TestStep{ - { - ConfigFile: config.StaticFile(gcpDedicatedClusterFile), - ConfigVariables: origTestCaseVars, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", name), - ), - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - { - ConfigFile: config.StaticFile(gcpDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), - ), - }, - { - ResourceName: clusterResourceName, - ConfigFile: 
config.StaticFile(gcpDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resourceGroupName, "name", name), - resource.TestCheckResourceAttr(networkResourceName, "name", name), - resource.TestCheckResourceAttr(clusterResourceName, "name", rename), - ), - }, - { - ConfigFile: config.StaticFile(gcpDedicatedClusterFile), - ConfigVariables: updateTestCaseVars, - Destroy: true, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - }, - }, - }, - ) - - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ - Name: name, - F: sweepResourceGroup{ - ResourceGroupName: name, - Client: c, - }.SweepResourceGroup, - }) - resource.AddTestSweepers(generateRandomName("networkSweeper"), &resource.Sweeper{ - Name: name, - F: sweepNetwork{ - NetworkName: name, - Client: c, - }.SweepNetworks, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: name, - F: sweepCluster{ - ClusterName: name, - Client: c, - }.SweepCluster, - }) - resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ - Name: rename, - F: sweepCluster{ - ClusterName: rename, - Client: c, - }.SweepCluster, - }) + if !strings.Contains(os.Getenv("RUN_AZURE_TESTS"), "true") { + t.Skip("skipping azure tests") + } + ctx := context.Background() + name := generateRandomName(accNamePrepend + testazure) + rename := generateRandomName(accNamePrepend + testawsRename) + testRunner(ctx, name, rename, "", azureDedicatedClusterFile, t) } -func TestAccResourcesUserACLsTopic(t *testing.T) { +func TestAccResourcesClusterGCP(t *testing.T) { if !strings.Contains(runClusterTests, "true") { - t.Skip("skipping cluster user-acl-topic tests") + t.Skip("skipping cluster tests") } 
ctx := context.Background() + name := generateRandomName(accNamePrepend + "testgcp") + rename := generateRandomName(accNamePrepend + "testgcp-rename") + testRunner(ctx, name, rename, redpandaVersion, gcpDedicatedClusterFile, t) +} - name := generateRandomName(accNamePrepend + "test-user-acl-topic") +// testRunner is a helper function that runs a series of tests on a given cluster in a given cloud provider. +func testRunner(ctx context.Context, name, rename, version, testFile string, t *testing.T) { origTestCaseVars := make(map[string]config.Variable) maps.Copy(origTestCaseVars, providerCfgIDSecretVars) origTestCaseVars["resource_group_name"] = config.StringVariable(name) origTestCaseVars["network_name"] = config.StringVariable(name) origTestCaseVars["cluster_name"] = config.StringVariable(name) origTestCaseVars["user_name"] = config.StringVariable(name) + origTestCaseVars["topic_name"] = config.StringVariable(name) + if version != "" { + // version is only necessary to resolve a GCP install pack issue. 
we should generally use latest (nil) + origTestCaseVars["version"] = config.StringVariable(version) + } + + updateTestCaseVars := make(map[string]config.Variable) + maps.Copy(updateTestCaseVars, origTestCaseVars) + updateTestCaseVars["cluster_name"] = config.StringVariable(rename) c, err := newTestClients(ctx, clientID, clientSecret, "ign") if err != nil { @@ -897,19 +263,20 @@ func TestAccResourcesUserACLsTopic(t *testing.T) { PreCheck: func() { testAccPreCheck(t) }, Steps: []resource.TestStep{ { - ConfigFile: config.StaticFile(dedicatedUserACLsTopicFile), - ConfigVariables: origTestCaseVars, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + ConfigFile: config.StaticFile(testFile), + ConfigVariables: origTestCaseVars, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceGroupName, "name", name), resource.TestCheckResourceAttr(networkResourceName, "name", name), resource.TestCheckResourceAttr(clusterResourceName, "name", name), resource.TestCheckResourceAttr(userResourceName, "name", name), + resource.TestCheckResourceAttr(topicResourceName, "name", name), ), + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, }, { ResourceName: userResourceName, - ConfigFile: config.StaticFile(dedicatedUserACLsTopicFile), + ConfigFile: config.StaticFile(testFile), ConfigVariables: origTestCaseVars, ImportState: true, ImportStateIdFunc: func(_ *terraform.State) (string, error) { @@ -937,6 +304,7 @@ func TestAccResourcesUserACLsTopic(t *testing.T) { } return nil }, + ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceGroupName, "name", name), @@ -946,15 +314,40 @@ func TestAccResourcesUserACLsTopic(t *testing.T) { ), }, { - ConfigFile: config.StaticFile(dedicatedUserACLsTopicFile), - ConfigVariables: origTestCaseVars, + ConfigFile: config.StaticFile(testFile), + 
ConfigVariables: updateTestCaseVars, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceGroupName, "name", name), + resource.TestCheckResourceAttr(networkResourceName, "name", name), + resource.TestCheckResourceAttr(clusterResourceName, "name", rename), + ), + }, + { + ResourceName: clusterResourceName, + ConfigFile: config.StaticFile(testFile), + ConfigVariables: updateTestCaseVars, + ImportState: true, + ImportStateVerify: true, + // These two only matter on apply; On apply the user will be + // getting Plan, not State, and have correct values for both. + ImportStateVerifyIgnore: []string{"tags", "allow_deletion"}, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resourceGroupName, "name", name), + resource.TestCheckResourceAttr(networkResourceName, "name", name), + resource.TestCheckResourceAttr(clusterResourceName, "name", rename), + ), + }, + { + ConfigFile: config.StaticFile(testFile), + ConfigVariables: updateTestCaseVars, Destroy: true, ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, }, }, }, ) - resource.AddTestSweepers(generateRandomName("resourcegroupSweeper"), &resource.Sweeper{ Name: name, F: sweepResourceGroup{ @@ -976,6 +369,13 @@ func TestAccResourcesUserACLsTopic(t *testing.T) { Client: c, }.SweepCluster, }) + resource.AddTestSweepers(generateRandomName("clusterSweeper"), &resource.Sweeper{ + Name: rename, + F: sweepCluster{ + ClusterName: rename, + Client: c, + }.SweepCluster, + }) } func TestAccResourcesWithDataSources(t *testing.T) { diff --git a/redpanda/tests/sweep_cluster.go b/redpanda/tests/sweep_cluster.go index f91fa23d..0c430075 100644 --- a/redpanda/tests/sweep_cluster.go +++ b/redpanda/tests/sweep_cluster.go @@ -31,7 +31,7 @@ type sweepCluster struct { func (s sweepCluster) SweepCluster(_ string) error { ctx := context.Background() - 
cluster, err := utils.GetClusterUntilRunningState(ctx, 0, 50, s.ClusterName, s.Client) + cluster, err := utils.GetClusterUntilRunningState(ctx, 0, 50, s.ClusterName, 1*time.Minute, s.Client) if err != nil { return err } diff --git a/redpanda/tests/utils.go b/redpanda/tests/utils.go index 5c73695d..3ede7b1b 100644 --- a/redpanda/tests/utils.go +++ b/redpanda/tests/utils.go @@ -1,3 +1,18 @@ +// Copyright 2023 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package tests import ( diff --git a/redpanda/utils/utils.go b/redpanda/utils/utils.go index 4e75fb28..a2952c60 100644 --- a/redpanda/utils/utils.go +++ b/redpanda/utils/utils.go @@ -32,6 +32,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-go/tftypes" "github.com/hashicorp/terraform-plugin-log/tflog" rpknet "github.com/redpanda-data/redpanda/src/go/rpk/pkg/net" "github.com/redpanda-data/terraform-provider-redpanda/redpanda/cloud" @@ -105,7 +106,7 @@ func ClusterTypeToString(provider controlplanev1beta2.Cluster_Type) string { } // AreWeDoneYet checks an operation's state until one of completion, failure or timeout is reached. 
-func AreWeDoneYet(ctx context.Context, op *controlplanev1beta2.Operation, timeout time.Duration, waitUnit time.Duration, client controlplanev1beta2grpc.OperationServiceClient) error { +func AreWeDoneYet(ctx context.Context, op *controlplanev1beta2.Operation, timeout, waitUnit time.Duration, client controlplanev1beta2grpc.OperationServiceClient) error { startTime := time.Now() endTime := startTime.Add(timeout) errChan := make(chan error, 1) @@ -178,7 +179,7 @@ func ConnectionTypeToString(t controlplanev1beta2.Cluster_ConnectionType) string func TypeListToStringSlice(t types.List) []string { var s []string for _, v := range t.Elements() { - s = append(s, strings.Trim(v.String(), "\"")) // it's easier to strip the quotes than type coverting until you hit something that doesn't include them + s = append(s, strings.Trim(v.String(), "\"")) // it's easier to strip the quotes than type converting until you hit something that doesn't include them } return s } @@ -189,6 +190,15 @@ func TestingOnlyStringSliceToTypeList(s []string) types.List { return o } +// StringSliceToSliceValues converts a string slice to a slice of tftypes.Value +func StringSliceToSliceValues(s []string) []tftypes.Value { + var values []tftypes.Value + for _, v := range s { + values = append(values, tftypes.NewValue(tftypes.String, v)) + } + return values +} + // TrimmedStringValue returns the string value of a types.String with the quotes removed. 
// This is necessary as terraform has a tendency to slap these bad boys in at random which causes the API to fail func TrimmedStringValue(s string) types.String { @@ -251,6 +261,9 @@ func UserMechanismToString(m *dataplanev1alpha1.SASLMechanism) string { func TopicConfigurationToMap(cfg []*dataplanev1alpha1.Topic_Configuration) (types.Map, error) { configs := make(map[string]attr.Value, len(cfg)) for _, v := range cfg { + if v.Value == nil { + return types.Map{}, fmt.Errorf("nil value for topic configuration %q", v.Name) + } configs[v.Name] = types.StringValue(*v.Value) } cfgMap, diag := types.MapValue(types.StringType, configs) @@ -339,7 +352,7 @@ func SplitSchemeDefPort(url, def string) (string, error) { } // GetClusterUntilRunningState returns a cluster in the running state or an error -func GetClusterUntilRunningState(ctx context.Context, count, limit int, clusterName string, client *cloud.ControlPlaneClientSet) (*controlplanev1beta2.Cluster, error) { +func GetClusterUntilRunningState(ctx context.Context, count, limit int, clusterName string, wait time.Duration, client cloud.CpClientSet) (*controlplanev1beta2.Cluster, error) { count++ if count >= limit { return nil, fmt.Errorf("cluster %q did not reach the running state after %d attempts", clusterName, count) @@ -353,12 +366,12 @@ func GetClusterUntilRunningState(ctx context.Context, count, limit int, clusterN return cluster, nil } - time.Sleep(1 * time.Minute) - return GetClusterUntilRunningState(ctx, count, limit, clusterName, client) + time.Sleep(wait) + return GetClusterUntilRunningState(ctx, count, limit, clusterName, wait, client) } // GetServerlessClusterUntilRunningState returns a serverless cluster in the running state or an error -func GetServerlessClusterUntilRunningState(ctx context.Context, count, limit int, clusterName string, client *cloud.ControlPlaneClientSet) (*controlplanev1beta2.ServerlessCluster, error) { +func GetServerlessClusterUntilRunningState(ctx context.Context, count, limit int, 
clusterName string, client cloud.CpClientSet) (*controlplanev1beta2.ServerlessCluster, error) { count++ if count >= limit { return nil, fmt.Errorf("serverless cluster %q did not reach the running state after %d attempts", clusterName, count) @@ -380,7 +393,10 @@ func GetServerlessClusterUntilRunningState(ctx context.Context, count, limit int func TypeMapToStringMap(tags types.Map) map[string]string { tagsMap := make(map[string]string) for k, v := range tags.Elements() { - tagsMap[k] = strings.ReplaceAll(v.String(), "\"", "") + tagsMap[k] = strings.ReplaceAll(strings.ReplaceAll(v.String(), "\\", ""), "\"", "") + } + if len(tagsMap) == 0 { + return nil } return tagsMap } diff --git a/redpanda/utils/utils_test.go b/redpanda/utils/utils_test.go index 35fbe397..99fc3291 100644 --- a/redpanda/utils/utils_test.go +++ b/redpanda/utils/utils_test.go @@ -2,12 +2,16 @@ package utils import ( "context" + "fmt" "reflect" + "sort" "testing" "time" controlplanev1beta2 "buf.build/gen/go/redpandadata/cloud/protocolbuffers/go/redpanda/api/controlplane/v1beta2" + dataplanev1alpha1 "buf.build/gen/go/redpandadata/dataplane/protocolbuffers/go/redpanda/api/dataplane/v1alpha1" "github.com/golang/mock/gomock" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "github.com/redpanda-data/terraform-provider-redpanda/redpanda/mocks" @@ -139,13 +143,18 @@ func TestTypeMapToStringMap(t *testing.T) { { name: "Empty map", args: args{tags: mustMap(t, map[string]string{})}, - want: map[string]string{}, + want: nil, }, { name: "Single key", args: args{tags: mustMap(t, map[string]string{"key": "value"})}, want: map[string]string{"key": "value"}, }, + { + name: "Single key with quotes", + args: args{tags: mustMap(t, map[string]string{"key": `"value"`})}, + want: map[string]string{"key": "value"}, + }, { name: "Multiple keys", args: args{tags: mustMap(t, 
map[string]string{"key1": "value1", "key2": "value2"})}, @@ -160,3 +169,652 @@ func TestTypeMapToStringMap(t *testing.T) { }) } } + +func TestTypeListToStringSlice(t *testing.T) { + testCases := []struct { + name string + input types.List + expected []string + }{ + { + name: "test conversion", + input: TestingOnlyStringSliceToTypeList([]string{"a", "b", "c"}), + expected: []string{"a", "b", "c"}, + }, + { + name: "test empty conversion", + input: TestingOnlyStringSliceToTypeList([]string{}), + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := TypeListToStringSlice(tc.input) + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Expected %v, but got %v", tc.expected, result) + } + }) + } +} + +func TestFindUserByName(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockUserServiceClient(ctrl) + + testCases := []struct { + name string + setupMock func() + inputName string + expectedUser *dataplanev1alpha1.ListUsersResponse_User + expectedErr string + }{ + { + name: "User found", + setupMock: func() { + mockClient.EXPECT().ListUsers(gomock.Any(), &dataplanev1alpha1.ListUsersRequest{ + Filter: &dataplanev1alpha1.ListUsersRequest_Filter{ + Name: "alice", + }, + }).Return(&dataplanev1alpha1.ListUsersResponse{ + Users: []*dataplanev1alpha1.ListUsersResponse_User{ + {Name: "alice"}, + {Name: "bob"}, + }, + }, nil) + }, + inputName: "alice", + expectedUser: &dataplanev1alpha1.ListUsersResponse_User{Name: "alice"}, + expectedErr: "", + }, + { + name: "User not found", + setupMock: func() { + mockClient.EXPECT().ListUsers(gomock.Any(), &dataplanev1alpha1.ListUsersRequest{ + Filter: &dataplanev1alpha1.ListUsersRequest_Filter{ + Name: "charlie", + }, + }).Return(&dataplanev1alpha1.ListUsersResponse{ + Users: []*dataplanev1alpha1.ListUsersResponse_User{ + {Name: "alice"}, + {Name: "bob"}, + }, + }, nil) + }, + inputName: "charlie", + expectedUser: nil, + expectedErr: 
"user not found", + }, + { + name: "ListUsers error", + setupMock: func() { + mockClient.EXPECT().ListUsers(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("connection error")) + }, + inputName: "alice", + expectedUser: nil, + expectedErr: "connection error", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMock() + + user, err := FindUserByName(context.Background(), tc.inputName, mockClient) + + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(user, tc.expectedUser) { + t.Errorf("Expected user %+v, but got %+v", tc.expectedUser, user) + } + }) + } +} + +func TestSplitSchemeDefPort(t *testing.T) { + testCases := []struct { + name string + url string + defaultPort string + expected string + expectError bool + }{ + { + name: "URL with scheme and port", + url: "http://example.com:8080", + defaultPort: "80", + expected: "example.com:8080", + expectError: false, + }, + { + name: "URL with scheme, no port", + url: "https://example.com", + defaultPort: "443", + expected: "example.com:443", + expectError: false, + }, + { + name: "URL without scheme, with port", + url: "example.com:9090", + defaultPort: "80", + expected: "example.com:9090", + expectError: false, + }, + { + name: "URL without scheme, no port", + url: "example.com", + defaultPort: "80", + expected: "example.com:80", + expectError: false, + }, + { + name: "IP address with port", + url: "192.168.1.1:8080", + defaultPort: "80", + expected: "192.168.1.1:8080", + expectError: false, + }, + { + name: "IP address without port", + url: "192.168.1.1", + defaultPort: "80", + expected: "192.168.1.1:80", + expectError: false, + }, + { + name: "Invalid URL", + url: "http://[invalid", + defaultPort: "80", + 
expected: "", + expectError: true, + }, + { + name: "Empty URL", + url: "", + defaultPort: "80", + expected: "", + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := SplitSchemeDefPort(tc.url, tc.defaultPort) + + if tc.expectError { + if err == nil { + t.Errorf("Expected an error, but got none") + } + } else { + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if result != tc.expected { + t.Errorf("Expected %q, but got %q", tc.expected, result) + } + } + }) + } +} + +func TestTopicConfigurationToMap(t *testing.T) { + testCases := []struct { + name string + input []*dataplanev1alpha1.Topic_Configuration + expected types.Map + expectedErr string + }{ + { + name: "Empty configuration", + input: []*dataplanev1alpha1.Topic_Configuration{}, + expected: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{}) + return m + }(), + expectedErr: "", + }, + { + name: "Single configuration", + input: []*dataplanev1alpha1.Topic_Configuration{ + {Name: "retention.ms", Value: StringToStringPointer("86400000")}, + }, + expected: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "retention.ms": types.StringValue("86400000"), + }) + return m + }(), + expectedErr: "", + }, + { + name: "Multiple configurations", + input: []*dataplanev1alpha1.Topic_Configuration{ + {Name: "retention.ms", Value: StringToStringPointer("86400000")}, + {Name: "cleanup.policy", Value: StringToStringPointer("delete")}, + {Name: "max.message.bytes", Value: StringToStringPointer("1000000")}, + }, + expected: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "retention.ms": types.StringValue("86400000"), + "cleanup.policy": types.StringValue("delete"), + "max.message.bytes": types.StringValue("1000000"), + }) + return m + }(), + expectedErr: "", + }, + { + name: "Configuration with nil value", + input: 
[]*dataplanev1alpha1.Topic_Configuration{ + {Name: "retention.ms", Value: StringToStringPointer("86400000")}, + {Name: "cleanup.policy", Value: nil}, + }, + expected: func() types.Map { + return types.Map{} + }(), + expectedErr: "nil value for topic configuration \"cleanup.policy\"", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := TopicConfigurationToMap(tc.input) + + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Expected %+v, but got %+v", tc.expected, result) + } + }) + } +} + +func TestMapToCreateTopicConfiguration(t *testing.T) { + testCases := []struct { + name string + input types.Map + expected []*dataplanev1alpha1.CreateTopicRequest_Topic_Config + expectedErr string + }{ + { + name: "Empty configuration", + input: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{}) + return m + }(), + expected: nil, + expectedErr: "", + }, + { + name: "Single configuration", + input: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "retention.ms": types.StringValue("86400000"), + }) + return m + }(), + expected: []*dataplanev1alpha1.CreateTopicRequest_Topic_Config{ + {Name: "retention.ms", Value: StringToStringPointer("86400000")}, + }, + expectedErr: "", + }, + { + name: "Multiple configurations", + input: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "cleanup.policy": types.StringValue("delete"), + "retention.ms": types.StringValue("86400000"), + "max.message.bytes": types.StringValue("1000000"), + }) + return m + }(), + expected: []*dataplanev1alpha1.CreateTopicRequest_Topic_Config{ + {Name: 
"cleanup.policy", Value: StringToStringPointer("delete")}, + {Name: "retention.ms", Value: StringToStringPointer("86400000")}, + {Name: "max.message.bytes", Value: StringToStringPointer("1000000")}, + }, + expectedErr: "", + }, + { + name: "Configuration with null value", + input: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "retention.ms": types.StringValue("86400000"), + "cleanup.policy": types.StringNull(), + }) + return m + }(), + expected: nil, + expectedErr: "topic configuration \"cleanup.policy\" must have a value", + }, + { + name: "Configuration with unknown value", + input: func() types.Map { + m, _ := types.MapValue(types.StringType, map[string]attr.Value{ + "retention.ms": types.StringValue("86400000"), + "cleanup.policy": types.StringUnknown(), + }) + return m + }(), + expected: nil, + expectedErr: "topic configuration \"cleanup.policy\" must have a value", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := MapToCreateTopicConfiguration(tc.input) + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + sort.Slice(tc.expected, func(i, j int) bool { + return tc.expected[i].Name < tc.expected[j].Name + }) + + sort.Slice(result, func(i, j int) bool { + return result[i].Name < result[j].Name + }) + + if !reflect.DeepEqual(result, tc.expected) { + t.Errorf("Expected %+v, but got %+v", tc.expected, result) + } + }) + } +} + +func TestFindTopicByName(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockTopicServiceClient(ctrl) + + testCases := []struct { + name string + setupMock func() + inputName string + expectedTopic *dataplanev1alpha1.ListTopicsResponse_Topic + expectedErr 
string + }{ + { + name: "Topic found", + setupMock: func() { + mockClient.EXPECT().ListTopics(gomock.Any(), &dataplanev1alpha1.ListTopicsRequest{ + Filter: &dataplanev1alpha1.ListTopicsRequest_Filter{ + NameContains: "test-topic", + }, + }).Return(&dataplanev1alpha1.ListTopicsResponse{ + Topics: []*dataplanev1alpha1.ListTopicsResponse_Topic{ + {Name: "test-topic"}, + {Name: "another-topic"}, + }, + }, nil) + }, + inputName: "test-topic", + expectedTopic: &dataplanev1alpha1.ListTopicsResponse_Topic{Name: "test-topic"}, + expectedErr: "", + }, + { + name: "Topic not found", + setupMock: func() { + mockClient.EXPECT().ListTopics(gomock.Any(), &dataplanev1alpha1.ListTopicsRequest{ + Filter: &dataplanev1alpha1.ListTopicsRequest_Filter{ + NameContains: "non-existent-topic", + }, + }).Return(&dataplanev1alpha1.ListTopicsResponse{ + Topics: []*dataplanev1alpha1.ListTopicsResponse_Topic{ + {Name: "test-topic"}, + {Name: "another-topic"}, + }, + }, nil) + }, + inputName: "non-existent-topic", + expectedTopic: nil, + expectedErr: "topic non-existent-topic not found", + }, + { + name: "ListTopics error", + setupMock: func() { + mockClient.EXPECT().ListTopics(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("connection error")) + }, + inputName: "test-topic", + expectedTopic: nil, + expectedErr: "connection error", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMock() + + topic, err := FindTopicByName(context.Background(), tc.inputName, mockClient) + + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(topic, tc.expectedTopic) { + t.Errorf("Expected topic %+v, but got %+v", tc.expectedTopic, topic) + } + }) + } +} + +func TestGetClusterUntilRunningState(t *testing.T) 
{ + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockClient := mocks.NewMockCpClientSet(ctrl) + ctx := context.Background() + + testCases := []struct { + name string + clusterName string + limit int + setupMock func() + wait time.Duration + expectedCluster *controlplanev1beta2.Cluster + expectedErr string + }{ + { + name: "Cluster becomes ready immediately", + clusterName: "test-cluster", + limit: 5, + wait: 1 * time.Second, + setupMock: func() { + mockClient.EXPECT().ClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_READY}, nil, + ) + }, + expectedCluster: &controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_READY}, + expectedErr: "", + }, + { + name: "Cluster becomes ready after multiple attempts", + clusterName: "test-cluster", + limit: 5, + wait: 1 * time.Second, + setupMock: func() { + gomock.InOrder( + mockClient.EXPECT().ClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_CREATING}, nil, + ), + mockClient.EXPECT().ClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_READY}, nil, + ), + ) + }, + expectedCluster: &controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_READY}, + expectedErr: "", + }, + { + name: "Cluster not found", + clusterName: "non-existent-cluster", + wait: 1 * time.Second, + limit: 5, + setupMock: func() { + mockClient.EXPECT().ClusterForName(gomock.Any(), "non-existent-cluster").Return( + nil, fmt.Errorf("cluster not found"), + ).AnyTimes() + }, + expectedCluster: nil, + expectedErr: "cluster \"non-existent-cluster\" did not reach the running state after 5 attempts", + }, + { + name: "Timeout reached", + clusterName: "slow-cluster", + limit: 3, + wait: 1 * time.Second, + setupMock: func() { + mockClient.EXPECT().ClusterForName(gomock.Any(), "slow-cluster").Return( + 
&controlplanev1beta2.Cluster{State: controlplanev1beta2.Cluster_STATE_CREATING}, nil, + ).AnyTimes() + }, + expectedCluster: nil, + expectedErr: "cluster \"slow-cluster\" did not reach the running state after 3 attempts", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMock() + cluster, err := GetClusterUntilRunningState(ctx, 0, tc.limit, tc.clusterName, tc.wait, mockClient) + + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(cluster, tc.expectedCluster) { + t.Errorf("Expected cluster %+v, but got %+v", tc.expectedCluster, cluster) + } + }) + } +} + +func TestGetServerlessClusterUntilRunningState(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mockClient := mocks.NewMockCpClientSet(ctrl) + ctx := context.Background() + + testCases := []struct { + name string + clusterName string + limit int + setupMock func() + expectedCluster *controlplanev1beta2.ServerlessCluster + expectedErr string + }{ + { + name: "Cluster becomes ready immediately", + clusterName: "test-cluster", + limit: 5, + setupMock: func() { + mockClient.EXPECT().ServerlessClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.ServerlessCluster{State: controlplanev1beta2.ServerlessCluster_STATE_READY}, nil, + ) + }, + expectedCluster: &controlplanev1beta2.ServerlessCluster{State: controlplanev1beta2.ServerlessCluster_STATE_READY}, + expectedErr: "", + }, + { + name: "Cluster becomes ready after multiple attempts", + clusterName: "test-cluster", + limit: 5, + setupMock: func() { + gomock.InOrder( + mockClient.EXPECT().ServerlessClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.ServerlessCluster{State: 
controlplanev1beta2.ServerlessCluster_STATE_CREATING}, nil, + ), + mockClient.EXPECT().ServerlessClusterForName(gomock.Any(), "test-cluster").Return( + &controlplanev1beta2.ServerlessCluster{State: controlplanev1beta2.ServerlessCluster_STATE_READY}, nil, + ), + ) + }, + expectedCluster: &controlplanev1beta2.ServerlessCluster{State: controlplanev1beta2.ServerlessCluster_STATE_READY}, + expectedErr: "", + }, + { + name: "Cluster not found", + clusterName: "non-existent-cluster", + limit: 5, + setupMock: func() { + mockClient.EXPECT().ServerlessClusterForName(gomock.Any(), "non-existent-cluster").Return( + nil, fmt.Errorf("cluster not found"), + ).AnyTimes() + }, + expectedCluster: nil, + expectedErr: "serverless cluster \"non-existent-cluster\" did not reach the running state after 5 attempts", + }, + { + name: "Timeout reached", + clusterName: "slow-cluster", + limit: 3, + setupMock: func() { + mockClient.EXPECT().ServerlessClusterForName(gomock.Any(), "slow-cluster").Return( + &controlplanev1beta2.ServerlessCluster{State: controlplanev1beta2.ServerlessCluster_STATE_CREATING}, nil, + ).AnyTimes() + }, + expectedCluster: nil, + expectedErr: "serverless cluster \"slow-cluster\" did not reach the running state after 3 attempts", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMock() + + cluster, err := GetServerlessClusterUntilRunningState(ctx, 0, tc.limit, tc.clusterName, mockClient) + + if tc.expectedErr != "" { + if err == nil { + t.Errorf("Expected error %q, but got nil", tc.expectedErr) + } else if err.Error() != tc.expectedErr { + t.Errorf("Expected error %q, but got %q", tc.expectedErr, err.Error()) + } + } else if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if !reflect.DeepEqual(cluster, tc.expectedCluster) { + t.Errorf("Expected cluster %+v, but got %+v", tc.expectedCluster, cluster) + } + }) + } +} diff --git a/redpanda/validators/cloud_provider.go b/redpanda/validators/cloud_provider.go new file 
mode 100644 index 00000000..4b7ea4b3 --- /dev/null +++ b/redpanda/validators/cloud_provider.go @@ -0,0 +1,62 @@ +// Copyright 2023 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validators + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// CloudProviderDependentValidator is a custom validator to ensure that an attribute is only set when cloud_provider is a specific value +// For example when using this on aws_private_link it will ensure that the HCL fails validation unless cloud_provider is set to "aws" +// AttributeName should be the name of the attribute that is being validated +// CloudProvider should be the value of cloud_provider that the attribute is dependent on +type CloudProviderDependentValidator struct { + AttributeName string + CloudProvider string +} + +// Description provides a description of the validator +func (v CloudProviderDependentValidator) Description(_ context.Context) string { + return fmt.Sprintf("ensures that %s is only set when cloud_provider is %s", v.AttributeName, v.CloudProvider) +} + +// MarkdownDescription provides a description of the validator in markdown format +func (v CloudProviderDependentValidator) MarkdownDescription(_ context.Context) string { + return fmt.Sprintf("Ensures that `%s` is only set when `cloud_provider` is `%s`", v.AttributeName, v.CloudProvider) +} + 
+// ValidateObject validates an object +func (v CloudProviderDependentValidator) ValidateObject(ctx context.Context, req validator.ObjectRequest, resp *validator.ObjectResponse) { + var cloudProvider types.String + if diags := req.Config.GetAttribute(ctx, req.Path.ParentPath().AtName("cloud_provider"), &cloudProvider); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + // If the object is set and cloud_provider is known but doesn't match, add an error + if !req.ConfigValue.IsNull() && !cloudProvider.IsUnknown() && cloudProvider.ValueString() != v.CloudProvider { + resp.Diagnostics.AddAttributeError( + req.Path, + "Invalid Configuration", + fmt.Sprintf("%s can only be set when cloud_provider is %s, but it is set to %s", + v.AttributeName, v.CloudProvider, cloudProvider.ValueString()), + ) + } +} diff --git a/redpanda/validators/throughput_tiers.go b/redpanda/validators/throughput_tiers.go new file mode 100644 index 00000000..9f27d2e7 --- /dev/null +++ b/redpanda/validators/throughput_tiers.go @@ -0,0 +1,213 @@ +// Copyright 2023 Redpanda Data, Inc. +// +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validators + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// ThroughputTierValidator is a custom validator that ensures that the throughput tier is valid for the given cloud provider, region, and zones +type ThroughputTierValidator struct{} + +// Description provides a description of the validator +func (ThroughputTierValidator) Description(_ context.Context) string { + return "ensures that the throughput tier is valid for the given cloud provider, region, and zones" +} + +// MarkdownDescription provides a description of the validator in markdown format +func (ThroughputTierValidator) MarkdownDescription(_ context.Context) string { + return "Ensures that the throughput tier is valid for the given cloud provider, region, and zones" +} + +// ValidateString validates a string +func (ThroughputTierValidator) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if req.ConfigValue.IsUnknown() || req.ConfigValue.IsNull() { + return + } + + var zones types.List + var cloudProvider, region types.String + resp.Diagnostics.Append(req.Config.GetAttribute(ctx, path.Root("cloud_provider"), &cloudProvider)...) + resp.Diagnostics.Append(req.Config.GetAttribute(ctx, path.Root("region"), ®ion)...) + resp.Diagnostics.Append(req.Config.GetAttribute(ctx, path.Root("zones"), &zones)...) 
+ if resp.Diagnostics.HasError() { + return + } + throughputTier := req.ConfigValue.ValueString() + + // Validate the throughput tier + validTiers := make(map[string]bool) + invalidZones := make([]string, 0) + + for _, zoneElement := range zones.Elements() { + zone := zoneElement.(types.String).ValueString() + if tiers, ok := validThroughputTiers[cloudProvider.ValueString()][region.ValueString()][zone]; ok { + for _, tier := range tiers { + validTiers[tier] = true + } + } else { + invalidZones = append(invalidZones, zone) + } + } + + if len(invalidZones) > 0 { + resp.Diagnostics.AddAttributeError( + req.Path, + "Invalid Zones", + fmt.Sprintf("The following zones are not valid for the given cloud provider (%s) and region (%s): %v", + cloudProvider.ValueString(), region.ValueString(), invalidZones), + ) + return + } + + if _, isValid := validTiers[throughputTier]; !isValid { + resp.Diagnostics.AddAttributeError( + req.Path, + "Invalid Throughput Tier", + fmt.Sprintf("The throughput tier %s is not valid for the given cloud provider (%s), region (%s), and zones. 
Valid tiers are: %v", + throughputTier, cloudProvider.ValueString(), region.ValueString(), getKeys(validTiers)), + ) + } +} + +// Helper function to get keys from a map +func getKeys(m map[string]bool) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + return keys +} + +var validThroughputTiers = map[string]map[string]map[string][]string{ + "aws": { + "ap-south-1": { + "aps1-az1": {"tier-1-aws-v2-x86", "tier-2-aws-v2-x86", "tier-3-aws-v2-x86", "tier-4-aws-v2-x86", "tier-5-aws-v2-x86"}, + "aps1-az2": {"tier-1-aws-v2-x86", "tier-2-aws-v2-x86", "tier-3-aws-v2-x86", "tier-4-aws-v2-x86", "tier-5-aws-v2-x86"}, + "aps1-az3": {"tier-1-aws-v2-x86", "tier-2-aws-v2-x86", "tier-3-aws-v2-x86", "tier-4-aws-v2-x86", "tier-5-aws-v2-x86"}, + }, + "ap-southeast-1": { + "apse1-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "apse1-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "apse1-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "ap-southeast-2": { + "apse2-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "apse2-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "apse2-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "ca-central-1": { + "cac1-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "cac1-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "cac1-az4": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "eu-central-1": { + "euc1-az1": 
{"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euc1-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euc1-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "eu-west-1": { + "euw1-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euw1-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euw1-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "eu-west-2": { + "euw2-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euw2-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "euw2-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "us-east-1": { + "use1-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "use1-az4": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "use1-az6": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "us-east-2": { + "use2-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "use2-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "use2-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + "us-west-2": { + "usw2-az1": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", 
"tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "usw2-az2": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + "usw2-az3": {"tier-1-aws-v2-arm", "tier-2-aws-v2-arm", "tier-3-aws-v2-arm", "tier-4-aws-v2-arm", "tier-5-aws-v2-arm"}, + }, + }, + "gcp": { + "asia-south1": { + "asia-south1-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "asia-south1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "asia-south1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "asia-southeast1": { + "asia-southeast1-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "asia-southeast1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "asia-southeast1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "australia-southeast1": { + "australia-southeast1-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "australia-southeast1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "australia-southeast1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "europe-west1": { + "europe-west1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "europe-west1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "europe-west1-d": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "europe-west2": { + 
"europe-west2-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "europe-west2-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "europe-west2-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "northamerica-northeast1": { + "northamerica-northeast1-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "northamerica-northeast1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "northamerica-northeast1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "us-central1": { + "us-central1-a": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "us-central1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "us-central1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "us-central1-f": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + "us-east1": { + "us-east1-b": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "us-east1-c": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + "us-east1-d": {"tier-1-gcp-v2-x86", "tier-2-gcp-v2-x86", "tier-3-gcp-v2-x86", "tier-4-gcp-v2-x86", "tier-5-gcp-v2-x86"}, + }, + }, + "azure": { + "eastus": { + "eastus-az1": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "eastus-az2": {"tier-1-azure", "tier-2-azure", 
"tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "eastus-az3": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + }, + "ukwest": { + "ukwest-az1": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "ukwest-az2": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "ukwest-az3": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + }, + "uksouth": { + "uksouth-az1": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "uksouth-az2": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + "uksouth-az3": {"tier-1-azure", "tier-2-azure", "tier-3-azure", "tier-4-azure", "tier-5-azure", "tier-6-azure", "tier-7-azure", "tier-8-azure", "tier-9-azure"}, + }, + }, +} diff --git a/redpanda/validators/throughput_tiers_test.go b/redpanda/validators/throughput_tiers_test.go new file mode 100644 index 00000000..f7d2dc04 --- /dev/null +++ b/redpanda/validators/throughput_tiers_test.go @@ -0,0 +1,167 @@ +package validators + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + 
"github.com/redpanda-data/terraform-provider-redpanda/redpanda/utils" +) + +func TestThroughputTierValidator_ValidateString(t *testing.T) { + v := ThroughputTierValidator{} + + tests := []struct { + name string + cloudProvider string + region string + zones []string + throughputTier string + expectError bool + }{ + { + name: "Valid AWS tier", + cloudProvider: "aws", + region: "us-east-1", + zones: []string{"use1-az2"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: false, + }, + { + name: "Invalid AWS tier", + cloudProvider: "aws", + region: "us-east-1", + zones: []string{"use1-az2"}, + throughputTier: "tier-1-aws-v2-x86", + expectError: true, + }, + { + name: "Valid GCP tier", + cloudProvider: "gcp", + region: "us-central1", + zones: []string{"us-central1-a"}, + throughputTier: "tier-3-gcp-v2-x86", + expectError: false, + }, + { + name: "Invalid GCP tier", + cloudProvider: "gcp", + region: "us-central1", + zones: []string{"us-central1-a"}, + throughputTier: "tier-6-gcp-v2-x86", + expectError: true, + }, + { + name: "Valid Azure tier", + cloudProvider: "azure", + region: "eastus", + zones: []string{"eastus-az1"}, + throughputTier: "tier-5-azure", + expectError: false, + }, + { + name: "Invalid Azure tier", + cloudProvider: "azure", + region: "eastus", + zones: []string{"eastus-az1"}, + throughputTier: "tier-1-azure-beta", + expectError: true, + }, + { + name: "Invalid cloud provider", + cloudProvider: "invalid", + region: "us-east-1", + zones: []string{"use1-az2"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: true, + }, + { + name: "Invalid region", + cloudProvider: "aws", + region: "invalid-region", + zones: []string{"use1-az2"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: true, + }, + { + name: "Invalid zone", + cloudProvider: "aws", + region: "us-east-1", + zones: []string{"invalid-zone"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: true, + }, + { + name: "Multiple valid zones", + cloudProvider: "aws", + region: "us-east-1", + 
zones: []string{"use1-az2", "use1-az4", "use1-az6"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: false, + }, + { + name: "Mix of valid and invalid zones", + cloudProvider: "aws", + region: "us-east-1", + zones: []string{"use1-az2", "invalid-zone"}, + throughputTier: "tier-1-aws-v2-arm", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resp := &validator.StringResponse{} + req := validator.StringRequest{ + Path: path.Root("throughput_tier"), + PathExpression: path.MatchRoot("throughput_tier"), + ConfigValue: types.StringValue(tt.throughputTier), + Config: tfsdk.Config{ + Raw: tftypes.NewValue(tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "region": tftypes.String, + "cloud_provider": tftypes.String, + "zones": tftypes.List{ElementType: tftypes.String}, + "throughput_tier": tftypes.String, + }, + }, map[string]tftypes.Value{ + "cloud_provider": tftypes.NewValue(tftypes.String, tt.cloudProvider), + "region": tftypes.NewValue(tftypes.String, tt.region), + "zones": tftypes.NewValue(tftypes.List{ElementType: tftypes.String}, utils.StringSliceToSliceValues(tt.zones)), + "throughput_tier": tftypes.NewValue(tftypes.String, tt.throughputTier), + }), + Schema: schema.Schema{ + Attributes: map[string]schema.Attribute{ + "throughput_tier": schema.StringAttribute{ + Required: true, + }, + "cloud_provider": schema.StringAttribute{ + Optional: true, + }, + "region": schema.StringAttribute{ + Optional: true, + }, + "zones": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, + }, + }, + }, + }, + } + v.ValidateString(context.Background(), req, resp) + + if tt.expectError && !resp.Diagnostics.HasError() { + t.Errorf("expected error, but got none") + } + if !tt.expectError && resp.Diagnostics.HasError() { + t.Errorf("unexpected error: %v", resp.Diagnostics) + } + }) + } +} diff --git a/templates/data-sources/cluster.md.tmpl b/templates/data-sources/cluster.md.tmpl index 3581f702..2e264fc5 
100644 --- a/templates/data-sources/cluster.md.tmpl +++ b/templates/data-sources/cluster.md.tmpl @@ -21,7 +21,7 @@ data "redpanda_cluster" "example" { ### Example Usage of a data source BYOC to manage users and ACLs -{{ tffile "examples/datasource/main.tf" }} +{{ tffile "examples/datasource/standard/main.tf" }} ## Limitations diff --git a/templates/data-sources/serverless_cluster.md.tmpl b/templates/data-sources/serverless_cluster.md.tmpl index c47b04b4..f9bac781 100644 --- a/templates/data-sources/serverless_cluster.md.tmpl +++ b/templates/data-sources/serverless_cluster.md.tmpl @@ -21,7 +21,7 @@ data "redpanda_serverless_cluster" "example" { ### Example Usage to create a serverless cluster -{{ tffile "examples/serverless-cluster/main.tf" }} +{{ tffile "examples/cluster/serverless/main.tf" }} ## Limitations diff --git a/templates/index.md.tmpl b/templates/index.md.tmpl index 66c16a35..b1d19013 100644 --- a/templates/index.md.tmpl +++ b/templates/index.md.tmpl @@ -34,8 +34,8 @@ Terraform 1.0 or later: ### Example Usage of a data source BYOC to manage users and ACLs -{{ tffile "examples/datasource/main.tf" }} +{{ tffile "examples/datasource/standard/main.tf" }} ### Example Usage to create a serverless cluster -{{ tffile "examples/serverless-cluster/main.tf" }} +{{ tffile "examples/cluster/serverless/main.tf" }} diff --git a/templates/resources/acl.md.tmpl b/templates/resources/acl.md.tmpl index f316b143..3bf92e76 100644 --- a/templates/resources/acl.md.tmpl +++ b/templates/resources/acl.md.tmpl @@ -15,7 +15,7 @@ Creates an Access Control List (ACL) in a Redpanda cluster. 
## Usage -{{ tffile "examples/user-acl-topic/main.tf" }} +{{ tffile "examples/cluster/aws/main.tf" }} ## Limitations diff --git a/templates/resources/cluster.md.tmpl b/templates/resources/cluster.md.tmpl index 1d1974a4..7bcf677d 100644 --- a/templates/resources/cluster.md.tmpl +++ b/templates/resources/cluster.md.tmpl @@ -32,7 +32,7 @@ We are not currently able to support the provisioning of "BYOC" clusters using t ### Example Usage of a data source BYOC to manage users and ACLs -{{ tffile "examples/datasource/main.tf" }} +{{ tffile "examples/datasource/standard/main.tf" }} ## Import diff --git a/templates/resources/resource_group.md.tmpl b/templates/resources/resource_group.md.tmpl index e1569fd0..7be16e9b 100644 --- a/templates/resources/resource_group.md.tmpl +++ b/templates/resources/resource_group.md.tmpl @@ -15,7 +15,7 @@ Creates a Resource Group in the Redpanda Cloud. ## Usage -{{ tffile "examples/resourcegroup/main.tf" }} +{{ tffile "examples/cluster/aws/main.tf" }} ## Import diff --git a/templates/resources/serverless_cluster.md.tmpl b/templates/resources/serverless_cluster.md.tmpl index 99c0f217..fde16bab 100644 --- a/templates/resources/serverless_cluster.md.tmpl +++ b/templates/resources/serverless_cluster.md.tmpl @@ -17,7 +17,7 @@ Enables the provisioning and management of Redpanda serverless clusters on AWS. 
### On AWS -{{ tffile "examples/serverless-cluster/main.tf" }} +{{ tffile "examples/cluster/serverless/main.tf" }} ## Limitations @@ -25,7 +25,7 @@ We are not currently able to support the provisioning of serverless clusters on ### Example Usage to create a serverless cluster -{{ tffile "examples/serverless-cluster/main.tf" }} +{{ tffile "examples/cluster/serverless/main.tf" }} ## Import diff --git a/templates/resources/topic.md.tmpl b/templates/resources/topic.md.tmpl index c01c3d8c..b624ad31 100644 --- a/templates/resources/topic.md.tmpl +++ b/templates/resources/topic.md.tmpl @@ -15,7 +15,7 @@ Creates a topic in a Redpanda Cluster ## Usage -{{ tffile "examples/user-acl-topic/main.tf" }} +{{ tffile "examples/cluster/aws/main.tf" }} ## Limitations diff --git a/templates/resources/user.md.tmpl b/templates/resources/user.md.tmpl index a085847b..1e7c0fee 100644 --- a/templates/resources/user.md.tmpl +++ b/templates/resources/user.md.tmpl @@ -15,7 +15,7 @@ Creates a topic in a Redpanda Cluster ## Usage -{{ tffile "examples/user-acl-topic/main.tf" }} +{{ tffile "examples/cluster/aws/main.tf" }} ## Security Considerations