diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 43566691..00000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Build and Unit Test - -concurrency: - group: test-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -on: - push: - branches: - - main - - release-* - tags: - - '*' - paths-ignore: - - '**.md' - pull_request: - branches: - - main - - release-* - paths-ignore: - - 'config/**' - - '**.md' - -env: - GO_VERSION: '1.22' - -jobs: - build: - name: Build and Unit Test - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: Lint - uses: golangci/golangci-lint-action@v6 - with: - args: --timeout 10m0s - - name: Verify all generated pieces are up-to-date - run: make generate-all && git add -N . && git diff --exit-code - - name: Unit tests - run: | - make test - - name: Build - run: | - make build - - name: Image build - run: | - make docker-build diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml new file mode 100644 index 00000000..eabaa1dd --- /dev/null +++ b/.github/workflows/build_test.yml @@ -0,0 +1,238 @@ +name: CI +on: + pull_request: + types: + - labeled + - opened + - synchronize + - reopened + branches: + - main + - release-* + paths-ignore: + - 'config/**' + - '**.md' +env: + GO_VERSION: '1.22' + REGISTRY_REPO: 'oci://ghcr.io/mirantis/hmc/charts-ci' + +jobs: + build: + concurrency: + group: build-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + name: Build and Unit Test + runs-on: ubuntu-latest + outputs: + version: ${{ steps.vars.outputs.version }} + clustername: ${{ steps.vars.outputs.clustername }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Lint + uses: golangci/golangci-lint-action@v6 + with: + args: --timeout 10m0s + - name: Verify all generated pieces are up-to-date + run: make generate-all && git add -N . && git diff --exit-code + - name: Unit tests + run: | + make test + - name: Set up Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to GHCR + uses: docker/login-action@v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Get outputs + id: vars + run: | + echo "version=$(git describe --tags --always)" >> $GITHUB_OUTPUT + echo "clustername=ci-$(date +%s | cut -b6-10)" >> $GITHUB_OUTPUT + - name: Build and push HMC controller image + uses: docker/build-push-action@v6 + with: + build-args: | + LD_FLAGS=-s -w -X github.com/Mirantis/hmc/internal/build.Version=${{ steps.vars.outputs.version }} + context: . 
+          platforms: linux/amd64
+          tags: |
+            ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }}
+          push: true
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+      - name: Prepare and push HMC template charts
+        run: |
+          make hmc-chart-release
+          make helm-push
+
+  controller-e2etest:
+    name: E2E Controller
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'skip for now') }}
+    runs-on: ubuntu-latest
+    needs: build
+    concurrency:
+      group: controller-e2e-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    outputs:
+      clustername: ${{ needs.build.outputs.clustername }}
+      version: ${{ needs.build.outputs.version }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup kubectl
+        uses: azure/setup-kubectl@v4
+      - name: Run E2E tests
+        env:
+          GINKGO_LABEL_FILTER: 'controller'
+          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
+          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
+          VERSION: ${{ needs.build.outputs.version }}
+        run: |
+          make test-e2e
+      - name: Archive test results
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: controller-e2etest-logs
+          path: |
+            test/e2e/*.log
+
+  provider-cloud-e2etest:
+    name: E2E Cloud Providers
+    runs-on: ubuntu-latest
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'test e2e') }}
+    needs: build
+    concurrency:
+      group: cloud-e2e-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    outputs:
+      clustername: ${{ needs.build.outputs.clustername }}
+      version: ${{ needs.build.outputs.version }}
+    env:
+      AWS_REGION: us-west-2
+      AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }}
+      AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }}
+      AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }}
+      AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }}
+      AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }}
+      AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Setup kubectl
+        uses: azure/setup-kubectl@v4
+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3
+        with:
+          detached: true
+      - name: Run E2E tests
+        env:
+          GINKGO_LABEL_FILTER: 'provider:cloud'
+          MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }}
+          IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}'
+          VERSION: ${{ needs.build.outputs.version }}
+        run: |
+          make test-e2e
+      - name: Archive test results
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: cloud-e2etest-logs
+          path: |
+            test/e2e/*.log
+
+  provider-onprem-e2etest:
+    name: E2E On-Prem Providers
+    runs-on: self-hosted
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'test e2e') }}
+    needs: build
+    concurrency:
+      group: onprem-e2e-${{ github.head_ref || github.run_id }}
+      cancel-in-progress: true
+    outputs:
+      clustername: ${{ needs.build.outputs.clustername }}
+      version: ${{ needs.build.outputs.version }}
+    env:
+      VSPHERE_USER: ${{ secrets.CI_VSPHERE_USER }}
+      VSPHERE_PASSWORD: ${{ secrets.CI_VSPHERE_PASSWORD }}
+      VSPHERE_SERVER: ${{ secrets.CI_VSPHERE_SERVER }}
+      VSPHERE_THUMBPRINT: ${{ secrets.CI_VSPHERE_THUMBPRINT }}
+      VSPHERE_DATACENTER: ${{ secrets.CI_VSPHERE_DATACENTER }}
+
VSPHERE_DATASTORE: ${{ secrets.CI_VSPHERE_DATASTORE }} + VSPHERE_RESOURCEPOOL: ${{ secrets.CI_VSPHERE_RESOURCEPOOL }} + VSPHERE_FOLDER: ${{ secrets.CI_VSPHERE_FOLDER }} + VSPHERE_CONTROL_PLANE_ENDPOINT: ${{ secrets.CI_VSPHERE_CONTROL_PLANE_ENDPOINT }} + VSPHERE_VM_TEMPLATE: ${{ secrets.CI_VSPHERE_VM_TEMPLATE }} + VSPHERE_NETWORK: ${{ secrets.CI_VSPHERE_NETWORK }} + VSPHERE_SSH_KEY: ${{ secrets.CI_VSPHERE_SSH_KEY }} + NO_CLEANUP: 1 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup kubectl + uses: azure/setup-kubectl@v4 + - name: Run E2E tests + env: + GINKGO_LABEL_FILTER: 'provider:onprem' + MANAGED_CLUSTER_NAME: ${{ needs.build.outputs.clustername }} + IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ needs.build.outputs.version }}' + VERSION: ${{ needs.build.outputs.version }} + run: | + make test-e2e + - name: Archive test results + if: ${{ failure() }} + uses: actions/upload-artifact@v4 + with: + name: onprem-e2etest-logs + path: | + test/e2e/*.log + + cleanup: + name: Cleanup + needs: + - build + - provider-cloud-e2etest + runs-on: ubuntu-latest + if: ${{ always() && !contains(needs.provider-cloud-e2etest.result, 'skipped') && contains(needs.build.result, 'success') }} + timeout-minutes: 15 + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + - name: AWS Test Resources + env: + AWS_REGION: us-west-2 + AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + CLUSTER_NAME: '${{ needs.build.outputs.clustername }}' + run: | + make dev-aws-nuke diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 681c985b..00000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,111 +0,0 @@ -name: E2E Tests - -on: - pull_request: - types: - - labeled - - opened - - synchronize - - reopened - branches: - - main - - release-* - paths-ignore: - - 'config/**' - - '**.md' -env: - GO_VERSION: '1.22' - AWS_REGION: us-west-2 - AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} - AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} - AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} - AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} - NAMESPACE: hmc-system - -jobs: - e2etest: - if: ${{ contains( github.event.pull_request.labels.*.name, 'test e2e') }} - concurrency: - group: test-e2e-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - name: E2E Tests - runs-on: ubuntu-latest - outputs: - clustername: ${{ steps.vars.outputs.clustername }} - version: ${{ steps.vars.outputs.version }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: Set up Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to GHCR - uses: docker/login-action@v3.3.0 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - name: Get outputs - id: vars - run: | - echo "version=$(git describe --tags --always)" >> $GITHUB_OUTPUT - echo "clustername=ci-$(date +%s)-e2e-test" >> $GITHUB_OUTPUT - - name: Build and push HMC 
controller image - uses: docker/build-push-action@v6 - with: - build-args: | - LD_FLAGS=-s -w -X github.com/Mirantis/hmc/internal/build.Version=${{ steps.vars.outputs.version }} - context: . - platforms: linux/amd64 - tags: | - ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }} - push: true - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Prepare and push HMC template charts - run: | - make hmc-chart-release - REGISTRY_REPO="oci://ghcr.io/mirantis/hmc/charts-ci" make helm-push - - name: Setup kubectl - uses: azure/setup-kubectl@v4 - - name: Run E2E tests - env: - MANAGED_CLUSTER_NAME: ${{ steps.vars.outputs.clustername }} - REGISTRY_REPO: 'oci://ghcr.io/mirantis/hmc/charts-ci' - IMG: 'ghcr.io/mirantis/hmc/controller-ci:${{ steps.vars.outputs.version }}' - run: | - make test-e2e - - name: Archive test results - if: ${{ failure() }} - uses: actions/upload-artifact@v4 - with: - name: test-logs - path: | - test/e2e/*.log - cleanup: - name: Cleanup - needs: e2etest - runs-on: ubuntu-latest - if: ${{ always() && !contains(needs.*.result, 'skipped') }} - timeout-minutes: 15 - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Setup Go - uses: actions/setup-go@v5 - with: - go-version: ${{ env.GO_VERSION }} - - name: AWS Test Resources - env: - CLUSTER_NAME: '${{ needs.e2etest.outputs.clustername }}' - run: | - make dev-aws-nuke diff --git a/Makefile b/Makefile index a7498d0d..eba447c2 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,10 @@ test: generate-all fmt vet envtest tidy external-crd ## Run tests. # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. test-e2e: cli-install - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h + @if [ "$$GINKGO_LABEL_FILTER" ]; then \ + ginkgo_label_flag="-ginkgo.label-filter=$$GINKGO_LABEL_FILTER"; \ + fi; \ + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h $$ginkgo_label_flag .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint @@ -299,7 +302,7 @@ dev-push: docker-build helm-push .PHONY: dev-templates dev-templates: templates-generate - $(KUBECTL) -n $(NAMESPACE) apply -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/templates + $(KUBECTL) -n $(NAMESPACE) apply --force -f $(PROVIDER_TEMPLATES_DIR)/hmc-templates/files/templates .PHONY: dev-release dev-release: @@ -320,6 +323,9 @@ dev-vsphere-creds: envsubst .PHONY: dev-apply ## Apply the development environment by deploying the kind cluster, local registry and the HMC helm chart. dev-apply: kind-deploy registry-deploy dev-push dev-deploy dev-templates dev-release +.PHONY: test-apply +test-apply: set-hmc-version helm-package dev-deploy dev-templates dev-release + .PHONY: dev-destroy dev-destroy: kind-undeploy registry-undeploy ## Destroy the development environment by deleting the kind cluster and local registry. 
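Note on the `test-e2e` change above: the recipe only appends the ginkgo label-filter flag when `GINKGO_LABEL_FILTER` is set. A quick usage sketch (the expanded `go test` lines are illustrative of what the recipe runs; the `KIND_CLUSTER_NAME`/`KIND_VERSION` env prefixes are omitted for brevity):

```
# With a filter set, the extra flag is appended to the invocation:
GINKGO_LABEL_FILTER="provider:cloud" make test-e2e
#   -> go test ./test/e2e/ -v -ginkgo.v -timeout=3h -ginkgo.label-filter=provider:cloud

# Without it, ginkgo_label_flag stays empty and no flag is passed:
make test-e2e
#   -> go test ./test/e2e/ -v -ginkgo.v -timeout=3h
```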
@@ -470,7 +476,7 @@ awscli: $(AWSCLI)
 $(AWSCLI): | $(LOCALBIN)
 	@if [ $(OS) == "linux" ]; then \
 		curl "https://awscli.amazonaws.com/awscli-exe-linux-$(shell uname -m)-$(AWSCLI_VERSION).zip" -o "/tmp/awscliv2.zip"; \
-		unzip -qq /tmp/awscliv2.zip -d /tmp; \
+		unzip -oqq /tmp/awscliv2.zip -d /tmp; \
 		/tmp/aws/install -i $(LOCALBIN)/aws-cli -b $(LOCALBIN) --update; \
 	fi; \
 	if [ $(OS) == "darwin" ]; then \
diff --git a/config/dev/vsphere-managedcluster.yaml b/config/dev/vsphere-managedcluster.yaml
index bb49416b..cad728e6 100644
--- a/config/dev/vsphere-managedcluster.yaml
+++ b/config/dev/vsphere-managedcluster.yaml
@@ -13,7 +13,7 @@ spec:
       name: vsphere-cluster-identity
     vsphere:
       server: ${VSPHERE_SERVER}
-      thumbprint: ${VSPHERE_THUMBPRINT} 
+      thumbprint: ${VSPHERE_THUMBPRINT}
       datacenter: ${VSPHERE_DATACENTER}
       datastore: ${VSPHERE_DATASTORE}
      resourcePool: ${VSPHERE_RESOURCEPOOL}
@@ -21,7 +21,7 @@ spec:
     username: ${VSPHERE_USER}
     password: ${VSPHERE_PASSWORD}
   controlPlaneEndpointIP: ${VSPHERE_CONTROL_PLANE_ENDPOINT}
- 
+
 controlPlane:
   ssh:
     user: ubuntu
diff --git a/docs/dev.md b/docs/dev.md
index 19b40cec..1ea34df5 100644
--- a/docs/dev.md
+++ b/docs/dev.md
@@ -136,6 +136,26 @@ tests that run in CI use names such as `ci-1234567890-e2e-test`. You can always
 pass `MANAGED_CLUSTER_NAME=` from the get-go to customize the name used by the
 test.
 
+### Filtering test runs
+Provider tests are split into two types: `onprem` and `cloud`. In CI,
+`provider:onprem` tests run on self-hosted runners provided by Mirantis, while
+`provider:cloud` tests run on GitHub Actions runners and interact with cloud
+infrastructure providers such as AWS or Azure.
+
+Each specific provider test also has its own label; for example, `provider:aws`
+can be used to run only the AWS tests. To use these filters with the
+`make test-e2e` target, pass the `GINKGO_LABEL_FILTER` env var, for example:
+
+```
+GINKGO_LABEL_FILTER="provider:cloud" make test-e2e
+```
+
+would run all cloud provider tests. To see a list of all available labels, run:
+
+```
+ginkgo labels ./test/e2e
+```
+
 ### Nuke created resources
 In CI we run `make dev-aws-nuke` to cleanup test resources, you can do so
 manually with:
diff --git a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
index 038fb1a9..708ce809 100644
--- a/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
+++ b/templates/cluster/vsphere-standalone-cp/templates/k0scontrolplane.yaml
@@ -114,8 +114,10 @@ spec:
               images:
                 driver:
                   tag: v3.1.2
+                  repo: "registry.k8s.io/csi-vsphere/driver"
                 syncer:
                   tag: v3.1.2
+                  repo: "registry.k8s.io/csi-vsphere/syncer"
       machineTemplate:
         infrastructureRef:
           apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
diff --git a/test/e2e/controller.go b/test/e2e/controller.go
deleted file mode 100644
index 8b8bdf24..00000000
--- a/test/e2e/controller.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2024
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package e2e - -import ( - "context" - "fmt" - "strings" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - hmcControllerLabel = "app.kubernetes.io/name=hmc" -) - -// verifyControllersUp validates that controllers for the given providers list -// are running and ready. Optionally specify providers to check for rather than -// waiting for all providers to be ready. -func verifyControllersUp(kc *kubeclient.KubeClient, providers ...managedcluster.ProviderType) error { - if err := validateController(kc, hmcControllerLabel, "hmc-controller-manager"); err != nil { - return err - } - - if providers == nil { - providers = []managedcluster.ProviderType{ - managedcluster.ProviderCAPI, - managedcluster.ProviderAWS, - managedcluster.ProviderAzure, - } - } - - for _, provider := range providers { - // Ensure only one controller pod is running. - if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil { - return err - } - } - - return nil -} - -func validateController(kc *kubeclient.KubeClient, labelSelector string, name string) error { - deployList, err := kc.Client.AppsV1().Deployments(kc.Namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - if err != nil { - return fmt.Errorf("failed to list %s controller deployments: %w", name, err) - } - - if len(deployList.Items) < 1 { - return fmt.Errorf("expected at least 1 %s controller deployment, got %d", - name, len(deployList.Items)) - } - - deployment := deployList.Items[0] - - // Ensure the deployment is not being deleted. - if deployment.DeletionTimestamp != nil { - return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v", - deployment.Name, deployment.DeletionTimestamp) - } - // Ensure the deployment is running and has the expected name. - if !strings.Contains(deployment.Name, "controller-manager") { - return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name) - } - if deployment.Status.ReadyReplicas < 1 { - return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name) - } - - return nil -} diff --git a/test/e2e/controller_test.go b/test/e2e/controller_test.go new file mode 100644 index 00000000..91dd019c --- /dev/null +++ b/test/e2e/controller_test.go @@ -0,0 +1,21 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + . 
"github.com/onsi/ginkgo/v2" +) + +var _ = Describe("Controller", Label("controller"), Ordered, func() {}) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index bccb1122..eef3a144 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -15,11 +15,26 @@ package e2e import ( + "bufio" + "context" "fmt" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" "testing" + "time" + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" ) // Run e2e tests using the Ginkgo runner. @@ -28,3 +43,188 @@ func TestE2E(t *testing.T) { _, _ = fmt.Fprintf(GinkgoWriter, "Starting hmc suite\n") RunSpecs(t, "e2e suite") } + +var _ = BeforeSuite(func() { + GinkgoT().Setenv(managedcluster.EnvVarNamespace, internalutils.DefaultSystemNamespace) + + By("building and deploying the controller-manager") + cmd := exec.Command("make", "kind-deploy") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + cmd = exec.Command("make", "test-apply") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + By("validating that the hmc-controller and CAPI provider controllers are running and ready") + kc := kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + Eventually(func() error { + err = verifyControllersUp(kc) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) +}) + +var _ = AfterSuite(func() { + if !noCleanup() { + By("collecting logs from local controllers") + kc := kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + collectLogArtifacts(kc, "") + + By("removing the controller-manager") + cmd := exec.Command("make", "dev-destroy") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + } +}) + +// verifyControllersUp validates that controllers for all providers are running +// and ready. +func verifyControllersUp(kc *kubeclient.KubeClient) error { + if err := validateController(kc, utils.HMCControllerLabel, "hmc-controller-manager"); err != nil { + return err + } + + providers := []managedcluster.ProviderType{ + managedcluster.ProviderCAPI, + managedcluster.ProviderAWS, + managedcluster.ProviderAzure, + managedcluster.ProviderVSphere, + } + + for _, provider := range providers { + // Ensure only one controller pod is running. + if err := validateController(kc, managedcluster.GetProviderLabel(provider), string(provider)); err != nil { + return err + } + } + + return nil +} + +func validateController(kc *kubeclient.KubeClient, labelSelector string, name string) error { + deployList, err := kc.Client.AppsV1().Deployments(kc.Namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labelSelector, + Limit: 1, + }) + if err != nil { + return fmt.Errorf("failed to list %s controller deployments: %w", name, err) + } + + if len(deployList.Items) < 1 { + return fmt.Errorf("expected at least 1 %s controller deployment", name) + } + + deployment := deployList.Items[0] + + // Ensure the deployment is not being deleted. 
+	if deployment.DeletionTimestamp != nil {
+		return fmt.Errorf("controller pod: %s deletion timestamp should be nil, got: %v",
+			deployment.Name, deployment.DeletionTimestamp)
+	}
+	// Ensure the deployment is running and has the expected name.
+	if !strings.Contains(deployment.Name, "controller-manager") {
+		return fmt.Errorf("controller deployment name %s does not contain 'controller-manager'", deployment.Name)
+	}
+	if deployment.Status.ReadyReplicas < 1 {
+		return fmt.Errorf("controller deployment: %s does not yet have any ReadyReplicas", deployment.Name)
+	}
+
+	return nil
+}
+
+// templateBy wraps a Ginkgo By with a block describing the template being
+// tested.
+func templateBy(t managedcluster.Template, description string) {
+	GinkgoHelper()
+	By(fmt.Sprintf("[%s] %s", t, description))
+}
+
+// collectLogArtifacts collects log output from the HMC controller, the CAPI
+// controller, and the provider controller(s), as well as output from clusterctl,
+// and stores them in the test/e2e directory as artifacts. clusterName can
+// optionally be provided; passing an empty string prevents clusterctl output
+// from being fetched. If collectLogArtifacts fails it produces a warning
+// message to the GinkgoWriter, but does not fail the test.
+func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) {
+	GinkgoHelper()
+
+	filterLabels := []string{utils.HMCControllerLabel}
+
+	var host string
+	hostURL, err := url.Parse(kc.Config.Host)
+	if err != nil {
+		utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err))
+	} else {
+		host = strings.ReplaceAll(hostURL.Host, ":", "_")
+	}
+
+	if providerTypes == nil {
+		filterLabels = managedcluster.FilterAllProviders()
+	} else {
+		for _, providerType := range providerTypes {
+			filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType))
+		}
+	}
+
+	for _, label := range filterLabels {
+		pods, _ := kc.Client.CoreV1().Pods(kc.Namespace).List(context.Background(), metav1.ListOptions{
+			LabelSelector: label,
+		})
+
+		for _, pod := range pods.Items {
+			req := kc.Client.CoreV1().Pods(kc.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
+				TailLines: ptr.To(int64(1000)),
+			})
+			podLogs, err := req.Stream(context.Background())
+			if err != nil {
+				utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err))
+				continue
+			}
+
+			output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name))
+			if err != nil {
+				utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err))
+				// Close the log stream before skipping this pod so it does not leak.
+				_ = podLogs.Close()
+				continue
+			}
+
+			r := bufio.NewReader(podLogs)
+			_, err = r.WriteTo(output)
+			if err != nil {
+				utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err))
+			}
+
+			if err = podLogs.Close(); err != nil {
+				utils.WarnError(fmt.Errorf("failed to close log stream for pod %s: %w", pod.Name, err))
+			}
+			if err = output.Close(); err != nil {
+				utils.WarnError(fmt.Errorf("failed to close log file for pod %s: %w", pod.Name, err))
+			}
+		}
+	}
+
+	if clusterName != "" {
+		cmd := exec.Command("./bin/clusterctl",
+			"describe", "cluster", clusterName, "--namespace", internalutils.DefaultSystemNamespace, "--show-conditions=all")
+		output, err := utils.Run(cmd)
+		if err != nil {
+			utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err))
+			return
+		}
+		err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644)
+		if err != nil {
+			utils.WarnError(fmt.Errorf("failed to write 
clusterctl log: %w", err)) + } + } +} + +func noCleanup() bool { + noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) + if noCleanup != "" { + By(fmt.Sprintf("skipping After node as %s is set", managedcluster.EnvVarNoCleanup)) + } + + return noCleanup != "" +} diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go deleted file mode 100644 index e13282d0..00000000 --- a/test/e2e/e2e_test.go +++ /dev/null @@ -1,579 +0,0 @@ -// Copyright 2024 -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package e2e - -import ( - "bufio" - "context" - "fmt" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" - "github.com/Mirantis/hmc/test/managedcluster/aws" - "github.com/Mirantis/hmc/test/managedcluster/azure" - "github.com/Mirantis/hmc/test/managedcluster/vsphere" - "github.com/Mirantis/hmc/test/utils" -) - -const ( - namespace = "hmc-system" -) - -var _ = Describe("controller", Ordered, func() { - BeforeAll(func() { - By("building and deploying the controller-manager") - cmd := exec.Command("make", "dev-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterAll(func() { - if !noCleanup() { - By("removing the controller-manager") - cmd := exec.Command("make", "dev-destroy") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - } - }) - - Context("Operator", func() { - It("should run successfully", func() { - kc := kubeclient.NewFromLocal(namespace) - - By("validating that the hmc-controller and capi provider controllers are running") - Eventually(func() error { - err := verifyControllersUp(kc) - if err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd := exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - // aws.CreateCredentialSecret(context.Background(), kc) - }) - }) - - Describe("AWS Templates", func() { - var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - clusterName string - ) - - BeforeAll(func() { - By("ensuring AWS credentials are set") - kc = kubeclient.NewFromLocal(namespace) - // aws.CreateCredentialSecret(context.Background(), kc) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd := exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated controllers - // as well as the output of clusterctl to store as artifacts. 
- if CurrentSpecReport().Failed() && !noCleanup() { - By("collecting failure logs from controllers") - if kc != nil { - collectLogArtifacts(kc, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) - } - if standaloneClient != nil { - collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) - } - - By("deleting resources after failure") - for _, deleteFunc := range []func() error{ - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { - err := deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - } - } - }) - - It("should work with an AWS provider", func() { - // Deploy a standalone cluster and verify it is running/ready. - // Deploy standalone with an xlarge instance since it will also be - // hosting the hosted cluster. - GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") - GinkgoT().Setenv(managedcluster.EnvVarInstallBeachHeadServices, "false") - - templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) - clusterName = sd.GetName() - - standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) - - templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") - - // Download the KUBECONFIG for the standalone cluster and load it - // so we can call Make targets against this cluster. - // TODO: Ideally we shouldn't use Make here and should just convert - // these Make targets into Go code, but this will require a - // helmclient. - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName) - - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("make", "dev-deploy") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - cmd = exec.Command("make", "dev-templates") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - GinkgoT().Setenv("NAMESPACE", namespace) - cmd = exec.Command("make", "DEV_PROVIDER=aws", "dev-creds-apply") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) - - // Ensure AWS credentials are set in the standalone cluster. - standaloneClient = kc.NewFromCluster(context.Background(), namespace, clusterName) - // aws.CreateCredentialSecret(context.Background(), standaloneClient) - - templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready") - Eventually(func() error { - err := verifyControllersUp(standaloneClient, managedcluster.ProviderCAPI, managedcluster.ProviderAWS) - if err != nil { - _, _ = fmt.Fprintf( - GinkgoWriter, "[%s] controller validation failed: %v\n", - string(managedcluster.TemplateAWSHostedCP), err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Populate the environment variables required for the hosted - // cluster. 
- aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName) - - templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) - hdName := hd.GetName() - - // Deploy the hosted cluster on top of the standalone cluster. - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - // Patch the AWSCluster resource as Ready, see: - // https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider - // Use Eventually as the AWSCluster might not be available - // immediately. - templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready") - Eventually(func() error { - if err := aws.PatchAWSClusterReady(context.Background(), standaloneClient, hd.GetName()); err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "failed to patch AWSCluster to ready: %v, retrying...\n", err) - return err - } - _, _ = fmt.Fprintf(GinkgoWriter, "Patch succeeded\n") - return nil - }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) - - // Verify the hosted cluster is running/ready. - templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully") - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Delete the hosted ManagedCluster and verify it is removed. - templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster") - err = hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAWSHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // Now delete the standalone ManagedCluster and verify it is - // removed, it is deleted last since it is the basis for the hosted - // cluster. - /* - FIXME(#339): This is currently disabled as the deletion of the - standalone cluster is failing due to outstanding issues. 
- templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") - err = standaloneDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deletionValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAWSStandaloneCP, - clusterName, - managedcluster.ValidationActionDelete, - ) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * - time.Second).Should(Succeed()) - */ - }) - }) - - Context("vSphere templates", func() { - var ( - kc *kubeclient.KubeClient - deleteFunc func() error - clusterName string - err error - ) - - BeforeAll(func() { - // Set here to skip CI runs for now - _, testVsphere := os.LookupEnv("TEST_VSPHERE") - if !testVsphere { - Skip("Skipping vSphere tests") - } - - By("ensuring that env vars are set correctly") - vsphere.CheckEnv() - By("creating kube client") - kc := kubeclient.NewFromLocal(namespace) - By("providing cluster identity") - credSecretName := "vsphere-cluster-identity-secret-e2e" - clusterIdentityName := "vsphere-cluster-identity-e2e" - Expect(vsphere.CreateSecret(kc, credSecretName)).Should(Succeed()) - Expect(vsphere.CreateClusterIdentity(kc, credSecretName, clusterIdentityName)).Should(Succeed()) - By("setting VSPHERE_CLUSTER_IDENTITY env variable") - Expect(os.Setenv("VSPHERE_CLUSTER_IDENTITY", clusterIdentityName)).Should(Succeed()) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated controllers - // as well as the output of clusterctl to store as artifacts. - if CurrentSpecReport().Failed() { - By("collecting failure logs from controllers") - collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI) - } - - if deleteFunc != nil { - By("deleting the deployment") - err = deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - }) - - It("should deploy standalone managed cluster", func() { - By("creating a managed cluster") - d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) - clusterName = d.GetName() - - deleteFunc := kc.CreateManagedCluster(context.Background(), d) - - By("waiting for infrastructure providers to deploy successfully") - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, - clusterName, - managedcluster.ValidationActionDeploy, - ) - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(60 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - deletionValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateVSphereStandaloneCP, - clusterName, - managedcluster.ValidationActionDelete, - ) - By("verify the deployment deletes successfully") - err = deleteFunc() - Expect(err).NotTo(HaveOccurred()) - Eventually(func() error { - return deletionValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - }) - }) - - Describe("Azure Templates", Label("provider"), func() { - var ( - kc *kubeclient.KubeClient - standaloneClient *kubeclient.KubeClient - standaloneDeleteFunc func() error - hostedDeleteFunc func() error - kubecfgDeleteFunc func() error - sdName string - ) - - BeforeAll(func() { - By("ensuring Azure credentials are set") - kc = kubeclient.NewFromLocal(namespace) - azure.CreateCredentialSecret(context.Background(), kc) - }) - - AfterEach(func() { - // If we failed collect logs from each of the affiliated 
controllers - // as well as the output of clusterctl to store as artifacts. - if CurrentSpecReport().Failed() && !noCleanup() { - By("collecting failure logs from controllers") - if kc != nil { - collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) - } - if standaloneClient != nil { - collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) - } - - By("deleting resources after failure") - for _, deleteFunc := range []func() error{ - kubecfgDeleteFunc, - hostedDeleteFunc, - standaloneDeleteFunc, - } { - if deleteFunc != nil { - err := deleteFunc() - Expect(err).NotTo(HaveOccurred()) - } - } - } - }) - - It("should work with an Azure provider", func() { - templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP) - sdName = sd.GetName() - - standaloneDeleteFunc := kc.CreateManagedCluster(context.Background(), sd) - - // verify the standalone cluster is deployed correctly - deploymentValidator := managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - sdName, - managedcluster.ValidationActionDeploy, - ) - - templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully") - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - // setup environment variables for deploying the hosted template (subnet name, etc) - azure.SetAzureEnvironmentVariables(sdName, kc) - - hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP) - hdName := hd.GetName() - - var kubeCfgPath string - kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) - - By("Deploy onto standalone cluster") - deployOnAzureCluster(kubeCfgPath) - - templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") - standaloneClient = kc.NewFromCluster(context.Background(), namespace, sdName) - // verify the cluster is ready prior to creating credentials - Eventually(func() error { - err := verifyControllersUp(standaloneClient, managedcluster.ProviderAzure) - if err != nil { - _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) - return err - } - return nil - }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("Create azure credential secret") - azure.CreateCredentialSecret(context.Background(), standaloneClient) - - templateBy(managedcluster.TemplateAzureHostedCP, - fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureHostedCP)) - hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) - - templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - hdName, - managedcluster.ValidationActionDeploy, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - By("verify the deployment deletes successfully") - err := hostedDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - err = standaloneDeleteFunc() - Expect(err).NotTo(HaveOccurred()) - - deploymentValidator = managedcluster.NewProviderValidator( - 
managedcluster.TemplateAzureHostedCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), standaloneClient) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - - deploymentValidator = managedcluster.NewProviderValidator( - managedcluster.TemplateAzureStandaloneCP, - hdName, - managedcluster.ValidationActionDelete, - ) - - Eventually(func() error { - return deploymentValidator.Validate(context.Background(), kc) - }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) - }) - }) -}) - -func deployOnAzureCluster(kubeCfgPath string) { - GinkgoT().Helper() - GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) - cmd := exec.Command("kubectl", "create", "-f", - "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+ - "storageclass-azuredisk-csi.yaml") - _, err := utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p", - "{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("make", "dev-deploy") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - - cmd = exec.Command("make", "dev-templates") - _, err = utils.Run(cmd) - Expect(err).NotTo(HaveOccurred()) - Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) -} - -// templateBy wraps a Ginkgo By with a block describing the template being -// tested. -func templateBy(t managedcluster.Template, description string) { - GinkgoHelper() - By(fmt.Sprintf("[%s] %s", t, description)) -} - -// collectLogArtfiacts collects log output from each the HMC controller, -// CAPI controller and the provider controller(s) as well as output from clusterctl -// and stores them in the test/e2e directory as artifacts. If it fails it -// produces a warning message to the GinkgoWriter, but does not fail the test. 
-func collectLogArtifacts(kc *kubeclient.KubeClient, clusterName string, providerTypes ...managedcluster.ProviderType) { - GinkgoHelper() - - filterLabels := []string{hmcControllerLabel} - - var host string - hostURL, err := url.Parse(kc.Config.Host) - if err != nil { - utils.WarnError(fmt.Errorf("failed to parse host from kubeconfig: %w", err)) - } else { - host = strings.ReplaceAll(hostURL.Host, ":", "_") - } - - for _, providerType := range providerTypes { - filterLabels = append(filterLabels, managedcluster.GetProviderLabel(providerType)) - } - - for _, label := range filterLabels { - pods, _ := kc.Client.CoreV1().Pods(kc.Namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: label, - }) - - for _, pod := range pods.Items { - req := kc.Client.CoreV1().Pods(kc.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - TailLines: ptr.To(int64(1000)), - }) - podLogs, err := req.Stream(context.Background()) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get log stream for pod %s: %w", pod.Name, err)) - continue - } - - output, err := os.Create(fmt.Sprintf("./test/e2e/%s.log", host+"-"+pod.Name)) - if err != nil { - utils.WarnError(fmt.Errorf("failed to create log file for pod %s: %w", pod.Name, err)) - _ = podLogs.Close() - continue - } - - r := bufio.NewReader(podLogs) - if _, err := r.WriteTo(output); err != nil { - utils.WarnError(fmt.Errorf("failed to write log file for pod %s: %w", pod.Name, err)) - } - - _ = podLogs.Close() - _ = output.Close() - } - } - - cmd := exec.Command("./bin/clusterctl", - "describe", "cluster", clusterName, "--namespace", namespace, "--show-conditions=all") - output, err := utils.Run(cmd) - if err != nil { - utils.WarnError(fmt.Errorf("failed to get clusterctl log: %w", err)) - return - } - - err = os.WriteFile(filepath.Join("test/e2e", host+"-"+"clusterctl.log"), output, 0o644) - if err != nil { - utils.WarnError(fmt.Errorf("failed to write clusterctl log: %w", err)) - } -} - -func noCleanup() bool { - noCleanup := os.Getenv(managedcluster.EnvVarNoCleanup) - if noCleanup != "" { - By(fmt.Sprintf("skipping After nodes as %s is set", managedcluster.EnvVarNoCleanup)) - } - - return noCleanup != "" -} diff --git a/test/kubeclient/kubeclient.go b/test/e2e/kubeclient/kubeclient.go similarity index 87% rename from test/kubeclient/kubeclient.go rename to test/e2e/kubeclient/kubeclient.go index 459e797a..55a24453 100644 --- a/test/kubeclient/kubeclient.go +++ b/test/e2e/kubeclient/kubeclient.go @@ -20,6 +20,7 @@ import ( "os" "path/filepath" + "github.com/Mirantis/hmc/test/utils" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -135,15 +136,41 @@ func newKubeClient(configBytes []byte, namespace string) *KubeClient { } // GetDynamicClient returns a dynamic client for the given GroupVersionResource. 
-func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource) dynamic.ResourceInterface { +// +//nolint:revive +func (kc *KubeClient) GetDynamicClient(gvr schema.GroupVersionResource, namespaced bool) dynamic.ResourceInterface { GinkgoHelper() client, err := dynamic.NewForConfig(kc.Config) - Expect(err).NotTo(HaveOccurred(), "failed to create dynamic client") + Expect(err).NotTo(HaveOccurred(), "failed to create dynamic client for resource: %s", gvr.String()) + + if !namespaced { + return client.Resource(gvr) + } return client.Resource(gvr).Namespace(kc.Namespace) } +func (kc *KubeClient) CreateOrUpdateUnstructuredObject(gvr schema.GroupVersionResource, obj *unstructured.Unstructured, namespaced bool) { + GinkgoHelper() + + client := kc.GetDynamicClient(gvr, namespaced) + + kind, name := utils.ObjKindName(obj) + + resp, err := client.Get(context.Background(), name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = client.Create(context.Background(), obj, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to create %s: %s", kind, name) + } else { + Expect(err).NotTo(HaveOccurred(), "failed to get existing %s: %s", kind, name) + + obj.SetResourceVersion(resp.GetResourceVersion()) + _, err = client.Update(context.Background(), obj, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to update existing %s: %s", kind, name) + } +} + // CreateManagedCluster creates a managedcluster.hmc.mirantis.com in the given // namespace and returns a DeleteFunc to clean up the deployment. // The DeleteFunc is a no-op if the deployment has already been deleted. @@ -159,7 +186,7 @@ func (kc *KubeClient) CreateManagedCluster( Group: "hmc.mirantis.com", Version: "v1alpha1", Resource: "managedclusters", - }) + }, true) _, err := client.Create(ctx, managedcluster, metav1.CreateOptions{}) if !apierrors.IsAlreadyExists(err) { @@ -183,7 +210,7 @@ func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unst Resource: "clusters", } - client := kc.GetDynamicClient(gvr) + client := kc.GetDynamicClient(gvr, true) cluster, err := client.Get(ctx, clusterName, metav1.GetOptions{}) if err != nil { @@ -198,7 +225,7 @@ func (kc *KubeClient) GetCluster(ctx context.Context, clusterName string) (*unst func (kc *KubeClient) listResource( ctx context.Context, gvr schema.GroupVersionResource, clusterName string, ) ([]unstructured.Unstructured, error) { - client := kc.GetDynamicClient(gvr) + client := kc.GetDynamicClient(gvr, true) resources, err := client.List(ctx, metav1.ListOptions{ LabelSelector: "cluster.x-k8s.io/cluster-name=" + clusterName, diff --git a/test/managedcluster/aws/aws.go b/test/e2e/managedcluster/aws/aws.go similarity index 62% rename from test/managedcluster/aws/aws.go rename to test/e2e/managedcluster/aws/aws.go index 0bdccafa..0c839941 100644 --- a/test/managedcluster/aws/aws.go +++ b/test/e2e/managedcluster/aws/aws.go @@ -17,83 +17,21 @@ package aws import ( - "bufio" - "bytes" "context" "encoding/json" - "errors" - "io" - "os" - "github.com/a8m/envsubst" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" "k8s.io/apimachinery/pkg/types" - yamlutil "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" - "k8s.io/client-go/restmapper" - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" ) -func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { - GinkgoHelper() - serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) - yamlFile, err := os.ReadFile("config/dev/aws-credentials.yaml") - Expect(err).NotTo(HaveOccurred()) - - yamlFile, err = envsubst.Bytes(yamlFile) - Expect(err).NotTo(HaveOccurred()) - - c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) - groupResources, err := restmapper.GetAPIGroupResources(c) - Expect(err).NotTo(HaveOccurred()) - - yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) - for { - yamlDoc, err := yamlReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") - } - - credentialResource := &unstructured.Unstructured{} - _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) - Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") - - mapper := restmapper.NewDiscoveryRESTMapper(groupResources) - mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) - Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") - - dc := kc.GetDynamicClient(schema.GroupVersionResource{ - Group: credentialResource.GroupVersionKind().Group, - Version: credentialResource.GroupVersionKind().Version, - Resource: mapping.Resource.Resource, - }) - - exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") - } - - if exists == nil { - if _, err := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { - Expect(err).NotTo(HaveOccurred(), "failed to create azure credential secret") - } - } - } -} - // PopulateHostedTemplateVars populates the environment variables required for // the AWS hosted CP template by querying the standalone CP cluster with the // given kubeclient. @@ -160,5 +98,5 @@ func getAWSClusterClient(kc *kubeclient.KubeClient) dynamic.ResourceInterface { Group: "infrastructure.cluster.x-k8s.io", Version: "v1beta2", Resource: "awsclusters", - }) + }, true) } diff --git a/test/managedcluster/azure/azure.go b/test/e2e/managedcluster/azure/azure.go similarity index 58% rename from test/managedcluster/azure/azure.go rename to test/e2e/managedcluster/azure/azure.go index fcf26208..f0806552 100644 --- a/test/managedcluster/azure/azure.go +++ b/test/e2e/managedcluster/azure/azure.go @@ -15,29 +15,18 @@ package azure import ( - "bufio" - "bytes" "context" - "errors" "fmt" - "io" - "os" - "github.com/a8m/envsubst" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer/yaml" - yamlutil "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/discovery" - "k8s.io/client-go/restmapper" hmc "github.com/Mirantis/hmc/api/v1alpha1" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" ) func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) map[string]any { @@ -48,7 +37,7 @@ func getAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) m Resource: "azureclusters", } - dc := kc.GetDynamicClient(resourceID) + dc := kc.GetDynamicClient(resourceID, true) list, err := dc.List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), }) @@ -100,53 +89,3 @@ func SetAzureEnvironmentVariables(clusterName string, kc *kubeclient.KubeClient) routeTableName := routeTable["name"] GinkgoT().Setenv("AZURE_ROUTE_TABLE", fmt.Sprintf("%s", routeTableName)) } - -func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) { - GinkgoHelper() - serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) - yamlFile, err := os.ReadFile("config/dev/azure-credentials.yaml") - Expect(err).NotTo(HaveOccurred()) - - yamlFile, err = envsubst.Bytes(yamlFile) - Expect(err).NotTo(HaveOccurred()) - - c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) - groupResources, err := restmapper.GetAPIGroupResources(c) - Expect(err).NotTo(HaveOccurred()) - - yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) - for { - yamlDoc, err := yamlReader.Read() - if err != nil { - if errors.Is(err, io.EOF) { - break - } - Expect(err).NotTo(HaveOccurred(), "failed to read yaml file") - } - - credentialResource := &unstructured.Unstructured{} - _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) - Expect(err).NotTo(HaveOccurred(), "failed to parse credential resource") - - mapper := restmapper.NewDiscoveryRESTMapper(groupResources) - mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) - Expect(err).NotTo(HaveOccurred(), "failed to get rest mapping") - - dc := kc.GetDynamicClient(schema.GroupVersionResource{ - Group: credentialResource.GroupVersionKind().Group, - Version: credentialResource.GroupVersionKind().Version, - Resource: mapping.Resource.Resource, - }) - - exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) - if !apierrors.IsNotFound(err) { - Expect(err).NotTo(HaveOccurred(), "failed to get azure credential secret") - } - - if exists == nil { - if _, createErr := dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { - Expect(createErr).NotTo(HaveOccurred(), "failed to create azure credential secret") - } - } - } -} diff --git a/test/e2e/managedcluster/clusteridentity/clusteridentity.go b/test/e2e/managedcluster/clusteridentity/clusteridentity.go new file mode 100644 index 00000000..91e69ea5 --- /dev/null +++ b/test/e2e/managedcluster/clusteridentity/clusteridentity.go @@ -0,0 +1,253 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clusteridentity + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ClusterIdentity struct { + GroupVersionResource schema.GroupVersionResource + Kind string + SecretName string + IdentityName string + SecretData map[string]string + Spec map[string]any + Namespaced bool +} + +// New creates a ClusterIdentity resource, credential and associated secret for +// the given provider using the provided KubeClient and returns details about +// the created ClusterIdentity. +func New(kc *kubeclient.KubeClient, provider managedcluster.ProviderType) *ClusterIdentity { + GinkgoHelper() + + var ( + resource string + kind string + version string + secretStringData map[string]string + spec map[string]any + namespaced bool + ) + + secretName := fmt.Sprintf("%s-cluster-identity-secret", provider) + identityName := fmt.Sprintf("%s-cluster-identity", provider) + + switch provider { + case managedcluster.ProviderAWS: + resource = "awsclusterstaticidentities" + kind = "AWSClusterStaticIdentity" + version = "v1beta2" + secretStringData = map[string]string{ + "AccessKeyID": os.Getenv(managedcluster.EnvVarAWSAccessKeyID), + "SecretAccessKey": os.Getenv(managedcluster.EnvVarAWSSecretAccessKey), + } + spec = map[string]any{ + "secretRef": secretName, + "allowedNamespaces": map[string]any{ + "selector": map[string]any{ + "matchLabels": map[string]any{}, + }, + }, + } + case managedcluster.ProviderAzure: + resource = "azureclusteridentities" + kind = "AzureClusterIdentity" + version = "v1beta1" + secretStringData = map[string]string{ + "clientSecret": os.Getenv(managedcluster.EnvVarAzureClientSecret), + } + spec = map[string]any{ + "allowedNamespaces": map[string]any{}, + "clientID": os.Getenv(managedcluster.EnvVarAzureClientID), + "clientSecret": map[string]any{ + "name": secretName, + "namespace": kc.Namespace, + }, + "tenantID": os.Getenv(managedcluster.EnvVarAzureTenantID), + "type": "ServicePrincipal", + } + namespaced = true + case managedcluster.ProviderVSphere: + resource = "vsphereclusteridentities" + kind = "VSphereClusterIdentity" + version = "v1beta1" + secretStringData = map[string]string{ + "username": os.Getenv(managedcluster.EnvVarVSphereUser), + "password": os.Getenv(managedcluster.EnvVarVSpherePassword), + } + spec = map[string]any{ + "secretName": secretName, + "allowedNamespaces": map[string]any{ + "selector": map[string]any{ + "matchLabels": map[string]any{}, + }, + }, + } + default: + Fail(fmt.Sprintf("Unsupported provider: %s", provider)) + } + + ci := ClusterIdentity{ + GroupVersionResource: schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: version, + Resource: resource, + }, + Kind: kind, + SecretName: secretName, + IdentityName: identityName, + 
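+		// SecretData is kept on the struct so createSecret below can populate
+		// the referenced Kubernetes Secret's StringData with these credentials.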
SecretData: secretStringData, + Spec: spec, + Namespaced: namespaced, + } + + validateSecretDataPopulated(secretStringData) + ci.waitForResourceCRD(kc) + ci.createSecret(kc) + ci.createClusterIdentity(kc) + ci.createCredential(kc) + + return &ci +} + +func validateSecretDataPopulated(secretData map[string]string) { + for key, value := range secretData { + Expect(value).ToNot(BeEmpty(), fmt.Sprintf("Secret data key %s should not be empty", key)) + } +} + +// waitForResourceCRD ensures the CRD for the given resource is present by +// trying to list the resources of the given type until it succeeds. +func (ci *ClusterIdentity) waitForResourceCRD(kc *kubeclient.KubeClient) { + GinkgoHelper() + + By(fmt.Sprintf("waiting for %s CRD to be present", ci.Kind)) + + ctx := context.Background() + + Eventually(func() error { + crds, err := kc.ExtendedClient.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list CRDs: %w", err) + } + + for _, crd := range crds.Items { + if crd.Spec.Names.Kind == ci.Kind { + return nil + } + } + + _, _ = fmt.Fprintf(GinkgoWriter, "Failed to find CRD, retrying...\n") + return fmt.Errorf("failed to find CRD for resource: %s", ci.GroupVersionResource.String()) + }).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed()) +} + +// createSecret creates a secret affiliated with a ClusterIdentity. +func (ci *ClusterIdentity) createSecret(kc *kubeclient.KubeClient) { + GinkgoHelper() + + By(fmt.Sprintf("creating ClusterIdentity secret: %s", ci.SecretName)) + + ctx := context.Background() + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ci.SecretName, + Namespace: kc.Namespace, + }, + StringData: ci.SecretData, + Type: corev1.SecretTypeOpaque, + } + + _, err := kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + resp, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, ci.SecretName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to get existing secret") + + secret.SetResourceVersion(resp.GetResourceVersion()) + _, err = kc.Client.CoreV1().Secrets(kc.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred(), "failed to update existing secret") + } else { + Expect(err).NotTo(HaveOccurred(), "failed to create secret") + } +} + +func (ci *ClusterIdentity) createCredential(kc *kubeclient.KubeClient) { + GinkgoHelper() + + credName := fmt.Sprintf("%s-cred", ci.IdentityName) + By(fmt.Sprintf("creating Credential: %s", credName)) + + cred := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": "hmc.mirantis.com/v1alpha1", + "kind": "Credential", + "metadata": map[string]any{ + "name": credName, + "namespace": kc.Namespace, + }, + "spec": map[string]any{ + "identityRef": map[string]any{ + "apiVersion": ci.GroupVersionResource.Group + "/" + ci.GroupVersionResource.Version, + "kind": ci.Kind, + "name": ci.IdentityName, + "namespace": kc.Namespace, + }, + }, + }, + } + + kc.CreateOrUpdateUnstructuredObject(schema.GroupVersionResource{ + Group: "hmc.mirantis.com", + Version: "v1alpha1", + Resource: "credentials", + }, cred, true) +} + +// createClusterIdentity creates a ClusterIdentity resource. 
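+// The Namespaced flag passed through to CreateOrUpdateUnstructuredObject
+// distinguishes namespaced identity kinds (AzureClusterIdentity) from
+// cluster-scoped ones (AWSClusterStaticIdentity, VSphereClusterIdentity).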
+func (ci *ClusterIdentity) createClusterIdentity(kc *kubeclient.KubeClient) { + GinkgoHelper() + + By(fmt.Sprintf("creating ClusterIdentity: %s", ci.IdentityName)) + + id := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": ci.GroupVersionResource.Group + "/" + ci.GroupVersionResource.Version, + "kind": ci.Kind, + "metadata": map[string]any{ + "name": ci.IdentityName, + "namespace": kc.Namespace, + }, + "spec": ci.Spec, + }, + } + + kc.CreateOrUpdateUnstructuredObject(ci.GroupVersionResource, id, ci.Namespaced) +} diff --git a/test/managedcluster/constants.go b/test/e2e/managedcluster/constants.go similarity index 69% rename from test/managedcluster/constants.go rename to test/e2e/managedcluster/constants.go index cd43527f..e6941e4b 100644 --- a/test/managedcluster/constants.go +++ b/test/e2e/managedcluster/constants.go @@ -17,7 +17,6 @@ package managedcluster const ( // Common EnvVarManagedClusterName = "MANAGED_CLUSTER_NAME" - EnvVarHostedManagedClusterName = "HOSTED_MANAGED_CLUSTER_NAME" EnvVarInstallBeachHeadServices = "INSTALL_BEACH_HEAD_SERVICES" EnvVarControlPlaneNumber = "CONTROL_PLANE_NUMBER" EnvVarWorkerNumber = "WORKER_NUMBER" @@ -27,11 +26,25 @@ const ( EnvVarNoCleanup = "NO_CLEANUP" // AWS + EnvVarAWSAccessKeyID = "AWS_ACCESS_KEY_ID" + EnvVarAWSSecretAccessKey = "AWS_SECRET_ACCESS_KEY" EnvVarAWSVPCID = "AWS_VPC_ID" EnvVarAWSSubnetID = "AWS_SUBNET_ID" EnvVarAWSSubnetAvailabilityZone = "AWS_SUBNET_AVAILABILITY_ZONE" EnvVarAWSInstanceType = "AWS_INSTANCE_TYPE" EnvVarAWSSecurityGroupID = "AWS_SG_ID" + EnvVarAWSClusterIdentity = "AWS_CLUSTER_IDENTITY" EnvVarPublicIP = "AWS_PUBLIC_IP" - AWSCredentialsSecretName = "aws-variables" + + // VSphere + EnvVarVSphereUser = "VSPHERE_USER" + EnvVarVSpherePassword = "VSPHERE_PASSWORD" + EnvVarVSphereClusterIdentity = "VSPHERE_CLUSTER_IDENTITY" + + // Azure + EnvVarAzureClientSecret = "AZURE_CLIENT_SECRET" + EnvVarAzureClientID = "AZURE_CLIENT_ID" + EnvVarAzureTenantID = "AZURE_TENANT_ID" + EnvVarAzureSubscription = "AZURE_SUBSCRIPTION" + EnvVarAzureClusterIdentity = "AZURE_CLUSTER_IDENTITY" ) diff --git a/test/managedcluster/managedcluster.go b/test/e2e/managedcluster/managedcluster.go similarity index 80% rename from test/managedcluster/managedcluster.go rename to test/e2e/managedcluster/managedcluster.go index 6c1edca0..b1cd4562 100644 --- a/test/managedcluster/managedcluster.go +++ b/test/e2e/managedcluster/managedcluster.go @@ -20,14 +20,13 @@ import ( "os" "strings" + "github.com/Mirantis/hmc/test/utils" "github.com/a8m/envsubst" "github.com/google/uuid" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - "github.com/Mirantis/hmc/internal/utils" ) type ProviderType string @@ -70,30 +69,46 @@ var vsphereStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/vsphere-hosted-cp.yaml.tpl var vsphereHostedCPManagedClusterTemplateBytes []byte +func FilterAllProviders() []string { + return []string{ + utils.HMCControllerLabel, + GetProviderLabel(ProviderAWS), + GetProviderLabel(ProviderAzure), + GetProviderLabel(ProviderCAPI), + GetProviderLabel(ProviderVSphere), + } +} + func GetProviderLabel(provider ProviderType) string { return fmt.Sprintf("%s=%s", providerLabel, provider) } +func setClusterName(templateName Template) { + var generatedName string + + mcName := os.Getenv(EnvVarManagedClusterName) + if mcName == "" { + mcName = "e2e-test-" + uuid.New().String()[:8] + } + + providerName := strings.Split(string(templateName), "-")[0] + + // Append the provider name to the cluster name to ensure uniqueness between + // different deployed ManagedClusters. + generatedName = fmt.Sprintf("%s-%s", mcName, providerName) + if strings.Contains(string(templateName), "hosted") { + generatedName = fmt.Sprintf("%s-%s", generatedName, "hosted") + } + + GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) +} + // GetUnstructured returns an unstructured ManagedCluster object based on the // provider and template. func GetUnstructured(templateName Template) *unstructured.Unstructured { GinkgoHelper() - generatedName := os.Getenv(EnvVarManagedClusterName) - if generatedName == "" { - generatedName = "e2e-test-" + uuid.New().String()[:8] - _, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName) - GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) - } else { - _, _ = fmt.Fprintf(GinkgoWriter, "Using configured cluster name: %q\n", generatedName) - } - - var hostedName string - if strings.Contains(string(templateName), "-hosted") { - hostedName = generatedName + "-hosted" - GinkgoT().Setenv(EnvVarHostedManagedClusterName, hostedName) - _, _ = fmt.Fprintf(GinkgoWriter, "Creating hosted ManagedCluster with name: %q\n", hostedName) - } + setClusterName(templateName) var managedClusterTemplateBytes []byte switch templateName { @@ -119,10 +134,9 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { case TemplateAzureStandaloneCP: managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("unsupported template: %s", templateName)) + Fail(fmt.Sprintf("Unsupported template: %s", templateName)) } - Expect(os.Setenv("NAMESPACE", utils.DefaultSystemNamespace)).NotTo(HaveOccurred()) managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") diff --git a/test/managedcluster/providervalidator.go b/test/e2e/managedcluster/providervalidator.go similarity index 98% rename from test/managedcluster/providervalidator.go rename to test/e2e/managedcluster/providervalidator.go index 2deae8ff..716c156d 100644 --- a/test/managedcluster/providervalidator.go +++ b/test/e2e/managedcluster/providervalidator.go @@ -18,7 +18,7 @@ import ( "context" "fmt" - "github.com/Mirantis/hmc/test/kubeclient" + "github.com/Mirantis/hmc/test/e2e/kubeclient" . 
"github.com/onsi/ginkgo/v2" ) diff --git a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl similarity index 78% rename from test/managedcluster/resources/aws-hosted-cp.yaml.tpl rename to test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl index 64e46a0b..0a62117d 100644 --- a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -1,12 +1,13 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${HOSTED_MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME} spec: template: aws-hosted-cp + credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: - name: aws-cluster-identity + name: ${AWS_CLUSTER_IDENTITY} namespace: ${NAMESPACE} vpcID: ${AWS_VPC_ID} region: ${AWS_REGION} diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl similarity index 82% rename from test/managedcluster/resources/aws-standalone-cp.yaml.tpl rename to test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl index e5ae8848..068b077b 100644 --- a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -1,12 +1,13 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-aws + name: ${MANAGED_CLUSTER_NAME} spec: template: aws-standalone-cp + credential: ${AWS_CLUSTER_IDENTITY}-cred config: clusterIdentity: - name: aws-cluster-identity + name: ${AWS_CLUSTER_IDENTITY} namespace: ${NAMESPACE} region: ${AWS_REGION} publicIP: ${AWS_PUBLIC_IP:=true} diff --git a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl similarity index 81% rename from test/managedcluster/resources/azure-hosted-cp.yaml.tpl rename to test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl index 6b8f7ad9..bae1c3a0 100644 --- a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -1,16 +1,17 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-azure + name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: template: azure-hosted-cp + credential: ${AZURE_CLUSTER_IDENTITY}-cred config: - location: "westus" + location: "westus2" subscriptionID: "${AZURE_SUBSCRIPTION_ID}" vmSize: Standard_A4_v2 clusterIdentity: - name: azure-cluster-identity + name: ${AZURE_CLUSTER_IDENTITY} namespace: hmc-system resourceGroup: "${AZURE_RESOURCE_GROUP}" network: diff --git a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl similarity index 72% rename from test/managedcluster/resources/azure-standalone-cp.yaml.tpl rename to test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl index 44d5abf6..8a8201ce 100644 --- a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl +++ b/test/e2e/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -1,21 +1,23 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME}-azure + name: ${MANAGED_CLUSTER_NAME} namespace: ${NAMESPACE} spec: template: azure-standalone-cp + credential: ${AZURE_CLUSTER_IDENTITY}-cred config: controlPlaneNumber: 1 workersNumber: 1 - location: "westus" + location: "westus2" subscriptionID: "${AZURE_SUBSCRIPTION_ID}" controlPlane: 
      vmSize: Standard_A4_v2
     worker:
       vmSize: Standard_A4_v2
     clusterIdentity:
-      name: azure-cluster-identity
+      name: ${AZURE_CLUSTER_IDENTITY}
       namespace: ${NAMESPACE}
     tenantID: "${AZURE_TENANT_ID}"
     clientID: "${AZURE_CLIENT_ID}"
diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
similarity index 92%
rename from test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
rename to test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
index a4c328b7..d4ae8765 100644
--- a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/vsphere-hosted-cp.yaml.tpl
@@ -1,9 +1,10 @@
 apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
-  name: ${MANAGED_CLUSTER_NAME}-vsphere
+  name: ${MANAGED_CLUSTER_NAME}
 spec:
   template: vsphere-hosted-cp
+  credential: ${VSPHERE_CLUSTER_IDENTITY}-cred
   config:
     controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1}
     workersNumber: ${WORKERS_NUMBER:=1}
diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
similarity index 93%
rename from test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
rename to test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
index 81eb8edf..97cf036c 100644
--- a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
+++ b/test/e2e/managedcluster/resources/vsphere-standalone-cp.yaml.tpl
@@ -1,9 +1,10 @@
 apiVersion: hmc.mirantis.com/v1alpha1
 kind: ManagedCluster
 metadata:
-  name: ${MANAGED_CLUSTER_NAME}-vsphere
+  name: ${MANAGED_CLUSTER_NAME}
 spec:
   template: vsphere-standalone-cp
+  credential: ${VSPHERE_CLUSTER_IDENTITY}-cred
   config:
     controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1}
     workersNumber: ${WORKERS_NUMBER:=1}
diff --git a/test/managedcluster/validate_deleted.go b/test/e2e/managedcluster/validate_deleted.go
similarity index 98%
rename from test/managedcluster/validate_deleted.go
rename to test/e2e/managedcluster/validate_deleted.go
index 11c80141..7ceeb61a 100644
--- a/test/managedcluster/validate_deleted.go
+++ b/test/e2e/managedcluster/validate_deleted.go
@@ -19,7 +19,7 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/Mirantis/hmc/test/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
 	"github.com/Mirantis/hmc/test/utils"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
diff --git a/test/managedcluster/validate_deployed.go b/test/e2e/managedcluster/validate_deployed.go
similarity index 90%
rename from test/managedcluster/validate_deployed.go
rename to test/e2e/managedcluster/validate_deployed.go
index 89c8ca69..b8224b4e 100644
--- a/test/managedcluster/validate_deployed.go
+++ b/test/e2e/managedcluster/validate_deployed.go
@@ -19,7 +19,7 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/Mirantis/hmc/test/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
 	"github.com/Mirantis/hmc/test/utils"
 	. "github.com/onsi/ginkgo/v2"
 	corev1 "k8s.io/api/core/v1"
@@ -62,6 +62,27 @@ func validateMachines(ctx context.Context, kc *kubeclient.KubeClient, clusterNam
 		return err
 	}
 
+	if len(machines) == 0 {
+		// No machines have been created yet, check for MachineDeployments to
+		// provide some debug information as to why no machines are present.
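+		// A MachineDeployment that exists but has produced no Machines usually
+		// carries the failing condition, so its conditions are validated below.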
+		machineDeployments, err := kc.ListMachineDeployments(ctx, clusterName)
+		if err != nil {
+			return fmt.Errorf("failed to list machine deployments: %w", err)
+		}
+
+		for _, md := range machineDeployments {
+			_, _ = fmt.Fprintf(GinkgoWriter, "No machines found, validating MachineDeployment %s\n", md.GetName())
+
+			if err := utils.ValidateObjectNamePrefix(&md, clusterName); err != nil {
+				Fail(err.Error())
+			}
+
+			if err := utils.ValidateConditionsTrue(&md); err != nil {
+				return err
+			}
+		}
+	}
+
 	for _, machine := range machines {
 		if err := utils.ValidateObjectNamePrefix(&machine, clusterName); err != nil {
 			Fail(err.Error())
@@ -191,7 +212,7 @@ func validateCSIDriver(ctx context.Context, kc *kubeclient.KubeClient, clusterNa
 		return fmt.Errorf("failed to get test PVC: %w", err)
 	}
 
-	if !strings.Contains(*pvc.Spec.StorageClassName, "csi") {
+	if pvc.Spec.StorageClassName != nil && !strings.Contains(*pvc.Spec.StorageClassName, "csi") {
 		Fail(fmt.Sprintf("%s PersistentVolumeClaim does not have a CSI driver storageClass", pvcName))
 	}
 
diff --git a/test/e2e/managedcluster/vsphere/vsphere.go b/test/e2e/managedcluster/vsphere/vsphere.go
new file mode 100644
index 00000000..0d5db9ca
--- /dev/null
+++ b/test/e2e/managedcluster/vsphere/vsphere.go
@@ -0,0 +1,36 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vsphere
+
+import (
+	"github.com/Mirantis/hmc/test/e2e/managedcluster"
+)
+
+func CheckEnv() {
+	managedcluster.ValidateDeploymentVars([]string{
+		"VSPHERE_USER",
+		"VSPHERE_PASSWORD",
+		"VSPHERE_SERVER",
+		"VSPHERE_THUMBPRINT",
+		"VSPHERE_DATACENTER",
+		"VSPHERE_DATASTORE",
+		"VSPHERE_RESOURCEPOOL",
+		"VSPHERE_FOLDER",
+		"VSPHERE_CONTROL_PLANE_ENDPOINT",
+		"VSPHERE_VM_TEMPLATE",
+		"VSPHERE_NETWORK",
+		"VSPHERE_SSH_KEY",
+	})
+}
diff --git a/test/e2e/provider_aws_test.go b/test/e2e/provider_aws_test.go
new file mode 100644
index 00000000..bfde5cb2
--- /dev/null
+++ b/test/e2e/provider_aws_test.go
@@ -0,0 +1,201 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"time"
+
+	internalutils "github.com/Mirantis/hmc/internal/utils"
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster/aws"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity"
+	"github.com/Mirantis/hmc/test/utils"
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" +) + +var _ = Describe("AWS Templates", Label("provider:cloud", "provider:aws"), Ordered, func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + clusterName string + ) + + BeforeAll(func() { + By("providing cluster identity") + kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + ci := clusteridentity.New(kc, managedcluster.ProviderAWS) + Expect(os.Setenv(managedcluster.EnvVarAWSClusterIdentity, ci.IdentityName)).Should(Succeed()) + }) + + AfterAll(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. + if CurrentSpecReport().Failed() && !noCleanup() { + if standaloneClient != nil { + By("collecting failure logs from hosted controllers") + collectLogArtifacts(standaloneClient, clusterName, managedcluster.ProviderAWS, managedcluster.ProviderCAPI) + } + + By("deleting resources after failure") + for _, deleteFunc := range []func() error{ + kubecfgDeleteFunc, + hostedDeleteFunc, + standaloneDeleteFunc, + } { + if deleteFunc != nil { + err := deleteFunc() + Expect(err).NotTo(HaveOccurred()) + } + } + } + }) + + It("should work with an AWS provider", func() { + // Deploy a standalone cluster and verify it is running/ready. + // Deploy standalone with an xlarge instance since it will also be + // hosting the hosted cluster. + GinkgoT().Setenv(managedcluster.EnvVarAWSInstanceType, "t3.xlarge") + GinkgoT().Setenv(managedcluster.EnvVarInstallBeachHeadServices, "false") + + templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") + sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) + clusterName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + templateBy(managedcluster.TemplateAWSStandaloneCP, "waiting for infrastructure to deploy successfully") + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAWSStandaloneCP, + clusterName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + templateBy(managedcluster.TemplateAWSHostedCP, "installing controller and templates on standalone cluster") + + // Download the KUBECONFIG for the standalone cluster and load it + // so we can call Make targets against this cluster. + // TODO: Ideally we shouldn't use Make here and should just convert + // these Make targets into Go code, but this will require a + // helmclient. 
+		var kubeCfgPath string
+		kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), clusterName)
+
+		GinkgoT().Setenv("KUBECONFIG", kubeCfgPath)
+		cmd := exec.Command("make", "test-apply")
+		_, err := utils.Run(cmd)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(os.Unsetenv("KUBECONFIG")).To(Succeed())
+
+		templateBy(managedcluster.TemplateAWSHostedCP, "validating that the controller is ready")
+		// The standalone client must exist before the controller check below
+		// polls it, otherwise verifyControllersUp dereferences a nil client.
+		standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, clusterName)
+		Eventually(func() error {
+			err := verifyControllersUp(standaloneClient)
+			if err != nil {
+				_, _ = fmt.Fprintf(
+					GinkgoWriter, "[%s] controller validation failed: %v\n",
+					string(managedcluster.TemplateAWSHostedCP), err)
+				return err
+			}
+			return nil
+		}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		// Ensure AWS credentials are set in the standalone cluster.
+		clusteridentity.New(standaloneClient, managedcluster.ProviderAWS)
+
+		// Populate the environment variables required for the hosted
+		// cluster.
+		aws.PopulateHostedTemplateVars(context.Background(), kc, clusterName)
+
+		templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster")
+		hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP)
+		hdName := hd.GetName()
+
+		// Deploy the hosted cluster on top of the standalone cluster.
+		hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+
+		// Patch the AWSCluster resource as Ready, see:
+		// https://docs.k0smotron.io/stable/capi-aws/#prepare-the-aws-infra-provider
+		// Use Eventually as the AWSCluster might not be available
+		// immediately.
+		templateBy(managedcluster.TemplateAWSHostedCP, "Patching AWSCluster to ready")
+		Eventually(func() error {
+			if err := aws.PatchAWSClusterReady(context.Background(), standaloneClient, hd.GetName()); err != nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "failed to patch AWSCluster to ready: %v, retrying...\n", err)
+				return err
+			}
+			_, _ = fmt.Fprintf(GinkgoWriter, "Patch succeeded\n")
+			return nil
+		}).WithTimeout(time.Minute).WithPolling(5 * time.Second).Should(Succeed())
+
+		// Verify the hosted cluster is running/ready.
+		templateBy(managedcluster.TemplateAWSHostedCP, "waiting for infrastructure to deploy successfully")
+		deploymentValidator = managedcluster.NewProviderValidator(
+			managedcluster.TemplateAWSHostedCP,
+			hdName,
+			managedcluster.ValidationActionDeploy,
+		)
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), standaloneClient)
+		}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		// Delete the hosted ManagedCluster and verify it is removed.
+		templateBy(managedcluster.TemplateAWSHostedCP, "deleting the ManagedCluster")
+		err = hostedDeleteFunc()
+		Expect(err).NotTo(HaveOccurred())
+
+		deletionValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateAWSHostedCP,
+			hdName,
+			managedcluster.ValidationActionDelete,
+		)
+		Eventually(func() error {
+			return deletionValidator.Validate(context.Background(), standaloneClient)
+		}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		// Now delete the standalone ManagedCluster and verify it is
+		// removed; it is deleted last since it is the basis for the hosted
+		// cluster.
+		/*
+			FIXME(#339): This is currently disabled as the deletion of the
+			standalone cluster is failing due to outstanding issues.
+ templateBy(managedcluster.TemplateAWSStandaloneCP, "deleting the ManagedCluster") + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deletionValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAWSStandaloneCP, + clusterName, + managedcluster.ValidationActionDelete, + ) + Eventually(func() error { + return deletionValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * + time.Second).Should(Succeed()) + */ + }) +}) diff --git a/test/e2e/provider_azure_test.go b/test/e2e/provider_azure_test.go new file mode 100644 index 00000000..9c5ed599 --- /dev/null +++ b/test/e2e/provider_azure_test.go @@ -0,0 +1,184 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "context" + "fmt" + "os" + "os/exec" + "time" + + internalutils "github.com/Mirantis/hmc/internal/utils" + "github.com/Mirantis/hmc/test/e2e/kubeclient" + "github.com/Mirantis/hmc/test/e2e/managedcluster" + "github.com/Mirantis/hmc/test/e2e/managedcluster/azure" + "github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity" + "github.com/Mirantis/hmc/test/utils" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Context("Azure Templates", Label("provider:cloud", "provider:azure"), Ordered, func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + sdName string + ) + + BeforeAll(func() { + By("ensuring Azure credentials are set") + kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace) + ci := clusteridentity.New(kc, managedcluster.ProviderAzure) + Expect(os.Setenv(managedcluster.EnvVarAzureClusterIdentity, ci.IdentityName)).Should(Succeed()) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. 
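+		// noCleanup() skips teardown (driven by the NO_CLEANUP env var, see
+		// managedcluster.EnvVarNoCleanup) so failed runs can be inspected.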
+		if CurrentSpecReport().Failed() && !noCleanup() {
+			By("collecting failure logs from controllers")
+			if kc != nil {
+				collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI)
+			}
+			if standaloneClient != nil {
+				collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI)
+			}
+
+			By("deleting resources after failure")
+			for _, deleteFunc := range []func() error{
+				kubecfgDeleteFunc,
+				hostedDeleteFunc,
+				standaloneDeleteFunc,
+			} {
+				if deleteFunc != nil {
+					err := deleteFunc()
+					Expect(err).NotTo(HaveOccurred())
+				}
+			}
+		}
+	})
+
+	It("should work with an Azure provider", func() {
+		templateBy(managedcluster.TemplateAzureStandaloneCP, "creating a ManagedCluster")
+		sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP)
+		sdName = sd.GetName()
+
+		standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd)
+
+		// verify the standalone cluster is deployed correctly
+		deploymentValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureStandaloneCP,
+			sdName,
+			managedcluster.ValidationActionDeploy,
+		)
+
+		templateBy(managedcluster.TemplateAzureStandaloneCP, "waiting for infrastructure provider to deploy successfully")
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), kc)
+		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		// setup environment variables for deploying the hosted template (subnet name, etc)
+		azure.SetAzureEnvironmentVariables(sdName, kc)
+
+		hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP)
+		hdName := hd.GetName()
+
+		var kubeCfgPath string
+		kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName)
+
+		By("Deploy onto standalone cluster")
+		deployOnAzureCluster(kubeCfgPath)
+
+		standaloneClient = kc.NewFromCluster(context.Background(), internalutils.DefaultSystemNamespace, sdName)
+		// verify the cluster is ready prior to creating credentials
+		Eventually(func() error {
+			err := verifyControllersUp(standaloneClient)
+			if err != nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err)
+				return err
+			}
+			return nil
+		}).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		By("Create azure credential secret")
+		clusteridentity.New(standaloneClient, managedcluster.ProviderAzure)
+
+		templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster")
+		hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd)
+
+		templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully")
+
+		deploymentValidator = managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureHostedCP,
+			hdName,
+			managedcluster.ValidationActionDeploy,
+		)
+
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), standaloneClient)
+		}).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		By("verify the deployment deletes successfully")
+		err := hostedDeleteFunc()
+		Expect(err).NotTo(HaveOccurred())
+
+		err = standaloneDeleteFunc()
+		Expect(err).NotTo(HaveOccurred())
+
+		deploymentValidator = managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureHostedCP,
+			hdName,
+			managedcluster.ValidationActionDelete,
+		)
+
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), standaloneClient)
+		}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		deploymentValidator = managedcluster.NewProviderValidator(
+			managedcluster.TemplateAzureStandaloneCP,
+			sdName,
+			managedcluster.ValidationActionDelete,
+		)
+
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), kc)
+		}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+	})
+})
+
+func deployOnAzureCluster(kubeCfgPath string) {
+	GinkgoT().Helper()
+	GinkgoT().Setenv("KUBECONFIG", kubeCfgPath)
+	cmd := exec.Command("kubectl", "apply", "-f",
+		"https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+
+			"storageclass-azuredisk-csi.yaml")
+	_, err := utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred())
+
+	cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p",
+		"{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}")
+	_, err = utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred())
+
+	cmd = exec.Command("make", "test-apply")
+	_, err = utils.Run(cmd)
+	Expect(err).NotTo(HaveOccurred())
+	Expect(os.Unsetenv("KUBECONFIG")).To(Succeed())
+}
diff --git a/test/e2e/provider_vsphere_test.go b/test/e2e/provider_vsphere_test.go
new file mode 100644
index 00000000..e31b29da
--- /dev/null
+++ b/test/e2e/provider_vsphere_test.go
@@ -0,0 +1,94 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package e2e
+
+import (
+	"context"
+	"os"
+	"time"
+
+	internalutils "github.com/Mirantis/hmc/internal/utils"
+	"github.com/Mirantis/hmc/test/e2e/kubeclient"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster/clusteridentity"
+	"github.com/Mirantis/hmc/test/e2e/managedcluster/vsphere"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+var _ = Context("vSphere Templates", Label("provider:onprem", "provider:vsphere"), Ordered, func() {
+	var (
+		kc          *kubeclient.KubeClient
+		deleteFunc  func() error
+		clusterName string
+		err         error
+	)
+
+	BeforeAll(func() {
+		By("ensuring that env vars are set correctly")
+		vsphere.CheckEnv()
+		By("creating kube client")
+		kc = kubeclient.NewFromLocal(internalutils.DefaultSystemNamespace)
+		By("providing cluster identity")
+		ci := clusteridentity.New(kc, managedcluster.ProviderVSphere)
+		By("setting VSPHERE_CLUSTER_IDENTITY env variable")
+		Expect(os.Setenv(managedcluster.EnvVarVSphereClusterIdentity, ci.IdentityName)).Should(Succeed())
+	})
+
+	AfterEach(func() {
+		// If we failed, collect logs from each of the affiliated controllers
+		// as well as the output of clusterctl to store as artifacts.
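+		// deleteFunc is only set once the spec has created the ManagedCluster,
+		// so a failure before creation skips the delete step entirely.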
+		if CurrentSpecReport().Failed() {
+			By("collecting failure logs from controllers")
+			collectLogArtifacts(kc, clusterName, managedcluster.ProviderVSphere, managedcluster.ProviderCAPI)
+		}
+
+		if deleteFunc != nil && !noCleanup() {
+			By("deleting the deployment")
+			err = deleteFunc()
+			Expect(err).NotTo(HaveOccurred())
+		}
+	})
+
+	It("should deploy standalone managed cluster", func() {
+		By("creating a managed cluster")
+		d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP)
+		clusterName = d.GetName()
+
+		deleteFunc = kc.CreateManagedCluster(context.Background(), d)
+
+		By("waiting for infrastructure providers to deploy successfully")
+		deploymentValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateVSphereStandaloneCP,
+			clusterName,
+			managedcluster.ValidationActionDeploy,
+		)
+		Eventually(func() error {
+			return deploymentValidator.Validate(context.Background(), kc)
+		}).WithTimeout(30 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+
+		deletionValidator := managedcluster.NewProviderValidator(
+			managedcluster.TemplateVSphereStandaloneCP,
+			clusterName,
+			managedcluster.ValidationActionDelete,
+		)
+		By("verify the deployment deletes successfully")
+		err = deleteFunc()
+		Expect(err).NotTo(HaveOccurred())
+		Eventually(func() error {
+			return deletionValidator.Validate(context.Background(), kc)
+		}).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed())
+	})
+})
diff --git a/test/managedcluster/vsphere/vsphere.go b/test/managedcluster/vsphere/vsphere.go
deleted file mode 100644
index 1d9b3f4e..00000000
--- a/test/managedcluster/vsphere/vsphere.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2024
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package vsphere - -import ( - "context" - "fmt" - "os" - - "github.com/Mirantis/hmc/test/kubeclient" - "github.com/Mirantis/hmc/test/managedcluster" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" -) - -func CreateSecret(kc *kubeclient.KubeClient, secretName string) error { - ctx := context.Background() - _, err := kc.Client.CoreV1().Secrets(kc.Namespace).Get(ctx, secretName, metav1.GetOptions{}) - - if !apierrors.IsNotFound(err) { - return nil - } - username := os.Getenv("VSPHERE_USER") - password := os.Getenv("VSPHERE_PASSWORD") - - _, err = kc.Client.CoreV1().Secrets(kc.Namespace).Create(ctx, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - StringData: map[string]string{ - "username": username, - "password": password, - }, - Type: corev1.SecretTypeOpaque, - }, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create vSphere credentials secret: %w", err) - } - - return nil -} - -func CreateClusterIdentity(kc *kubeclient.KubeClient, secretName string, identityName string) error { - ctx := context.Background() - client, err := dynamic.NewForConfig(kc.Config) - if err != nil { - return fmt.Errorf("failed to create dynamic client: %w", err) - } - - gvr := schema.GroupVersionResource{ - Group: "infrastructure.cluster.x-k8s.io", - Version: "v1beta1", - Resource: "vsphereclusteridentities", - } - - clusterIdentity := &unstructured.Unstructured{ - Object: map[string]any{ - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "kind": "VSphereClusterIdentity", - "metadata": map[string]any{ - "name": identityName, - }, - "spec": map[string]any{ - "secretName": secretName, - "allowedNamespaces": map[string]any{ - "selector": map[string]any{ - "matchLabels": map[string]any{}, - }, - }, - }, - }, - } - - result, err := client.Resource(gvr).Create(ctx, clusterIdentity, metav1.CreateOptions{}) - if err != nil { - fmt.Printf("%+v", result) //nolint:revive // false-positive - return fmt.Errorf("failed to create vsphereclusteridentity: %w", err) - } - - return nil -} - -func CheckEnv() { - managedcluster.ValidateDeploymentVars([]string{ - "VSPHERE_USER", - "VSPHERE_PASSWORD", - "VSPHERE_SERVER", - "VSPHERE_THUMBPRINT", - "VSPHERE_DATACENTER", - "VSPHERE_DATASTORE", - "VSPHERE_RESOURCEPOOL", - "VSPHERE_FOLDER", - "VSPHERE_CONTROL_PLANE_ENDPOINT", - "VSPHERE_VM_TEMPLATE", - "VSPHERE_NETWORK", - "VSPHERE_SSH_KEY", - }) -} diff --git a/test/utils/utils.go b/test/utils/utils.go index 6c45e714..8c64a729 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -27,6 +27,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + HMCControllerLabel = "app.kubernetes.io/name=hmc" +) + // Run executes the provided command within this context and returns it's // output. Run does not wait for the command to finish, use Wait instead. func Run(cmd *exec.Cmd) ([]byte, error) {
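For reference, a minimal sketch of how label filters such as HMCControllerLabel (or the full set returned by managedcluster.FilterAllProviders()) can be consumed with a plain client-go clientset when gathering controller pods; the function name and wiring below are illustrative assumptions, not part of this patch:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printControllerPods lists pods matching each label filter (for example
// "app.kubernetes.io/name=hmc") in the given namespace and prints their names.
func printControllerPods(ctx context.Context, c kubernetes.Interface, namespace string, filters []string) error {
	for _, filter := range filters {
		pods, err := c.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: filter})
		if err != nil {
			return fmt.Errorf("failed to list pods for filter %q: %w", filter, err)
		}
		for _, pod := range pods.Items {
			fmt.Printf("%s: %s\n", filter, pod.Name)
		}
	}
	return nil
}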