From a8474a7c4b6abd6792400ac5683e6c615b03f53e Mon Sep 17 00:00:00 2001 From: Kyle Wuolle Date: Wed, 18 Sep 2024 11:24:29 -0700 Subject: [PATCH] Implemented Azure e2e tests --- .github/workflows/test.yml | 5 + Makefile | 2 +- test/e2e/e2e_test.go | 205 +++++++++++++++++- test/managedcluster/azure/azure.go | 126 +++++++++++ test/managedcluster/managedcluster.go | 22 +- test/managedcluster/providervalidator.go | 2 + .../resources/aws-hosted-cp.yaml.tpl | 2 +- .../resources/aws-standalone-cp.yaml.tpl | 2 +- .../resources/azure-hosted-cp.yaml.tpl | 23 ++ .../resources/azure-standalone-cp.yaml.tpl | 22 ++ .../resources/vsphere-hosted-cp.yaml.tpl | 2 +- .../resources/vsphere-standalone-cp.yaml.tpl | 2 +- test/managedcluster/validate_deployed.go | 2 +- 13 files changed, 405 insertions(+), 12 deletions(-) create mode 100644 test/managedcluster/azure/azure.go create mode 100644 test/managedcluster/resources/azure-hosted-cp.yaml.tpl create mode 100644 test/managedcluster/resources/azure-standalone-cp.yaml.tpl diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 83e6d264..681c985b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,6 +18,11 @@ env: AWS_REGION: us-west-2 AWS_ACCESS_KEY_ID: ${{ secrets.CI_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.CI_AWS_SECRET_ACCESS_KEY }} + AZURE_SUBSCRIPTION_ID: ${{ secrets.CI_AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.CI_AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.CI_AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.CI_AZURE_CLIENT_SECRET }} + NAMESPACE: hmc-system jobs: e2etest: diff --git a/Makefile b/Makefile index 0a83f991..49b6cac1 100644 --- a/Makefile +++ b/Makefile @@ -109,7 +109,7 @@ test: generate-all fmt vet envtest tidy external-crd ## Run tests. # compatibility with other vendors. .PHONY: test-e2e # Run the e2e tests using a Kind k8s instance as the management cluster. 
test-e2e: cli-install - KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=2h + KIND_CLUSTER_NAME="hmc-test" KIND_VERSION=$(KIND_VERSION) go test ./test/e2e/ -v -ginkgo.v -timeout=3h .PHONY: lint lint: golangci-lint ## Run golangci-lint linter & yamllint diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c0e06347..feaa9e6d 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -29,11 +29,13 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" "github.com/Mirantis/hmc/test/kubeclient" "github.com/Mirantis/hmc/test/managedcluster" "github.com/Mirantis/hmc/test/managedcluster/aws" + "github.com/Mirantis/hmc/test/managedcluster/azure" "github.com/Mirantis/hmc/test/managedcluster/vsphere" "github.com/Mirantis/hmc/test/utils" ) @@ -126,7 +128,7 @@ var _ = Describe("controller", Ordered, func() { GinkgoT().Setenv(managedcluster.EnvVarInstallBeachHeadServices, "false") templateBy(managedcluster.TemplateAWSStandaloneCP, "creating a ManagedCluster") - sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP) + sd := managedcluster.GetUnstructured(managedcluster.TemplateAWSStandaloneCP, managedcluster.ProviderAWS) clusterName = sd.GetName() standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) @@ -182,7 +184,7 @@ var _ = Describe("controller", Ordered, func() { aws.PopulateHostedTemplateVars(context.Background(), kc) templateBy(managedcluster.TemplateAWSHostedCP, "creating a ManagedCluster") - hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP) + hd := managedcluster.GetUnstructured(managedcluster.TemplateAWSHostedCP, managedcluster.ProviderAWS) hdName := hd.GetName() // Deploy the hosted cluster on top of the standalone cluster. 
@@ -296,7 +298,7 @@ var _ = Describe("controller", Ordered, func() { It("should deploy standalone managed cluster", func() { By("creating a managed cluster") - d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP) + d := managedcluster.GetUnstructured(managedcluster.TemplateVSphereStandaloneCP, managedcluster.ProviderVSphere) clusterName = d.GetName() deleteFunc := kc.CreateManagedCluster(context.Background(), d) @@ -325,8 +327,205 @@ var _ = Describe("controller", Ordered, func() { }) }) + Describe("Azure Templates", Label("provider"), func() { + var ( + kc *kubeclient.KubeClient + standaloneClient *kubeclient.KubeClient + standaloneDeleteFunc func() error + hostedDeleteFunc func() error + kubecfgDeleteFunc func() error + sdName string + ) + + BeforeAll(func() { + By("ensuring Azure credentials are set") + kc = kubeclient.NewFromLocal(namespace) + ExpectWithOffset(2, azure.CreateCredentialSecret(context.Background(), kc)).To(Succeed()) + }) + + AfterEach(func() { + // If we failed collect logs from each of the affiliated controllers + // as well as the output of clusterctl to store as artifacts. 
+ if CurrentSpecReport().Failed() && !noCleanup() { + By("collecting failure logs from controllers") + if kc != nil { + collectLogArtifacts(kc, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + if standaloneClient != nil { + collectLogArtifacts(standaloneClient, sdName, managedcluster.ProviderAzure, managedcluster.ProviderCAPI) + } + + By("deleting resources after failure") + for _, deleteFunc := range []func() error{ + kubecfgDeleteFunc, + hostedDeleteFunc, + standaloneDeleteFunc, + } { + if deleteFunc != nil { + err := deleteFunc() + Expect(err).NotTo(HaveOccurred()) + } + } + } + }) + + It("should work with an Azure provider", func() { + templateBy(managedcluster.TemplateAzureStandaloneCP, + fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureStandaloneCP)) + sd := managedcluster.GetUnstructured(managedcluster.TemplateAzureStandaloneCP, managedcluster.ProviderAzure) + sdName = sd.GetName() + + standaloneDeleteFunc = kc.CreateManagedCluster(context.Background(), sd) + + // verify the standalone cluster is deployed correctly + deploymentValidator := managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + sdName, + managedcluster.ValidationActionDeploy, + ) + + By("waiting for infrastructure providers to deploy successfully") + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + // setup environment variables for deploying the hosted template (subnet name, etc) + setAzureEnvironmentVariables(sdName, kc) + + hd := managedcluster.GetUnstructured(managedcluster.TemplateAzureHostedCP, managedcluster.ProviderAzure) + hdName := hd.GetName() + + var kubeCfgPath string + kubeCfgPath, kubecfgDeleteFunc = kc.WriteKubeconfig(context.Background(), sdName) + + By("Deploy onto standalone cluster") + deployOnAzureCluster(kubeCfgPath) + + 
templateBy(managedcluster.TemplateAzureHostedCP, "creating a ManagedCluster") + standaloneClient = kc.NewFromCluster(context.Background(), namespace, sdName) + // verify the cluster is ready prior to creating credentials + Eventually(func() error { + err := verifyControllersUp(standaloneClient, managedcluster.ProviderAzure) + if err != nil { + _, _ = fmt.Fprintf(GinkgoWriter, "Controller validation failed: %v\n", err) + return err + } + return nil + }).WithTimeout(15 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("Create azure credential secret") + ExpectWithOffset(2, azure.CreateCredentialSecret(context.Background(), standaloneClient)).To(Succeed()) + + templateBy(managedcluster.TemplateAzureHostedCP, + fmt.Sprintf("creating a Deployment using template %s", managedcluster.TemplateAzureHostedCP)) + hostedDeleteFunc = standaloneClient.CreateManagedCluster(context.Background(), hd) + + templateBy(managedcluster.TemplateAzureHostedCP, "waiting for infrastructure to deploy successfully") + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDeploy, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(90 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + By("verify the deployment deletes successfully") + err := hostedDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + err = standaloneDeleteFunc() + Expect(err).NotTo(HaveOccurred()) + + deploymentValidator = managedcluster.NewProviderValidator( + managedcluster.TemplateAzureHostedCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), standaloneClient) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + deploymentValidator = managedcluster.NewProviderValidator( + 
managedcluster.TemplateAzureStandaloneCP, + hdName, + managedcluster.ValidationActionDelete, + ) + + Eventually(func() error { + return deploymentValidator.Validate(context.Background(), kc) + }).WithTimeout(10 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) + + }) + }) }) +func deployOnAzureCluster(kubeCfgPath string) { + GinkgoT().Helper() + GinkgoT().Setenv("KUBECONFIG", kubeCfgPath) + cmd := exec.Command("kubectl", "create", "-f", + "https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/deploy/example/"+ + "storageclass-azuredisk-csi.yaml") + _, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("kubectl", "patch", "storageclass", "managed-csi", "-p", + "{\"metadata\": {\"annotations\":{\"storageclass.kubernetes.io/is-default-class\":\"true\"}}}") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-deploy") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + + cmd = exec.Command("make", "dev-templates") + _, err = utils.Run(cmd) + Expect(err).NotTo(HaveOccurred()) + Expect(os.Unsetenv("KUBECONFIG")).To(Succeed()) +} + +func setAzureEnvironmentVariables(clusterName string, kc *kubeclient.KubeClient) { + GinkgoT().Helper() + spec, err := azure.GetAzureInfo(context.Background(), clusterName, kc) + Expect(err).NotTo(HaveOccurred()) + + networkSpec, found, err := unstructured.NestedMap(spec, "networkSpec") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + vnet, found, err := unstructured.NestedMap(networkSpec, "vnet") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + vnetName := vnet["name"] + GinkgoT().Setenv("AZURE_VM_NET_NAME", vnetName.(string)) + + subnets, found, err := unstructured.NestedSlice(networkSpec, "subnets") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + resourceGroup := spec["resourceGroup"] + GinkgoT().Setenv("AZURE_RESOURCE_GROUP", resourceGroup.(string)) + 
subnetMap, ok := subnets[0].(map[string]interface{}) + Expect(ok).To(BeTrue()) + subnetName := subnetMap["name"] + GinkgoT().Setenv("AZURE_NODE_SUBNET", subnetName.(string)) + + securityGroup, found, err := unstructured.NestedMap(subnetMap, "securityGroup") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + securityGroupName := securityGroup["name"] + GinkgoT().Setenv("AZURE_SECURITY_GROUP", securityGroupName.(string)) + + routeTable, found, err := unstructured.NestedMap(subnetMap, "routeTable") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + routeTableName := routeTable["name"] + GinkgoT().Setenv("AZURE_ROUTE_TABLE", routeTableName.(string)) +} + // templateBy wraps a Ginkgo By with a block describing the template being // tested. func templateBy(t managedcluster.Template, description string) { diff --git a/test/managedcluster/azure/azure.go b/test/managedcluster/azure/azure.go new file mode 100644 index 00000000..f8bb078f --- /dev/null +++ b/test/managedcluster/azure/azure.go @@ -0,0 +1,126 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package azure + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + + "github.com/a8m/envsubst" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/discovery" + "k8s.io/client-go/restmapper" + + hmc "github.com/Mirantis/hmc/api/v1alpha1" + "github.com/Mirantis/hmc/test/kubeclient" +) + +func GetAzureInfo(ctx context.Context, name string, kc *kubeclient.KubeClient) (map[string]interface{}, error) { + resourceId := schema.GroupVersionResource{ + Group: "infrastructure.cluster.x-k8s.io", + Version: "v1beta1", + Resource: "azureclusters", + } + + dc := kc.GetDynamicClient(resourceId) + list, err := dc.List(ctx, metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{hmc.FluxHelmChartNameKey: name}).String(), + }) + + if err != nil || len(list.Items) == 0 { + return nil, fmt.Errorf("clusters %s not found", name) + } + + spec, found, err := unstructured.NestedMap(list.Items[0].Object, "spec") + if !found || err != nil { + return nil, fmt.Errorf("clusters spec %s not found", name) + } + return spec, nil +} + +func CreateCredentialSecret(ctx context.Context, kc *kubeclient.KubeClient) error { + serializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + yamlFile, err := os.ReadFile("./config/dev/azure-credentials.yaml") + + if err != nil { + return fmt.Errorf("failed to read azure credential file: %w", err) + } + + yamlFile, err = envsubst.Bytes(yamlFile) + if err != nil { + return fmt.Errorf("failed to process azure credential file: %w", err) + } + + c := discovery.NewDiscoveryClientForConfigOrDie(kc.Config) + groupResources, err := restmapper.GetAPIGroupResources(c) + if err != nil { + return 
fmt.Errorf("failed to fetch group resources: %w", err) + } + + yamlReader := yamlutil.NewYAMLReader(bufio.NewReader(bytes.NewReader(yamlFile))) + for { + yamlDoc, err := yamlReader.Read() + + if err != nil { + if err == io.EOF { + break + } + return fmt.Errorf("failed to process azure credential file: %w", err) + + } + + credentialResource := &unstructured.Unstructured{} + _, _, err = serializer.Decode(yamlDoc, nil, credentialResource) + if err != nil { + return fmt.Errorf("failed to deserialize azure credential object: %w", err) + } + + mapper := restmapper.NewDiscoveryRESTMapper(groupResources) + mapping, err := mapper.RESTMapping(credentialResource.GroupVersionKind().GroupKind()) + + if err != nil { + return fmt.Errorf("failed to create rest mapper: %w", err) + } + + dc := kc.GetDynamicClient(schema.GroupVersionResource{ + Group: credentialResource.GroupVersionKind().Group, + Version: credentialResource.GroupVersionKind().Version, + Resource: mapping.Resource.Resource, + }) + + exists, err := dc.Get(ctx, credentialResource.GetName(), metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to check for existing credential: %w", err) + } + + if exists == nil { + if _, err = dc.Create(ctx, credentialResource, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("failed to create azure credentials: %w", err) + } + } + } + + return nil +} diff --git a/test/managedcluster/managedcluster.go b/test/managedcluster/managedcluster.go index 2a08b53e..53fa3652 100644 --- a/test/managedcluster/managedcluster.go +++ b/test/managedcluster/managedcluster.go @@ -26,6 +26,8 @@ import ( . 
"github.com/onsi/gomega" "gopkg.in/yaml.v3" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/Mirantis/hmc/internal/utils" ) type ProviderType string @@ -44,6 +46,8 @@ type Template string const ( TemplateAWSStandaloneCP Template = "aws-standalone-cp" TemplateAWSHostedCP Template = "aws-hosted-cp" + TemplateAzureHostedCP Template = "azure-hosted-cp" + TemplateAzureStandaloneCP Template = "azure-standalone-cp" TemplateVSphereStandaloneCP Template = "vsphere-standalone-cp" TemplateVSphereHostedCP Template = "vsphere-hosted-cp" ) @@ -54,6 +58,12 @@ var awsStandaloneCPManagedClusterTemplateBytes []byte //go:embed resources/aws-hosted-cp.yaml.tpl var awsHostedCPManagedClusterTemplateBytes []byte +//go:embed resources/azure-standalone-cp.yaml.tpl +var azureStandaloneCPManagedClusterTemplateBytes []byte + +//go:embed resources/azure-hosted-cp.yaml.tpl +var azureHostedCPManagedClusterTemplateBytes []byte + //go:embed resources/vsphere-standalone-cp.yaml.tpl var vsphereStandaloneCPManagedClusterTemplateBytes []byte @@ -66,17 +76,18 @@ func GetProviderLabel(provider ProviderType) string { // GetUnstructured returns an unstructured ManagedCluster object based on the // provider and template. 
-func GetUnstructured(templateName Template) *unstructured.Unstructured { +func GetUnstructured(templateName Template, provider ProviderType) *unstructured.Unstructured { GinkgoHelper() generatedName := os.Getenv(EnvVarManagedClusterName) if generatedName == "" { - generatedName = uuid.New().String()[:8] + "-e2e-test" + generatedName = "e2e-test-" + uuid.New().String()[:8] _, _ = fmt.Fprintf(GinkgoWriter, "Generated cluster name: %q\n", generatedName) GinkgoT().Setenv(EnvVarManagedClusterName, generatedName) } else { _, _ = fmt.Fprintf(GinkgoWriter, "Using configured cluster name: %q\n", generatedName) } + generatedName = generatedName + string(provider) var hostedName string if strings.Contains(string(templateName), "-hosted") { @@ -104,10 +115,15 @@ func GetUnstructured(templateName Template) *unstructured.Unstructured { managedClusterTemplateBytes = vsphereStandaloneCPManagedClusterTemplateBytes case TemplateVSphereHostedCP: managedClusterTemplateBytes = vsphereHostedCPManagedClusterTemplateBytes + case TemplateAzureHostedCP: + managedClusterTemplateBytes = azureHostedCPManagedClusterTemplateBytes + case TemplateAzureStandaloneCP: + managedClusterTemplateBytes = azureStandaloneCPManagedClusterTemplateBytes default: - Fail(fmt.Sprintf("unsupported AWS template: %s", templateName)) + Fail(fmt.Sprintf("unsupported template: %s", templateName)) } + Expect(os.Setenv("NAMESPACE", utils.DefaultSystemNamespace)).NotTo(HaveOccurred()) managedClusterConfigBytes, err := envsubst.Bytes(managedClusterTemplateBytes) Expect(err).NotTo(HaveOccurred(), "failed to substitute environment variables") diff --git a/test/managedcluster/providervalidator.go b/test/managedcluster/providervalidator.go index fd474253..2deae8ff 100644 --- a/test/managedcluster/providervalidator.go +++ b/test/managedcluster/providervalidator.go @@ -64,6 +64,8 @@ func NewProviderValidator(template Template, clusterName string, action Validati case TemplateAWSStandaloneCP, TemplateAWSHostedCP: 
resourcesToValidate["ccm"] = validateCCM resourceOrder = append(resourceOrder, "ccm") + case TemplateAzureStandaloneCP, TemplateAzureHostedCP: + delete(resourcesToValidate, "csi-driver") } } else { resourcesToValidate = map[string]resourceValidationFunc{ diff --git a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl b/test/managedcluster/resources/aws-hosted-cp.yaml.tpl index 06a4cf4c..3d3cbc69 100644 --- a/test/managedcluster/resources/aws-hosted-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${HOSTED_MANAGED_CLUSTER_NAME} + name: ${HOSTED_MANAGED_CLUSTER_NAME}-aws spec: template: aws-hosted-cp config: diff --git a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl index 0d107ca4..8b8943c2 100644 --- a/test/managedcluster/resources/aws-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/aws-standalone-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-aws spec: template: aws-standalone-cp config: diff --git a/test/managedcluster/resources/azure-hosted-cp.yaml.tpl b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl new file mode 100644 index 00000000..6b8f7ad9 --- /dev/null +++ b/test/managedcluster/resources/azure-hosted-cp.yaml.tpl @@ -0,0 +1,23 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-hosted-cp + config: + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: hmc-system + resourceGroup: "${AZURE_RESOURCE_GROUP}" + network: + vnetName: "${AZURE_VM_NET_NAME}" + nodeSubnetName: "${AZURE_NODE_SUBNET}" + routeTableName: "${AZURE_ROUTE_TABLE}" + 
securityGroupName: "${AZURE_SECURITY_GROUP}" + tenantID: "${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/azure-standalone-cp.yaml.tpl b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl new file mode 100644 index 00000000..44d5abf6 --- /dev/null +++ b/test/managedcluster/resources/azure-standalone-cp.yaml.tpl @@ -0,0 +1,22 @@ +apiVersion: hmc.mirantis.com/v1alpha1 +kind: ManagedCluster +metadata: + name: ${MANAGED_CLUSTER_NAME}-azure + namespace: ${NAMESPACE} +spec: + template: azure-standalone-cp + config: + controlPlaneNumber: 1 + workersNumber: 1 + location: "westus" + subscriptionID: "${AZURE_SUBSCRIPTION_ID}" + controlPlane: + vmSize: Standard_A4_v2 + worker: + vmSize: Standard_A4_v2 + clusterIdentity: + name: azure-cluster-identity + namespace: ${NAMESPACE} + tenantID: "${AZURE_TENANT_ID}" + clientID: "${AZURE_CLIENT_ID}" + clientSecret: "${AZURE_CLIENT_SECRET}" diff --git a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl index 2c556d9c..a4c328b7 100644 --- a/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-hosted-cp.yaml.tpl @@ -1,7 +1,7 @@ apiVersion: hmc.mirantis.com/v1alpha1 kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-hosted-cp config: diff --git a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl index 98d19325..3a7819e8 100644 --- a/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl +++ b/test/managedcluster/resources/vsphere-standalone-cp.yaml.tpl @@ -3,7 +3,7 @@ kind: ManagedCluster metadata: - name: ${MANAGED_CLUSTER_NAME} + name: ${MANAGED_CLUSTER_NAME}-vsphere spec: template: vsphere-standalone-cp config: controlPlaneNumber: ${CONTROL_PLANE_NUMBER:=1} workersNumber:
${WORKERS_NUMBER:=1} diff --git a/test/managedcluster/validate_deployed.go b/test/managedcluster/validate_deployed.go index b80d8420..5a065338 100644 --- a/test/managedcluster/validate_deployed.go +++ b/test/managedcluster/validate_deployed.go @@ -247,7 +247,7 @@ func validateCCM(ctx context.Context, kc *kubeclient.KubeClient, clusterName str } for _, i := range service.Status.LoadBalancer.Ingress { - if i.Hostname != "" { + if i.Hostname != "" || i.IP != "" { return nil } }