From d004496bed67faf1a6b839558b79125af9ba7243 Mon Sep 17 00:00:00 2001 From: Richard Case Date: Tue, 21 Jan 2025 08:51:59 +0000 Subject: [PATCH] fix: e2e changes to remove v1.23 This changes the EKS upgrade test to use a newer version of Kubernetes. It was using v1.23, which isn't supported. Removed old CSI tests as they were testing upgrades to v1.23 for unmanaged clusters. Removed references to the CoreDNS addon in EKS tests. Signed-off-by: Richard Case --- test/e2e/data/e2e_conf.yaml | 3 - test/e2e/data/e2e_eks_conf.yaml | 6 +- ...late-eks-control-plane-only-withaddon.yaml | 4 - .../cluster-template-eks-ipv6-cluster.yaml | 3 - test/e2e/shared/defaults.go | 6 +- test/e2e/suites/managed/eks_ipv6_test.go | 1 - test/e2e/suites/managed/eks_legacy_test.go | 3 +- test/e2e/suites/managed/eks_test.go | 28 +- test/e2e/suites/managed/upgrade_test.go | 20 +- test/e2e/suites/unmanaged/helpers_test.go | 321 ------------------ .../unmanaged/unmanaged_functional_test.go | 236 ------------- 11 files changed, 23 insertions(+), 608 deletions(-) diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml index b45b5b364e..9c6dc28a4d 100644 --- a/test/e2e/data/e2e_conf.yaml +++ b/test/e2e/data/e2e_conf.yaml @@ -180,9 +180,6 @@ variables: KUBERNETES_VERSION: "v1.29.9" KUBERNETES_VERSION_UPGRADE_TO: "v1.29.9" KUBERNETES_VERSION_UPGRADE_FROM: "v1.29.8" - # Pre and post 1.23 Kubernetes versions are being used for CSI upgrade tests - PRE_1_23_KUBERNETES_VERSION: "v1.22.17" - POST_1_23_KUBERNETES_VERSION: "v1.23.15" CNI: "../../data/cni/calico.yaml" KUBETEST_CONFIGURATION: "../../data/kubetest/conformance.yaml" EVENT_BRIDGE_INSTANCE_STATE: "true" diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index 44d486f6a5..b17caf98a8 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -16,7 +16,7 @@ images: - name: gcr.io/k8s-staging-cluster-api/capa-manager:e2e loadBehavior: mustLoad -## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS + ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS - name: quay.io/jetstack/cert-manager-cainjector:v1.15.1 loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-webhook:v1.15.1 loadBehavior: tryLoad @@ -118,6 +118,8 @@ providers: variables: KUBERNETES_VERSION: "v1.30.2" KUBERNETES_VERSION_MANAGEMENT: "v1.30.0" # Kind bootstrap + UPGRADE_FROM_VERSION: "v1.30.0" + UPGRADE_TO_VERSION: "v1.31.0" EXP_MACHINE_POOL: "true" EXP_CLUSTER_RESOURCE_SET: "true" EVENT_BRIDGE_INSTANCE_STATE: "true" @@ -127,8 +129,6 @@ variables: EXP_EKS_IAM: "false" EXP_EKS_ADD_ROLES: "false" VPC_ADDON_VERSION: "v1.18.1-eksbuild.3" - COREDNS_ADDON_VERSION: "v1.11.1-eksbuild.8" - COREDNS_ADDON_CONFIGURATION: '{"replicaCount":3}' KUBE_PROXY_ADDON_VERSION: "v1.30.0-eksbuild.3" CONFORMANCE_CI_ARTIFACTS_KUBERNETES_VERSION: "1.30.2" IP_FAMILY: "IPv4" diff --git a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml index 9108b38f3a..66ddf183a3 100644 --- a/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml +++ b/test/e2e/data/eks/cluster-template-eks-control-plane-only-withaddon.yaml @@ -40,10 +40,6 @@ spec: - name: "vpc-cni" version: "${VPC_ADDON_VERSION}" conflictResolution: "overwrite" - - name: "coredns" - version: "${COREDNS_ADDON_VERSION}" - conflictResolution: "overwrite" - configuration: '${COREDNS_ADDON_CONFIGURATION}' identityRef: kind: AWSClusterStaticIdentity name: e2e-account diff --git
a/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml index ef89483426..12cd69814c 100644 --- a/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml +++ b/test/e2e/data/eks/cluster-template-eks-ipv6-cluster.yaml @@ -45,9 +45,6 @@ spec: - name: "vpc-cni" version: "${VPC_ADDON_VERSION}" conflictResolution: "overwrite" - - name: "coredns" - version: "${COREDNS_ADDON_VERSION}" - conflictResolution: "overwrite" - name: "kube-proxy" version: "${KUBE_PROXY_ADDON_VERSION}" conflictResolution: "overwrite" diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index ca08e183d5..9a44c99700 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -43,8 +43,6 @@ const ( CNIPath = "CNI" CNIResources = "CNI_RESOURCES" CNIAddonVersion = "VPC_ADDON_VERSION" - CorednsAddonVersion = "COREDNS_ADDON_VERSION" - CorednsAddonConfiguration = "COREDNS_ADDON_CONFIGURATION" GcWorkloadPath = "GC_WORKLOAD" KubeproxyAddonVersion = "KUBE_PROXY_ADDON_VERSION" AwsNodeMachineType = "AWS_NODE_MACHINE_TYPE" @@ -66,11 +64,11 @@ const ( StorageClassOutTreeZoneLabel = "topology.ebs.csi.aws.com/zone" GPUFlavor = "gpu" InstanceVcpu = "AWS_MACHINE_TYPE_VCPU_USAGE" - PreCSIKubernetesVer = "PRE_1_23_KUBERNETES_VERSION" - PostCSIKubernetesVer = "POST_1_23_KUBERNETES_VERSION" EFSSupport = "efs-support" IntreeCloudProvider = "intree-cloud-provider" MultiTenancy = "MULTI_TENANCY_" + EksUpgradeFromVersion = "UPGRADE_FROM_VERSION" + EksUpgradeToVersion = "UPGRADE_TO_VERSION" ) // ResourceQuotaFilePath is the path to the file that contains the resource usage. diff --git a/test/e2e/suites/managed/eks_ipv6_test.go b/test/e2e/suites/managed/eks_ipv6_test.go index 891a34aa34..22743d26c3 100644 --- a/test/e2e/suites/managed/eks_ipv6_test.go +++ b/test/e2e/suites/managed/eks_ipv6_test.go @@ -51,7 +51,6 @@ var _ = ginkgo.Describe("[managed] [general] [ipv6] EKS cluster tests", func() { Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CNIAddonVersion)) - Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CorednsAddonVersion)) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubeproxyAddonVersion)) ctx = context.TODO() diff --git a/test/e2e/suites/managed/eks_legacy_test.go b/test/e2e/suites/managed/eks_legacy_test.go index 9fca76eb64..82b414a84a 100644 --- a/test/e2e/suites/managed/eks_legacy_test.go +++ b/test/e2e/suites/managed/eks_legacy_test.go @@ -50,7 +50,6 @@ var _ = ginkgo.Describe("[managed] [legacy] EKS cluster tests - single kind", fu Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. 
e2eConfig can't be nil when calling %s spec", specName) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CNIAddonVersion)) - Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CorednsAddonVersion)) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubeproxyAddonVersion)) ctx = context.TODO() @@ -71,7 +70,7 @@ var _ = ginkgo.Describe("[managed] [legacy] EKS cluster tests - single kind", fu Namespace: namespace, ClusterName: clusterName, Flavour: EKSControlPlaneOnlyLegacyFlavor, - ControlPlaneMachineCount: 1, //NOTE: this cannot be zero as clusterctl returns an error + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error WorkerMachineCount: 0, } }) diff --git a/test/e2e/suites/managed/eks_test.go b/test/e2e/suites/managed/eks_test.go index ec2b08d343..85e11b747c 100644 --- a/test/e2e/suites/managed/eks_test.go +++ b/test/e2e/suites/managed/eks_test.go @@ -36,12 +36,11 @@ import ( // General EKS e2e test. var _ = ginkgo.Describe("[managed] [general] EKS cluster tests", func() { var ( - namespace *corev1.Namespace - ctx context.Context - specName = "cluster" - clusterName string - cniAddonName = "vpc-cni" - corednsAddonName = "coredns" + namespace *corev1.Namespace + ctx context.Context + specName = "cluster" + clusterName string + cniAddonName = "vpc-cni" ) shared.ConditionalIt(runGeneralTests, "should create a cluster and add nodes", func() { @@ -50,7 +49,6 @@ var _ = ginkgo.Describe("[managed] [general] EKS cluster tests", func() { Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CNIAddonVersion)) - Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.CorednsAddonVersion)) ctx = context.TODO() namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) @@ -70,7 +68,7 @@ var _ = ginkgo.Describe("[managed] [general] EKS cluster tests", func() { Namespace: namespace, ClusterName: clusterName, Flavour: EKSControlPlaneOnlyWithAddonFlavor, - ControlPlaneMachineCount: 1, //NOTE: this cannot be zero as clusterctl returns an error + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error WorkerMachineCount: 0, } }) @@ -99,20 +97,6 @@ var _ = ginkgo.Describe("[managed] [general] EKS cluster tests", func() { } }) - ginkgo.By("should have the Coredns addon installed") - CheckAddonExistsSpec(ctx, func() CheckAddonExistsSpecInput { - return CheckAddonExistsSpecInput{ - E2EConfig: e2eCtx.E2EConfig, - BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, - AWSSession: e2eCtx.BootstrapUserAWSSession, - Namespace: namespace, - ClusterName: clusterName, - AddonName: corednsAddonName, - AddonVersion: e2eCtx.E2EConfig.GetVariable(shared.CorednsAddonVersion), - AddonConfiguration: e2eCtx.E2EConfig.GetVariable(shared.CorednsAddonConfiguration), - } - }) - ginkgo.By("should create a MachineDeployment") MachineDeploymentSpec(ctx, func() MachineDeploymentSpecInput { return MachineDeploymentSpecInput{ diff --git a/test/e2e/suites/managed/upgrade_test.go b/test/e2e/suites/managed/upgrade_test.go index ea135689cf..46f1df4e23 100644 --- a/test/e2e/suites/managed/upgrade_test.go +++ b/test/e2e/suites/managed/upgrade_test.go @@ -35,27 +35,29 @@ import ( // EKS cluster upgrade tests. 
var _ = ginkgo.Describe("EKS Cluster upgrade test", func() { - const ( - initialVersion = "v1.23.6" - upgradeToVersion = "v1.24.4" - ) var ( - namespace *corev1.Namespace - ctx context.Context - specName = "eks-upgrade" - clusterName string + namespace *corev1.Namespace + ctx context.Context + specName = "eks-upgrade" + clusterName string + initialVersion string + upgradeToVersion string ) shared.ConditionalIt(runUpgradeTests, "[managed] [upgrade] should create a cluster and upgrade the kubernetes version", func() { ginkgo.By("should have a valid test configuration") Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) - Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeFromVersion)) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.EksUpgradeToVersion)) ctx = context.TODO() namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + initialVersion = e2eCtx.E2EConfig.GetVariable(shared.EksUpgradeFromVersion) + upgradeToVersion = e2eCtx.E2EConfig.GetVariable(shared.EksUpgradeToVersion) + ginkgo.By("default iam role should exist") VerifyRoleExistsAndOwned(ekscontrolplanev1.DefaultEKSControlPlaneRole, clusterName, false, e2eCtx.BootstrapUserAWSSession) diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index 0e1cd4c354..220341c275 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -34,14 +34,11 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/efs" - "github.com/aws/aws-sdk-go/service/elb" "github.com/blang/semver" "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" @@ -59,24 +56,6 @@ import ( "sigs.k8s.io/cluster-api/util/conditions" ) -type statefulSetInfo struct { - name string - namespace string - replicas int32 - selector map[string]string - storageClassName string - volumeName string - svcName string - svcPort int32 - svcPortName string - containerName string - containerImage string - containerPort int32 - podTerminationGracePeriod int64 - volMountPath string - isInTreeCSI bool -} - // GetClusterByName returns a Cluster object given his name. 
func GetAWSClusterByName(ctx context.Context, namespace, name string) (*infrav1.AWSCluster, error) { cluster := &clusterv1.Cluster{} @@ -124,202 +103,6 @@ func defaultConfigCluster(clusterName, namespace string) clusterctl.ConfigCluste } } -func createLBService(svcNamespace string, svcName string, k8sclient crclient.Client) string { - ginkgo.By(fmt.Sprintf("Creating service of type Load Balancer with name: %s under namespace: %s", svcName, svcNamespace)) - svcSpec := corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - Selector: map[string]string{ - "app": "nginx", - }, - } - createService(svcName, svcNamespace, nil, svcSpec, k8sclient) - // this sleep is required for the service to get updated with ingress details - time.Sleep(15 * time.Second) - svcCreated := &corev1.Service{} - err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: svcNamespace, Name: svcName}, svcCreated) - Expect(err).NotTo(HaveOccurred()) - elbName := "" - if lbs := len(svcCreated.Status.LoadBalancer.Ingress); lbs > 0 { - ingressHostname := svcCreated.Status.LoadBalancer.Ingress[0].Hostname - elbName = strings.Split(ingressHostname, "-")[0] - } - ginkgo.By(fmt.Sprintf("Created Load Balancer service and ELB name is: %s", elbName)) - - return elbName -} - -func deleteLBService(svcNamespace string, svcName string, k8sclient crclient.Client) { - svcSpec := corev1.ServiceSpec{ - Type: corev1.ServiceTypeLoadBalancer, - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - Selector: map[string]string{ - "app": "nginx", - }, - } - deleteService(svcName, svcNamespace, nil, svcSpec, k8sclient) -} - -func createPodTemplateSpec(statefulsetinfo statefulSetInfo) corev1.PodTemplateSpec { - ginkgo.By("Creating PodTemplateSpec config object") - podTemplateSpec := corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: statefulsetinfo.name, - Labels: statefulsetinfo.selector, - }, - Spec: corev1.PodSpec{ - TerminationGracePeriodSeconds: &statefulsetinfo.podTerminationGracePeriod, - Containers: []corev1.Container{ - { - Name: statefulsetinfo.containerName, - Image: statefulsetinfo.containerImage, - Ports: []corev1.ContainerPort{{Name: statefulsetinfo.svcPortName, ContainerPort: statefulsetinfo.containerPort}}, - VolumeMounts: []corev1.VolumeMount{ - {Name: statefulsetinfo.volumeName, MountPath: statefulsetinfo.volMountPath}, - }, - }, - }, - Volumes: []corev1.Volume{ - { - Name: statefulsetinfo.volumeName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: statefulsetinfo.volumeName}, - }, - }, - }, - }, - } - return podTemplateSpec -} - -func createPVC(statefulsetinfo statefulSetInfo) corev1.PersistentVolumeClaim { - ginkgo.By("Creating PersistentVolumeClaim config object") - volClaimTemplate := corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: statefulsetinfo.volumeName, - }, - Spec: corev1.PersistentVolumeClaimSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - StorageClassName: &statefulsetinfo.storageClassName, - Resources: corev1.VolumeResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("4Gi"), - }, - }, - }, - } - return volClaimTemplate -} - -func createService(svcName string, svcNamespace string, labels map[string]string, serviceSpec corev1.ServiceSpec, k8sClient crclient.Client) { - svcToCreate := 
corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: svcNamespace, - Name: svcName, - }, - Spec: serviceSpec, - } - if len(labels) > 0 { - svcToCreate.ObjectMeta.Labels = labels - } - Expect(k8sClient.Create(context.TODO(), &svcToCreate)).NotTo(HaveOccurred()) -} - -func deleteService(svcName string, svcNamespace string, labels map[string]string, serviceSpec corev1.ServiceSpec, k8sClient crclient.Client) { - svcToDelete := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: svcNamespace, - Name: svcName, - }, - Spec: serviceSpec, - } - if len(labels) > 0 { - svcToDelete.ObjectMeta.Labels = labels - } - Expect(k8sClient.Delete(context.TODO(), &svcToDelete)).NotTo(HaveOccurred()) -} - -func createStatefulSet(statefulsetinfo statefulSetInfo, k8sclient crclient.Client) { - ginkgo.By("Creating statefulset") - svcSpec := corev1.ServiceSpec{ - ClusterIP: "None", - Ports: []corev1.ServicePort{ - { - Port: statefulsetinfo.svcPort, - Name: statefulsetinfo.svcPortName, - }, - }, - Selector: statefulsetinfo.selector, - } - createService(statefulsetinfo.svcName, statefulsetinfo.namespace, statefulsetinfo.selector, svcSpec, k8sclient) - createStorageClass(statefulsetinfo.isInTreeCSI, statefulsetinfo.storageClassName, k8sclient) - podTemplateSpec := createPodTemplateSpec(statefulsetinfo) - volClaimTemplate := createPVC(statefulsetinfo) - deployStatefulSet(statefulsetinfo, volClaimTemplate, podTemplateSpec, k8sclient) - waitForStatefulSetRunning(statefulsetinfo, k8sclient) -} - -func createStorageClass(isIntree bool, storageClassName string, k8sclient crclient.Client) { - ginkgo.By(fmt.Sprintf("Creating StorageClass object with name: %s", storageClassName)) - volExpansion := true - bindingMode := storagev1.VolumeBindingWaitForFirstConsumer - azs := shared.GetAvailabilityZones(e2eCtx.AWSSession) - - provisioner := "ebs.csi.aws.com" - params := map[string]string{ - "csi.storage.k8s.io/fstype": "xfs", - "type": "io1", - "iopsPerGB": "100", - } - allowedTopo := []corev1.TopologySelectorTerm{{ - MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{ - Key: shared.StorageClassOutTreeZoneLabel, - Values: []string{*azs[0].ZoneName}, - }}, - }} - if isIntree { - provisioner = "kubernetes.io/aws-ebs" - params = map[string]string{ - "type": "gp2", - } - - allowedTopo = nil - } - storageClass := &storagev1.StorageClass{} - if err := k8sclient.Get(context.TODO(), crclient.ObjectKey{ - Name: storageClassName, - Namespace: metav1.NamespaceDefault, - }, storageClass); err != nil { - if apierrors.IsNotFound(err) { - storageClass = &storagev1.StorageClass{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "storage.k8s.io/v1", - Kind: "StorageClass", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: storageClassName, - }, - Parameters: params, - Provisioner: provisioner, - AllowVolumeExpansion: &volExpansion, - VolumeBindingMode: &bindingMode, - AllowedTopologies: allowedTopo, - } - Expect(k8sclient.Create(context.TODO(), storageClass)).NotTo(HaveOccurred()) - } - } -} - func deleteCluster(ctx context.Context, cluster *clusterv1.Cluster) { framework.DeleteCluster(ctx, framework.DeleteClusterInput{ Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), @@ -351,35 +134,6 @@ func deleteMachine(namespace *corev1.Namespace, md *clusterv1.MachineDeployment) Expect(bootstrapClient.Delete(context.TODO(), machine)).To(Succeed()) } -func deleteRetainedVolumes(awsVolIDs []*string) { - ginkgo.By("Deleting dynamically provisioned volumes") - ec2Client := ec2.New(e2eCtx.AWSSession) - for _, volumeID := range 
awsVolIDs { - input := &ec2.DeleteVolumeInput{ - VolumeId: aws.String(*volumeID), - } - _, err := ec2Client.DeleteVolume(input) - Expect(err).NotTo(HaveOccurred()) - ginkgo.By(fmt.Sprintf("Deleted dynamically provisioned volume with ID: %s", *volumeID)) - } -} - -func deployStatefulSet(statefulsetinfo statefulSetInfo, volClaimTemp corev1.PersistentVolumeClaim, podTemplate corev1.PodTemplateSpec, k8sclient crclient.Client) { - ginkgo.By(fmt.Sprintf("Deploying Statefulset with name: %s under namespace: %s", statefulsetinfo.name, statefulsetinfo.namespace)) - statefulset := appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{Name: statefulsetinfo.name, Namespace: statefulsetinfo.namespace}, - Spec: appsv1.StatefulSetSpec{ - ServiceName: statefulsetinfo.svcName, - Replicas: &statefulsetinfo.replicas, - Selector: &metav1.LabelSelector{MatchLabels: statefulsetinfo.selector}, - Template: podTemplate, - VolumeClaimTemplates: []corev1.PersistentVolumeClaim{volClaimTemp}, - }, - } - err := k8sclient.Create(context.TODO(), &statefulset) - Expect(err).NotTo(HaveOccurred()) -} - func getEvents(namespace string) *corev1.EventList { eventsList := &corev1.EventList{} if err := e2eCtx.Environment.BootstrapClusterProxy.GetClient().List(context.TODO(), eventsList, crclient.InNamespace(namespace), crclient.MatchingLabels{}); err != nil { @@ -418,37 +172,6 @@ func getSubnetID(filterKey, filterValue, clusterName string) *string { return subnetOutput.Subnets[0].SubnetId } -func getVolumeIDs(info statefulSetInfo, k8sclient crclient.Client) []*string { - ginkgo.By("Retrieving IDs of dynamically provisioned volumes.") - statefulset := &appsv1.StatefulSet{} - err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: info.name}, statefulset) - Expect(err).NotTo(HaveOccurred()) - podSelector, err := metav1.LabelSelectorAsMap(statefulset.Spec.Selector) - Expect(err).NotTo(HaveOccurred()) - pvcList := &corev1.PersistentVolumeClaimList{} - err = k8sclient.List(context.TODO(), pvcList, crclient.InNamespace(info.namespace), crclient.MatchingLabels(podSelector)) - Expect(err).NotTo(HaveOccurred()) - volIDs := make([]*string, len(pvcList.Items)) - for i, pvc := range pvcList.Items { - volName := pvc.Spec.VolumeName - volDescription := &corev1.PersistentVolume{} - err = k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: volName}, volDescription) - Expect(err).NotTo(HaveOccurred()) - - url := "" - // Out-of-tree ebs CSI use .Spec.PersistentVolumeSource.CSI path - // In-tree ebs CSI use .Spec.PersistentVolumeSource.AWSElasticBlockStore path - if volDescription.Spec.PersistentVolumeSource.CSI != nil { - url = volDescription.Spec.PersistentVolumeSource.CSI.VolumeHandle - } else if volDescription.Spec.PersistentVolumeSource.AWSElasticBlockStore != nil { - str := strings.Split(volDescription.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID, "vol-") - url = "vol-" + str[1] - } - volIDs[i] = &url - } - return volIDs -} - func isErrorEventExists(namespace, machineDeploymentName, eventReason, errorMsg string, eList *corev1.EventList) bool { k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient() machineDeployment := &clusterv1.MachineDeployment{} @@ -662,50 +385,6 @@ func terminateInstance(instanceID string) { Expect(*result.TerminatingInstances[0].CurrentState.Code).To(Equal(termCode)) } -func verifyElbExists(elbName string, exists bool) { - ginkgo.By(fmt.Sprintf("Verifying ELB with name %s present", elbName)) - elbClient := 
elb.New(e2eCtx.AWSSession) - input := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{ - aws.String(elbName), - }, - } - elbsOutput, err := elbClient.DescribeLoadBalancers(input) - if exists { - Expect(err).NotTo(HaveOccurred()) - Expect(len(elbsOutput.LoadBalancerDescriptions)).To(Equal(1)) - ginkgo.By(fmt.Sprintf("ELB with name %s exists", elbName)) - } else { - aerr, ok := err.(awserr.Error) - Expect(ok).To(BeTrue()) - Expect(aerr.Code()).To(Equal(elb.ErrCodeAccessPointNotFoundException)) - ginkgo.By(fmt.Sprintf("ELB with name %s doesn't exists", elbName)) - } -} - -func verifyVolumesExists(awsVolumeIDs []*string) { - ginkgo.By("Ensuring dynamically provisioned volumes exists") - ec2Client := ec2.New(e2eCtx.AWSSession) - input := &ec2.DescribeVolumesInput{ - VolumeIds: awsVolumeIDs, - } - _, err := ec2Client.DescribeVolumes(input) - Expect(err).NotTo(HaveOccurred()) -} - -func waitForStatefulSetRunning(info statefulSetInfo, k8sclient crclient.Client) { - ginkgo.By(fmt.Sprintf("Ensuring Statefulset(%s) is running", info.name)) - statefulset := &appsv1.StatefulSet{} - Eventually( - func() (bool, error) { - if err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: info.name}, statefulset); err != nil { - return false, err - } - return *statefulset.Spec.Replicas == statefulset.Status.ReadyReplicas, nil - }, 10*time.Minute, 30*time.Second, - ).Should(BeTrue(), fmt.Sprintf("Eventually failed waiting for StatefulSet %s to be running", info.name)) -} - // LatestCIReleaseForVersion returns the latest ci release of a specific version. func LatestCIReleaseForVersion(searchVersion string) (string, error) { ciVersionURL := "https://dl.k8s.io/ci/latest-%d.%d.txt" diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index 6cca7cfb20..048782977e 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -305,222 +305,6 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) }) - // todo: Fix and enable back the tests ASAP. 
- ginkgo.PDescribe("CSI=in-tree CCM=in-tree AWSCSIMigration=off: upgrade to v1.23", func() { - ginkgo.It("should create volumes dynamically with in tree CSI driver and in tree cloud provider", func() { - specName := "csimigration-off-upgrade" - if !e2eCtx.Settings.SkipQuotas { - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) - } - namespace := shared.SetupNamespace(ctx, specName, e2eCtx) - defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) - - ginkgo.By("Creating first cluster with single control plane") - cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) - configCluster := defaultConfigCluster(cluster1Name, namespace.Name) - configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer) - configCluster.WorkerMachineCount = ptr.To[int64](1) - configCluster.Flavor = shared.IntreeCloudProvider - createCluster(ctx, configCluster, result) - - // Create statefulSet with PVC and confirm it is working with in-tree providers - nginxStatefulsetInfo := createStatefulSetInfo(true, "intree") - - ginkgo.By("Deploying StatefulSet on infra") - clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() - - createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIDs) - - kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) - configCluster.KubernetesVersion = kubernetesUgradeVersion - configCluster.Flavor = "csimigration-off" - - cluster2, _, kcp := createCluster(ctx, configCluster, result) - - ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version") - framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{ - Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), - Cluster: cluster2, - MachineCount: int(*kcp.Spec.Replicas), - KubernetesUpgradeVersion: kubernetesUgradeVersion, - }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...) 
- - ginkgo.By("Creating the LB service") - lbServiceName := TestSvc + util.RandomString(6) - elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - verifyElbExists(elbName, true) - - ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade") - waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient) - - nginxStatefulsetInfo2 := createStatefulSetInfo(true, "postupgrade") - - ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") - createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIDs) - - ginkgo.By("Deleting LB service") - deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - - ginkgo.By("Deleting the Clusters") - deleteCluster(ctx, cluster2) - - ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIDs) - ginkgo.By("PASSED!") - }) - }) - - ginkgo.PDescribe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() { - ginkgo.It("should create volumes dynamically with external CSI driver and in tree cloud provider", func() { - specName := "only-csi-external-upgrade" - if !e2eCtx.Settings.SkipQuotas { - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) - } - namespace := shared.SetupNamespace(ctx, specName, e2eCtx) - defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) - ginkgo.By("Creating first cluster with single control plane") - cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) - - configCluster := defaultConfigCluster(cluster1Name, namespace.Name) - configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer) - configCluster.WorkerMachineCount = ptr.To[int64](1) - configCluster.Flavor = shared.IntreeCloudProvider - createCluster(ctx, configCluster, result) - - // Create statefulSet with PVC and confirm it is working with in-tree providers - nginxStatefulsetInfo := createStatefulSetInfo(true, "intree") - - ginkgo.By("Deploying StatefulSet on infra") - clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() - - createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIDs) - - kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) - - configCluster.KubernetesVersion = kubernetesUgradeVersion - configCluster.Flavor = "external-csi" - - cluster2, _, kcp := createCluster(ctx, configCluster, result) - - ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version") - framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{ - Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), - Cluster: cluster2, - MachineCount: int(*kcp.Spec.Replicas), - KubernetesUpgradeVersion: kubernetesUgradeVersion, - }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...) 
- - ginkgo.By("Creating the LB service") - lbServiceName := TestSvc + util.RandomString(6) - elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - verifyElbExists(elbName, true) - - ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade") - waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient) - - nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade") - - ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") - createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIDs) - - ginkgo.By("Deleting LB service") - deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - - ginkgo.By("Deleting the Clusters") - deleteCluster(ctx, cluster2) - - ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIDs) - ginkgo.By("PASSED!") - }) - }) - - ginkgo.PDescribe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() { - ginkgo.It("should create volumes dynamically with external CSI driver and external cloud provider", func() { - specName := "csi-ccm-external-upgrade" - if !e2eCtx.Settings.SkipQuotas { - requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4, EventBridgeRules: 50} - requiredResources.WriteRequestedResources(e2eCtx, specName) - Expect(shared.AcquireResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath))).To(Succeed()) - defer shared.ReleaseResources(requiredResources, ginkgo.GinkgoParallelProcess(), flock.New(shared.ResourceQuotaFilePath)) - } - namespace := shared.SetupNamespace(ctx, specName, e2eCtx) - defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx) - - ginkgo.By("Creating first cluster with single control plane") - cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) - configCluster := defaultConfigCluster(cluster1Name, namespace.Name) - configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer) - - configCluster.WorkerMachineCount = ptr.To[int64](1) - configCluster.Flavor = shared.IntreeCloudProvider - createCluster(ctx, configCluster, result) - - // Create statefulSet with PVC and confirm it is working with in-tree providers - nginxStatefulsetInfo := createStatefulSetInfo(true, "intree") - - ginkgo.By("Deploying StatefulSet on infra") - clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient() - - createStatefulSet(nginxStatefulsetInfo, clusterClient) - awsVolIDs := getVolumeIDs(nginxStatefulsetInfo, clusterClient) - verifyVolumesExists(awsVolIDs) - - kubernetesUgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer) - configCluster.KubernetesVersion = kubernetesUgradeVersion - configCluster.Flavor = "upgrade-to-external-cloud-provider" - - cluster2, _, kcp := createCluster(ctx, configCluster, result) - - ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version") - framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{ - Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), - Cluster: cluster2, - MachineCount: int(*kcp.Spec.Replicas), - KubernetesUpgradeVersion: kubernetesUgradeVersion, - }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...) 
- - ginkgo.By("Creating the LB service") - lbServiceName := TestSvc + util.RandomString(6) - elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - verifyElbExists(elbName, true) - - ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade") - waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient) - - nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade") - - ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23") - createStatefulSet(nginxStatefulsetInfo2, clusterClient) - awsVolIDs = getVolumeIDs(nginxStatefulsetInfo2, clusterClient) - verifyVolumesExists(awsVolIDs) - - ginkgo.By("Deleting LB service") - deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient) - - ginkgo.By("Deleting the Clusters") - deleteCluster(ctx, cluster2) - - ginkgo.By("Deleting retained dynamically provisioned volumes") - deleteRetainedVolumes(awsVolIDs) - ginkgo.By("PASSED!") - }) - }) - ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend", func() { ginkgo.It("should be creatable and deletable", func() { specName := "functional-test-ssm-parameter-store" @@ -1174,23 +958,3 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) }) }) - -func createStatefulSetInfo(isIntreeCSI bool, prefix string) statefulSetInfo { - return statefulSetInfo{ - name: fmt.Sprintf("%s%s", prefix, "-nginx-statefulset"), - namespace: metav1.NamespaceDefault, - replicas: int32(2), - selector: map[string]string{"app": fmt.Sprintf("%s%s", prefix, "-nginx")}, - storageClassName: fmt.Sprintf("%s%s", prefix, "-aws-ebs-volumes"), - volumeName: fmt.Sprintf("%s%s", prefix, "-volumes"), - svcName: fmt.Sprintf("%s%s", prefix, "-svc"), - svcPort: int32(80), - svcPortName: fmt.Sprintf("%s%s", prefix, "-web"), - containerName: fmt.Sprintf("%s%s", prefix, "-nginx"), - containerImage: "registry.k8s.io/nginx-slim:0.8", - containerPort: int32(80), - podTerminationGracePeriod: int64(30), - volMountPath: "/usr/share/nginx/html", - isInTreeCSI: isIntreeCSI, - } -}