From d3a3ef960b4cbbe976223593eb100968246d4932 Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Tue, 22 Aug 2023 20:18:12 +0800
Subject: [PATCH 1/2] chore(e2e): fix and enable e2e tests

---
 .github/workflows/ci.yml                    | 35 +++++++++++
 test/e2e/automigration/automigration.go     |  2 +-
 test/e2e/federatedcluster/clusterdelete.go  |  3 +-
 test/e2e/federatedcluster/clusterjoin.go    |  3 +-
 test/e2e/framework/framework.go             | 62 ++++++++++++++++---
 test/e2e/framework/interface.go             |  2 +
 .../framework/policies/propagationpolicy.go |  4 +-
 test/e2e/resourcepropagation/cronjobs.go    |  8 +--
 test/e2e/resourcepropagation/deployments.go |  2 +-
 test/e2e/resourcepropagation/framework.go   | 49 ++++++++-------
 test/e2e/resourcepropagation/jobs.go        |  2 +-
 test/e2e/schedulingprofile/intree.go        | 47 ++++++++------
 12 files changed, 157 insertions(+), 62 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a8d33504..a2cee3c7 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -28,6 +28,41 @@ jobs:
 
       - run: make test
 
+  e2e:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: "1.20"
+
+      - name: Install Go dependencies
+        run: |
+          go install github.com/onsi/ginkgo/v2/ginkgo
+          go install sigs.k8s.io/kwok/cmd/{kwok,kwokctl}@v0.3.0
+
+      - name: Build cluster
+        run: CLUSTER_PROVIDER=kwok make dev-up
+
+      - name: Build images
+        run: make images
+
+      - name: Run Kubeadmiral
+        run: |
+          kwokctl get kubeconfig --name kubeadmiral-host > $HOME/.kube/kubeadmiral/kubeadmiral-host.yaml
+          docker create \
+            --network host \
+            --name kubeadmiral-controller-manager \
+            ghcr.io/kubewharf/kubeadmiral-controller-manager:latest \
+            /kubeadmiral-controller-manager --kubeconfig=/etc/kubeconfig --klog-v=4 --cluster-join-timeout=1m
+          docker cp $HOME/.kube/kubeadmiral/kubeadmiral-host.yaml kubeadmiral-controller-manager:/etc/kubeconfig
+          docker start kubeadmiral-controller-manager
+
+      - name: Run tests
+        run: KUBECONFIG=$HOME/.kube/kubeadmiral/kubeadmiral-host.yaml EXTRA_GINKGO_FLAGS="-v" make e2e
+
   lint:
     runs-on: ubuntu-latest
     steps:
diff --git a/test/e2e/automigration/automigration.go b/test/e2e/automigration/automigration.go
index 8e3ca384..d9b7dbf1 100644
--- a/test/e2e/automigration/automigration.go
+++ b/test/e2e/automigration/automigration.go
@@ -52,7 +52,7 @@ var (
 	assertNoAutoMigrationDuration = 20 * time.Second
 )
 
-var _ = ginkgo.Describe("auto migration", autoMigrationTestLabel, func() {
+var _ = ginkgo.Describe("Auto Migration", autoMigrationTestLabel, func() {
 	f := framework.NewFramework("auto-migration", framework.FrameworkOptions{CreateNamespace: true})
 
 	var clusters []*fedcorev1a1.FederatedCluster
diff --git a/test/e2e/federatedcluster/clusterdelete.go b/test/e2e/federatedcluster/clusterdelete.go
index 82590823..10a0989f 100644
--- a/test/e2e/federatedcluster/clusterdelete.go
+++ b/test/e2e/federatedcluster/clusterdelete.go
@@ -87,9 +87,8 @@ var _ = ginkgo.Describe("Cluster Delete", federatedClusterTestLabels, func() {
 		// 3. service account info deleted from secret
 		secret, err = f.HostKubeClient().CoreV1().Secrets(framework.FedSystemNamespace).Get(ctx, secret.Name, metav1.GetOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
-		token, ca := getServiceAccountInfo(secret)
+		token, _ := getServiceAccountInfo(secret)
 		gomega.Expect(token).To(gomega.BeNil(), "Token data not removed from cluster secret")
-		gomega.Expect(ca).To(gomega.BeNil(), "Token data not removed from cluster secret")
 	}
 
 	ginkgo.Context("Without cascading delete", func() {
diff --git a/test/e2e/federatedcluster/clusterjoin.go b/test/e2e/federatedcluster/clusterjoin.go
index 9d46e6be..5b2ef0c5 100644
--- a/test/e2e/federatedcluster/clusterjoin.go
+++ b/test/e2e/federatedcluster/clusterjoin.go
@@ -74,9 +74,8 @@ var _ = ginkgo.Describe("Cluster Join", federatedClusterTestLabels, func() {
 			ginkgo.By("Assert cluster secret not updated with service account information")
 			secret, err := f.HostKubeClient().CoreV1().Secrets(framework.FedSystemNamespace).Get(ctx, secret.Name, metav1.GetOptions{})
 			gomega.Expect(err).ToNot(gomega.HaveOccurred())
-			token, ca := getServiceAccountInfo(secret)
+			token, _ := getServiceAccountInfo(secret)
 			gomega.Expect(token).To(gomega.BeNil())
-			gomega.Expect(ca).To(gomega.BeNil())
 		})
 	})
 
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index c383ce7c..93177188 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -38,11 +38,14 @@ import (
 	"k8s.io/client-go/dynamic"
 	kubeclient "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
 	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
+	fedinformers "github.com/kubewharf/kubeadmiral/pkg/client/informers/externalversions"
 	"github.com/kubewharf/kubeadmiral/pkg/controllers/common"
+	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/clusterprovider"
 )
 
@@ -83,6 +86,7 @@ var (
 	hostFedClient         fedclient.Interface
 	hostDynamicClient     dynamic.Interface
 	hostDiscoveryClient   discovery.DiscoveryInterface
+	ftcManager            informermanager.FederatedTypeConfigManager
 	clusterKubeClients    sync.Map
 	clusterFedClients     sync.Map
 	clusterDynamicClients sync.Map
@@ -93,7 +97,12 @@ func init() {
 	flag.StringVar(&master, "master", "", "The address of the host Kubernetes cluster.")
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "The path of the kubeconfig for the host Kubernetes cluster.")
 	flag.Float64Var(&kubeAPIQPS, "kube-api-qps", 500, "The maximum QPS from each Kubernetes client.")
-	flag.IntVar(&kubeAPIBurst, "kube-api-burst", 1000, "The maximum burst for throttling requests from each Kubernetes client.")
+	flag.IntVar(
+		&kubeAPIBurst,
+		"kube-api-burst",
+		1000,
+		"The maximum burst for throttling requests from each Kubernetes client.",
+	)
 
 	flag.StringVar(&clusterProvider, "cluster-provider", "kwok", "The cluster provider [kwok,kind] to use.")
 	flag.StringVar(
 		&kindNodeImage,
 		"kind-node-image",
 		"kindest/node:v1.20.15@sha256:a32bf55309294120616886b5338f95dd98a2f7231519c7dedcec32ba29699394",
 		"The node image to use for creating kind test clusters, it should include the image digest.",
 	)
-	flag.StringVar(&kwokImagePrefix, "kwok-image-prefix", "registry.k8s.io", "The image prefix used by kwok to pull kubernetes images.")
-	flag.StringVar(&kwokKubeVersion, "kwok-kube-version", "v1.20.15", "The kubernetes version to be used for kwok member clusters")
+	flag.StringVar(
+		&kwokImagePrefix,
+		"kwok-image-prefix",
+		"registry.k8s.io",
+		"The image prefix used by kwok to pull kubernetes images.",
+	)
+	flag.StringVar(
+		&kwokKubeVersion,
+		"kwok-kube-version",
+		"v1.20.15",
+		"The kubernetes version to be used for kwok member clusters",
+	)
 
 	flag.BoolVar(&preserveClusters, "preserve-clusters", false, "If set, clusters created during testing are preserved")
-	flag.BoolVar(&preserveNamespace, "preserve-namespaces", false, "If set, namespaces created during testing are preserved")
+	flag.BoolVar(
+		&preserveNamespace,
+		"preserve-namespaces",
+		false,
+		"If set, namespaces created during testing are preserved",
+	)
 }
 
 var _ = ginkgo.SynchronizedBeforeSuite(
@@ -125,7 +149,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(
 		return bytes
 	},
-	func(data []byte) {
+	func(ctx context.Context, data []byte) {
 		params := []string{}
 		err := json.Unmarshal(data, &params)
 		gomega.Expect(err).ToNot(gomega.HaveOccurred())
@@ -150,6 +174,21 @@ var _ = ginkgo.SynchronizedBeforeSuite(
 		hostDiscoveryClient, err = discovery.NewDiscoveryClientForConfig(restConfig)
 		gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 
+		fedInformerFactory := fedinformers.NewSharedInformerFactory(hostFedClient, 0)
+		manager := informermanager.NewInformerManager(
+			hostDynamicClient,
+			fedInformerFactory.Core().V1alpha1().FederatedTypeConfigs(),
+			nil,
+		)
+		ftcManager = manager
+
+		fedInformerFactory.Start(ctx.Done())
+		manager.Start(ctx)
+
+		if !cache.WaitForNamedCacheSync("host-informer-manager", ctx.Done(), ftcManager.HasSynced) {
+			ginkgo.Fail("failed to wait for host informer manager cache sync")
+		}
+
 		clusterKubeClients = sync.Map{}
 		clusterFedClients = sync.Map{}
 		clusterDynamicClients = sync.Map{}
@@ -170,7 +209,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(
 				defaultClusterWaitTimeout,
 			)
 		default:
-			ginkgo.Fail(fmt.Sprintf("invalid cluster provider, %s or %s accepted", KwokClusterProvider, KindClusterProvider))
+			ginkgo.Fail(
+				fmt.Sprintf("invalid cluster provider, %s or %s accepted", KwokClusterProvider, KindClusterProvider),
+			)
 		}
 	},
 )
@@ -231,12 +272,19 @@ func (*framework) HostDiscoveryClient() discovery.DiscoveryInterface {
 	return hostDiscoveryClient
 }
 
+func (*framework) FTCManager() informermanager.FederatedTypeConfigManager {
+	return ftcManager
+}
+
 func (f *framework) TestNamespace() *corev1.Namespace {
 	gomega.Expect(f.namespace).ToNot(gomega.BeNil(), MessageUnexpectedError)
 	return f.namespace
 }
 
-func (f *framework) NewCluster(ctx context.Context, clusterModifiers ...ClusterModifier) (*fedcorev1a1.FederatedCluster, *corev1.Secret) {
+func (f *framework) NewCluster(
+	ctx context.Context,
+	clusterModifiers ...ClusterModifier,
+) (*fedcorev1a1.FederatedCluster, *corev1.Secret) {
 	clusterName := strings.ToLower(fmt.Sprintf("%s-%s", f.name, rand.String(12)))
 
 	cluster, secret := f.clusterProvider.NewCluster(ctx, clusterName)
diff --git a/test/e2e/framework/interface.go b/test/e2e/framework/interface.go
index 0b15207a..d7fad70a 100644
--- a/test/e2e/framework/interface.go
+++ b/test/e2e/framework/interface.go
@@ -26,6 +26,7 @@ import (
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
 	fedclient "github.com/kubewharf/kubeadmiral/pkg/client/clientset/versioned"
+	"github.com/kubewharf/kubeadmiral/pkg/util/informermanager"
 )
 
 type FrameworkOptions struct {
@@ -37,6 +38,7 @@ type Framework interface {
 	HostFedClient() fedclient.Interface
 	HostDynamicClient() dynamic.Interface
 	HostDiscoveryClient() discovery.DiscoveryInterface
+	FTCManager() informermanager.FederatedTypeConfigManager
 
 	Name() string
 	TestNamespace() *corev1.Namespace
diff --git a/test/e2e/framework/policies/propagationpolicy.go b/test/e2e/framework/policies/propagationpolicy.go
index 2b9de1bf..3ed7e5f3 100644
--- a/test/e2e/framework/policies/propagationpolicy.go
+++ b/test/e2e/framework/policies/propagationpolicy.go
@@ -38,12 +38,12 @@ func PropagationPolicyForClustersWithPlacements(
 		},
 		Spec: fedcorev1a1.PropagationPolicySpec{
 			SchedulingMode: fedcorev1a1.SchedulingModeDuplicate,
-			Placements:     []fedcorev1a1.ClusterReference{},
+			Placements:     []fedcorev1a1.DesiredPlacement{},
 		},
 	}
 
 	for _, c := range clusters {
-		policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.ClusterReference{Cluster: c.Name})
+		policy.Spec.Placements = append(policy.Spec.Placements, fedcorev1a1.DesiredPlacement{Cluster: c.Name})
 	}
 
 	return policy
diff --git a/test/e2e/resourcepropagation/cronjobs.go b/test/e2e/resourcepropagation/cronjobs.go
index f7898205..173c65be 100644
--- a/test/e2e/resourcepropagation/cronjobs.go
+++ b/test/e2e/resourcepropagation/cronjobs.go
@@ -43,7 +43,8 @@ var _ = ginkgo.Describe("CronJob Propagation", func() {
 		resourcePropagationTest(
 			f,
 			&resourcePropagationTestConfig[*batchv1.CronJob]{
-				gvr:           batchv1.SchemeGroupVersion.WithResource("jobs"),
+				gvr:           batchv1.SchemeGroupVersion.WithResource("cronjobs"),
+				gvk:           batchv1.SchemeGroupVersion.WithKind("CronJob"),
 				objectFactory: resources.GetSimpleV1CronJob,
 				clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*batchv1.CronJob] {
 					return client.BatchV1().CronJobs(namespace)
@@ -56,7 +57,6 @@ var _ = ginkgo.Describe("CronJob Propagation", func() {
 					return resources.IsV1CronJobScheduledOnce(cronjob), nil
 				},
 				statusCollection: &resourceStatusCollectionTestConfig{
-					gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federatedcronjobstatuses"),
 					path: "status",
 				},
 			},
@@ -66,7 +66,8 @@ var _ = ginkgo.Describe("CronJob Propagation", func() {
 		resourcePropagationTest(
 			f,
 			&resourcePropagationTestConfig[*batchv1b1.CronJob]{
-				gvr:           batchv1.SchemeGroupVersion.WithResource("jobs"),
+				gvr:           batchv1b1.SchemeGroupVersion.WithResource("cronjobs"),
+				gvk:           batchv1b1.SchemeGroupVersion.WithKind("CronJob"),
 				objectFactory: resources.GetSimpleV1Beta1CronJob,
 				clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*batchv1b1.CronJob] {
 					return client.BatchV1beta1().CronJobs(namespace)
@@ -79,7 +80,6 @@ var _ = ginkgo.Describe("CronJob Propagation", func() {
 					return resources.IsV1Beta1CronJobScheduledOnce(cronjob), nil
 				},
 				statusCollection: &resourceStatusCollectionTestConfig{
-					gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federatedcronjobstatuses"),
 					path: "status",
 				},
 			},
diff --git a/test/e2e/resourcepropagation/deployments.go b/test/e2e/resourcepropagation/deployments.go
index 42fb142b..e046c668 100644
--- a/test/e2e/resourcepropagation/deployments.go
+++ b/test/e2e/resourcepropagation/deployments.go
@@ -34,6 +34,7 @@ var _ = ginkgo.Describe("Deployment Propagation", func() {
 			f,
 			&resourcePropagationTestConfig[*appsv1.Deployment]{
 				gvr:           appsv1.SchemeGroupVersion.WithResource("deployments"),
+				gvk:           appsv1.SchemeGroupVersion.WithKind("Deployment"),
 				objectFactory: resources.GetSimpleDeployment,
 				clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*appsv1.Deployment] {
 					return client.AppsV1().Deployments(namespace)
@@ -46,7 +47,6 @@ var _ = ginkgo.Describe("Deployment Propagation", func() {
 					return resources.IsDeploymentProgressing(deployment), nil
 				},
 				statusCollection: &resourceStatusCollectionTestConfig{
-					gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federateddeploymentstatuses"),
 					path: "status",
 				},
 			},
diff --git a/test/e2e/resourcepropagation/framework.go b/test/e2e/resourcepropagation/framework.go
index b455a543..3dfa492a 100644
--- a/test/e2e/resourcepropagation/framework.go
+++ b/test/e2e/resourcepropagation/framework.go
@@ -31,11 +31,12 @@ import (
 	pkgruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	jsonutil "k8s.io/apimachinery/pkg/util/json"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
-	controllerutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util"
+	"github.com/kubewharf/kubeadmiral/pkg/util/naming"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/policies"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/util"
@@ -71,14 +72,13 @@ type resourceClient[T k8sObject] interface {
 }
 
 type resourceStatusCollectionTestConfig struct {
-	// GVR of the federatedstatus.
-	gvr schema.GroupVersionResource
 	// Path to a field in the resource whose value should be collected by status collection.
 	path string
 }
 
 type resourcePropagationTestConfig[T k8sObject] struct {
 	gvr              schema.GroupVersionResource
+	gvk              schema.GroupVersionKind
 	statusCollection *resourceStatusCollectionTestConfig
 	// Returns an object template with the given name.
 	objectFactory func(name string) T
@@ -153,18 +153,17 @@ func resourcePropagationTest[T k8sObject](
 		})
 
 		ginkgo.By("Updating the source object", func() {
-			patch := []map[string]interface{}{
-				{
-					"op": "add",
-					// escape the / in annotation key
-					"path":  "/metadata/annotations/" + strings.Replace(resourceUpdateTestAnnotationKey, "/", "~1", 1),
-					"value": resourceUpdateTestAnnotationValue,
+			patch := map[string]interface{}{
+				"metadata": map[string]interface{}{
+					"annotations": map[string]interface{}{
+						resourceUpdateTestAnnotationKey: resourceUpdateTestAnnotationValue,
+					},
 				},
 			}
 			patchBytes, err := json.Marshal(patch)
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 
-			object, err = hostClient.Patch(ctx, object.GetName(), types.JSONPatchType, patchBytes, metav1.PatchOptions{})
+			object, err = hostClient.Patch(ctx, object.GetName(), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 		})
 
@@ -245,29 +244,33 @@ func resourcePropagationTest[T k8sObject](
 				actualFieldByCluster[cluster.Name] = actualField
 			}
 
-			fedStatusUns, err := f.HostDynamicClient().Resource(config.statusCollection.gvr).Namespace(object.GetNamespace()).Get(
-				ctx, object.GetName(), metav1.GetOptions{})
+			ftc, exists := f.FTCManager().GetResourceFTC(config.gvk)
+			g.Expect(exists).To(gomega.BeTrue())
+
+			collectedStatusName := naming.GenerateFederatedObjectName(object.GetName(), ftc.Name)
+			fedStatus, err := f.HostFedClient().CoreV1alpha1().CollectedStatuses(object.GetNamespace()).Get(
+				ctx, collectedStatusName, metav1.GetOptions{})
 			if err != nil && apierrors.IsNotFound(err) {
 				// status might not have been created yet, use local g to fail only this attempt
 				g.Expect(err).NotTo(gomega.HaveOccurred(), "Federated status object has not been created")
 			}
 			gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 
-			fedStatus := controllerutil.FederatedResource{}
-			err = pkgruntime.DefaultUnstructuredConverter.FromUnstructured(fedStatusUns.Object, &fedStatus)
-			gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
-
-			g.Expect(fedStatus.ClusterStatus).
+			g.Expect(fedStatus.Clusters).
 				To(gomega.HaveLen(len(actualFieldByCluster)), "Collected status has wrong number of clusters")
 
-			for _, clusterStatus := range fedStatus.ClusterStatus {
-				actualField, exists := actualFieldByCluster[clusterStatus.ClusterName]
-				g.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("collected from unexpected cluster %s", clusterStatus.ClusterName))
-
-				collectedField, exists, err := unstructured.NestedFieldNoCopy(clusterStatus.CollectedFields, pathSegments...)
+			for _, clusterStatus := range fedStatus.Clusters {
+				actualField, exists := actualFieldByCluster[clusterStatus.Cluster]
+				g.Expect(exists).
+					To(gomega.BeTrue(), fmt.Sprintf("collected from unexpected cluster %s", clusterStatus.Cluster))
+
+				collectedFields := &map[string]interface{}{}
+				err := jsonutil.Unmarshal(clusterStatus.CollectedFields.Raw, collectedFields)
+				gomega.Expect(err).ToNot(gomega.HaveOccurred())
+				collectedField, exists, err := unstructured.NestedFieldNoCopy(*collectedFields, pathSegments...)
 				gomega.Expect(err).NotTo(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 				g.Expect(exists).To(
 					gomega.BeTrue(),
-					fmt.Sprintf("collected fields does not contain %q for cluster %s", config.statusCollection.path, clusterStatus.ClusterName),
+					fmt.Sprintf("collected fields does not contain %q for cluster %s", config.statusCollection.path, clusterStatus.Cluster),
 				)
 				g.Expect(collectedField).To(gomega.Equal(actualField), "collected and actual fields differ")
 			}
diff --git a/test/e2e/resourcepropagation/jobs.go b/test/e2e/resourcepropagation/jobs.go
index 45c7b70a..a6362ef8 100644
--- a/test/e2e/resourcepropagation/jobs.go
+++ b/test/e2e/resourcepropagation/jobs.go
@@ -75,6 +75,7 @@ var _ = ginkgo.Context("Job Propagation", func() {
 			f,
 			&resourcePropagationTestConfig[*batchv1.Job]{
 				gvr:           batchv1.SchemeGroupVersion.WithResource("jobs"),
+				gvk:           batchv1.SchemeGroupVersion.WithKind("Job"),
 				objectFactory: testCase.jobFactory,
 				clientGetter: func(client kubernetes.Interface, namespace string) resourceClient[*batchv1.Job] {
 					return client.BatchV1().Jobs(namespace)
@@ -87,7 +88,6 @@ var _ = ginkgo.Context("Job Propagation", func() {
 					return resources.IsJobComplete(job), nil
 				},
 				statusCollection: &resourceStatusCollectionTestConfig{
-					gvr:  fedtypesv1a1.SchemeGroupVersion.WithResource("federatedjobstatuses"),
 					path: "status",
 				},
 			},
diff --git a/test/e2e/schedulingprofile/intree.go b/test/e2e/schedulingprofile/intree.go
index f026c82e..f650aed6 100644
--- a/test/e2e/schedulingprofile/intree.go
+++ b/test/e2e/schedulingprofile/intree.go
@@ -23,14 +23,14 @@ import (
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"
 
 	fedcorev1a1 "github.com/kubewharf/kubeadmiral/pkg/apis/core/v1alpha1"
-	"github.com/kubewharf/kubeadmiral/pkg/controllers/scheduler"
-	controllerutil "github.com/kubewharf/kubeadmiral/pkg/controllers/util"
+	"github.com/kubewharf/kubeadmiral/pkg/util/naming"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/policies"
 	"github.com/kubewharf/kubeadmiral/test/e2e/framework/resources"
@@ -57,10 +57,16 @@ var _ = ginkgo.Describe("Scheduling Profile", func() {
 		}
 
 		ginkgo.By("Creating scheduling profile")
-		profile, err = f.HostFedClient().CoreV1alpha1().SchedulingProfiles().Create(ctx, profile, metav1.CreateOptions{})
+		profile, err = f.HostFedClient().
+			CoreV1alpha1().
+			SchedulingProfiles().
+			Create(ctx, profile, metav1.CreateOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 		ginkgo.DeferCleanup(func(ctx ginkgo.SpecContext) {
-			err := f.HostFedClient().CoreV1alpha1().SchedulingProfiles().Delete(ctx, profile.Name, metav1.DeleteOptions{})
+			err := f.HostFedClient().
+				CoreV1alpha1().
+				SchedulingProfiles().
+				Delete(ctx, profile.Name, metav1.DeleteOptions{})
 			gomega.Expect(err).To(gomega.Or(gomega.BeNil(), gomega.Satisfy(apierrors.IsNotFound)))
 		})
 
@@ -76,41 +82,44 @@ var _ = ginkgo.Describe("Scheduling Profile", func() {
 		ginkgo.By("Creating configmap that references propagation policy")
 		configMap := resources.GetSimpleConfigMap(f.Name())
 		policies.SetPropagationPolicy(configMap, policy)
-		configMap, err = f.HostKubeClient().CoreV1().ConfigMaps(f.TestNamespace().Name).Create(ctx, configMap, metav1.CreateOptions{})
+		configMap, err = f.HostKubeClient().
+			CoreV1().
+			ConfigMaps(f.TestNamespace().Name).
+			Create(ctx, configMap, metav1.CreateOptions{})
 		gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError)
 
 		ginkgo.By("Verifying scheduling result")
 		gomega.Eventually(func(g gomega.Gomega, ctx context.Context) {
-			federatedConfigMap, err := f.HostDynamicClient().
-				Resource(resources.FederatedConfigMapGVR).
-				Namespace(f.TestNamespace().Name).
-				Get(ctx, configMap.Name, metav1.GetOptions{})
+			configMapFTC, exists := f.FTCManager().GetResourceFTC(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
+			g.Expect(exists).To(gomega.BeTrue())
+
+			federatedConfigMapName := naming.GenerateFederatedObjectName(configMap.Name, configMapFTC.Name)
+			federatedConfigMap, err := f.HostFedClient().
+				CoreV1alpha1().FederatedObjects(f.TestNamespace().Name).
+				Get(ctx, federatedConfigMapName, metav1.GetOptions{})
 			gomega.Expect(err).To(gomega.Or(gomega.BeNil(), gomega.Satisfy(apierrors.IsNotFound)))
 			g.Expect(err).ToNot(gomega.HaveOccurred())
 
-			placementObj, err := controllerutil.UnmarshalGenericPlacements(federatedConfigMap)
-			gomega.Expect(err).ToNot(gomega.HaveOccurred(), framework.MessageUnexpectedError)
-
-			placement := placementObj.Spec.GetPlacementOrNil(scheduler.PrefixedGlobalSchedulerName)
+			placement := federatedConfigMap.GetSpec().GetPlacementUnion()
 			g.Expect(placement).ToNot(gomega.BeNil())
 
 			if enabled {
 				// only the first cluster should be selected since placement plugin was enabled
-				g.Expect(placement.Clusters).To(gomega.HaveLen(1))
-				g.Expect(placement.Clusters[0].Name).To(gomega.Equal(clusters[0].Name))
+				g.Expect(placement).To(gomega.HaveLen(1))
+				g.Expect(placement.UnsortedList()[0]).To(gomega.Equal(clusters[0].Name))
 			} else {
 				// all clusters should be selected since placement plugin was disabled
-				g.Expect(placement.Clusters).To(gomega.HaveLen(len(clusters)))
+				g.Expect(placement).To(gomega.HaveLen(len(clusters)))
 				clusterSet := sets.New[string]()
 				for _, cluster := range clusters {
 					clusterSet.Insert(cluster.Name)
 				}
-				for _, cluster := range placement.Clusters {
-					gomega.Expect(clusterSet.Has(cluster.Name)).To(gomega.BeTrue())
+				for cluster := range placement {
+					gomega.Expect(clusterSet.Has(cluster)).To(gomega.BeTrue())
 				}
 			}
 
-			ginkgo.GinkgoLogr.Info("Obtained scheduling result", "result", placement.Clusters)
+			ginkgo.GinkgoLogr.Info("Obtained scheduling result", "result", placement.UnsortedList())
 		}).WithTimeout(scheduleTimeout).WithContext(ctx).Should(gomega.Succeed(), "Timed out waiting for scheduling")
 	}

From 50c7e0b08bd1aaa6046e2b75ca9854b2bb4ff6ad Mon Sep 17 00:00:00 2001
From: "hawjia.lim"
Date: Wed, 23 Aug 2023 06:45:47 +0000
Subject: [PATCH 2/2] fix(e2e): fix npe

---
 test/e2e/automigration/automigration.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/e2e/automigration/automigration.go b/test/e2e/automigration/automigration.go
index d9b7dbf1..c7ef5d22 100644
--- a/test/e2e/automigration/automigration.go
+++ b/test/e2e/automigration/automigration.go
@@ -161,6 +161,7 @@ var _ = ginkgo.Describe("Auto Migration", autoMigrationTestLabel, func() {
 					ctx, dp.Name, metav1.GetOptions{},
 				)
 				gomega.Expect(err).NotTo(gomega.HaveOccurred())
+				g.Expect(clusterDp.Status).ToNot(gomega.BeNil())
 				g.Expect(clusterDp.Status.ReadyReplicas).To(gomega.Equal(replicasPerCluster))
 			}).WithPolling(defaultPollingInterval).Should(gomega.Succeed())
 		})