From ff121d3132fbc5613ae4fe4596a7a96d5208aa48 Mon Sep 17 00:00:00 2001
From: yiraeChristineKim
Date: Tue, 7 May 2024 12:50:56 -0400
Subject: [PATCH] Add hosted mode support and e2e tests for OperatorPolicy

Signed-off-by: yiraeChristineKim
---
 .vscode/launch.json                      | 131 +++++++++++++++--------
 Makefile                                 |  26 +++--
 controllers/operatorpolicy_controller.go |  23 ++--
 main.go                                  |  10 +-
 test/e2e/case38_install_operator_test.go |  63 +++++++----
 test/e2e/e2e_suite_test.go               |  41 +++++++
 6 files changed, 209 insertions(+), 85 deletions(-)

diff --git a/.vscode/launch.json b/.vscode/launch.json
index 5380ddcd..7800e10a 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -1,48 +1,89 @@
 {
-    "version": "0.2.0",
-    "configurations": [
-        // Run `make kind-bootstrap-cluster-dev` before launching this.
-        {
-            "name": "Launch Package",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/main.go",
-            "args": ["controller", "--leader-elect=false", "--log-level=3", "--v=5", "--enable-operator-policy=true"],
-            "env": {
-                "WATCH_NAMESPACE": "managed",
-                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed",
-            }
-        },
-        // Set FDescribe or FIt on the test to debug. Then set the desired breakpoint.
-        {
-            "name": "Launch Test Function (instructions in launch.json)",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/test/e2e/e2e_suite_test.go",
-            "args": [
-                "-ginkgo.debug",
-                "-ginkgo.v",
-            ],
-            "env": {
-                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed_e2e",
-            }
-        },
-        // Set the correct path to the governance-policy-framework repo directory in the env section.
-        {
-            "name": "Launch Package (Framework E2E) (instructions in launch.json)",
-            "type": "go",
-            "request": "launch",
-            "mode": "auto",
-            "program": "${workspaceFolder}/main.go",
-            "args": ["controller", "--leader-elect=false", "--log-level=3", "--v=5", "--enable-operator-policy=true"],
-            "env": {
-                "WATCH_NAMESPACE": "managed",
-                "HUB_CONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_hub",
-                "MANAGED_CONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_managed",
-                "KUBECONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_managed",
+    "version": "0.2.0",
+    "configurations": [
+        // Run `make kind-bootstrap-cluster-dev` before launching this.
+        {
+            "name": "Launch Package",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/main.go",
+            "args": [
+                "controller",
+                "--leader-elect=false",
+                "--log-level=3",
+                "--v=5",
+                "--enable-operator-policy=true"
+            ],
+            "env": {
+                "WATCH_NAMESPACE": "managed",
+                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed"
+            }
+        },
+        {
+            "name": "Launch hosted Package",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/main.go",
+            "args": [
+                "controller",
+                "--leader-elect=false",
+                "--log-level=3",
+                "--v=5",
+                "--enable-operator-policy=true",
+                "--target-kubeconfig-path=${workspaceFolder}/kubeconfig_managed2"
+            ],
+            "env": {
+                "WATCH_NAMESPACE": "managed",
+                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed"
             }
-        }
-    ]
+        },
+        // Set FDescribe or FIt on the test to debug. Then set the desired breakpoint.
+        {
+            "name": "Launch Test Function (instructions in launch.json)",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/test/e2e/e2e_suite_test.go",
+            "args": ["-ginkgo.debug", "-ginkgo.v"],
+            "env": {
+                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed_e2e"
+            }
+        },
+        // Set FDescribe or FIt on the test to debug. Then set the desired breakpoint.
+        {
+            "name": "Launch Hosted Test Function (instructions in launch.json)",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/test/e2e/e2e_suite_test.go",
+            "args": ["-ginkgo.debug", "-ginkgo.v", "--is_hosted=true"],
+            "env": {
+                "KUBECONFIG": "${workspaceFolder}/kubeconfig_managed_e2e",
+                "TARGET_KUBECONFIG_PATH": "${workspaceFolder}/kubeconfig_managed2_e2e"
+            }
+        },
+        // Set the correct path to the governance-policy-framework repo directory in the env section.
+        {
+            "name": "Launch Package (Framework E2E) (instructions in launch.json)",
+            "type": "go",
+            "request": "launch",
+            "mode": "auto",
+            "program": "${workspaceFolder}/main.go",
+            "args": [
+                "controller",
+                "--leader-elect=false",
+                "--log-level=3",
+                "--v=5",
+                "--enable-operator-policy=true"
+            ],
+            "env": {
+                "WATCH_NAMESPACE": "managed",
+                "HUB_CONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_hub",
+                "MANAGED_CONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_managed",
+                "KUBECONFIG": "${userHome}/git/governance-policy-framework/kubeconfig_managed"
+            }
+        }
+    ]
 }
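Note on the "Launch hosted Package" configuration: the new `--target-kubeconfig-path` flag is what turns a second kubeconfig into a second client inside the controller (the main.go hunk later in this patch does the real wiring). The sketch below is a minimal distillation of that idea, assuming controller-runtime; `newTargetClient` is a hypothetical helper, not code from this patch.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/clientcmd"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newTargetClient (hypothetical) mirrors what main() does with the
// --target-kubeconfig-path flag: load the kubeconfig that points at the
// managed (target) cluster and build a client against it.
func newTargetClient(targetKubeConfig string, scheme *runtime.Scheme) (client.Client, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", targetKubeConfig)
	if err != nil {
		return nil, err
	}

	// In hosted mode, writes of enforced resources go through this client.
	return client.New(cfg, client.Options{Scheme: scheme})
}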
diff --git a/Makefile b/Makefile
index 4cbbd718..9ca8b387 100644
--- a/Makefile
+++ b/Makefile
@@ -106,7 +106,7 @@ create-ns:
 # Run against the current locally configured Kubernetes cluster
 .PHONY: run
 run:
-	WATCH_NAMESPACE=$(WATCH_NAMESPACE) go run ./main.go controller --leader-elect=false
+	WATCH_NAMESPACE=$(WATCH_NAMESPACE) go run ./main.go controller --leader-elect=false --log-level=1 --enable-operator-policy=true
 
 ############################################################
 # clean section
@@ -193,7 +193,7 @@ kind-create-cluster:
 .PHONY: kind-additional-cluster
 kind-additional-cluster: MANAGED_CLUSTER_SUFFIX = 2
 kind-additional-cluster: CLUSTER_NAME = $(MANAGED_CLUSTER_NAME)
-kind-additional-cluster: kind-create-cluster kind-controller-kubeconfig
+kind-additional-cluster: kind-create-cluster kind-controller-kubeconfig install-crds-hosted
 
 .PHONY: kind-delete-cluster
 kind-delete-cluster:
@@ -204,11 +204,10 @@ kind-delete-cluster:
 kind-tests: kind-delete-cluster kind-bootstrap-cluster-dev kind-deploy-controller-dev e2e-test
 
 OLM_VERSION := v0.26.0
+OLM_INSTALLER = $(LOCAL_BIN)/install.sh
 
 .PHONY: install-crds
-install-crds:
-	@echo installing olm
-	curl -L https://github.com/operator-framework/operator-lifecycle-manager/releases/download/$(OLM_VERSION)/install.sh -o $(LOCAL_BIN)/install.sh
+install-crds: $(OLM_INSTALLER)
	chmod +x $(LOCAL_BIN)/install.sh
	$(LOCAL_BIN)/install.sh $(OLM_VERSION)
	@echo installing crds
@@ -221,6 +220,16 @@ install-crds:
	kubectl apply -f deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml
	kubectl apply -f deploy/crds/policy.open-cluster-management.io_operatorpolicies.yaml
 
+$(OLM_INSTALLER):
+	@echo installing olm
+	curl -L https://github.com/operator-framework/operator-lifecycle-manager/releases/download/$(OLM_VERSION)/install.sh -o $(LOCAL_BIN)/install.sh
+
+.PHONY: install-crds-hosted
+install-crds-hosted: export KUBECONFIG=./kubeconfig_managed$(MANAGED_CLUSTER_SUFFIX)_e2e
+install-crds-hosted: $(OLM_INSTALLER)
+	chmod +x $(LOCAL_BIN)/install.sh
+	$(LOCAL_BIN)/install.sh $(OLM_VERSION)
+
 .PHONY: install-resources
 install-resources:
	# creating namespaces
@@ -230,20 +239,23 @@ install-resources:
	kubectl apply -k deploy/rbac
	kubectl apply -f deploy/manager/service-account.yaml -n $(KIND_NAMESPACE)
 
+IS_HOSTED ?= false
+
 .PHONY: e2e-test
 e2e-test: e2e-dependencies
-	$(GINKGO) -v --procs=20 $(E2E_TEST_ARGS) test/e2e
+	$(GINKGO) -v --procs=20 $(E2E_TEST_ARGS) test/e2e -- -is_hosted=$(IS_HOSTED)
 
 .PHONY: e2e-test-coverage
 e2e-test-coverage: E2E_TEST_ARGS = --json-report=report_e2e.json --label-filter='!hosted-mode && !running-in-cluster' --output-dir=.
 e2e-test-coverage: e2e-run-instrumented e2e-test e2e-stop-instrumented
 
 .PHONY: e2e-test-hosted-mode-coverage
-e2e-test-hosted-mode-coverage: E2E_TEST_ARGS = --json-report=report_e2e_hosted_mode.json --label-filter="hosted-mode && !running-in-cluster" --output-dir=.
+e2e-test-hosted-mode-coverage: E2E_TEST_ARGS = --json-report=report_e2e_hosted_mode.json --label-filter="(hosted-mode || hosted-as-well) && !running-in-cluster" --output-dir=.
 e2e-test-hosted-mode-coverage: COVERAGE_E2E_OUT = coverage_e2e_hosted_mode.out
+e2e-test-hosted-mode-coverage: IS_HOSTED=true
 e2e-test-hosted-mode-coverage: export TARGET_KUBECONFIG_PATH = $(PWD)/kubeconfig_managed2
 e2e-test-hosted-mode-coverage: e2e-run-instrumented e2e-test e2e-stop-instrumented
 
 .PHONY: e2e-test-running-in-cluster
 e2e-test-running-in-cluster: E2E_TEST_ARGS = --label-filter="running-in-cluster" --covermode=atomic --coverprofile=coverage_e2e_uninstall.out --coverpkg=open-cluster-management.io/config-policy-controller/pkg/triggeruninstall
 e2e-test-running-in-cluster: e2e-test
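Note on the test selection above: `IS_HOSTED` is forwarded to the suite as the Go test flag `-is_hosted`, while Ginkgo labels decide which specs each coverage target runs. Below is a minimal sketch of a spec that participates in both regular runs (selected by `'!hosted-mode && !running-in-cluster'`) and hosted runs (selected by `"(hosted-mode || hosted-as-well) && !running-in-cluster"`), assuming Ginkgo v2; the spec itself is illustrative, not part of this patch.

package e2e

import (
	. "github.com/onsi/ginkgo/v2"
)

// This spec carries the "hosted-as-well" label, so both label-filter
// expressions quoted above select it; specs labeled only "hosted-mode"
// run exclusively in the hosted coverage target.
var _ = Describe("example hosted-capable spec", Label("hosted-as-well"), func() {
	It("talks to whichever cluster the suite is targeting", func() {
		// Test body elided; see case38 below for real examples.
	})
})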
diff --git a/controllers/operatorpolicy_controller.go b/controllers/operatorpolicy_controller.go
index 720d550b..a38dbbe9 100644
--- a/controllers/operatorpolicy_controller.go
+++ b/controllers/operatorpolicy_controller.go
@@ -103,6 +103,7 @@ type OperatorPolicyReconciler struct {
	DynamicWatcher   depclient.DynamicWatcher
	InstanceName     string
	DefaultNamespace string
+	TargetClient     client.Client
 }
 
 // SetupWithManager sets up the controller with the Manager and will reconcile when the dynamic watcher
@@ -745,7 +746,7 @@ func (r *OperatorPolicyReconciler) musthaveOpGroup(
 
	desiredOpGroup.ResourceVersion = opGroup.GetResourceVersion()
 
-	err = r.Update(ctx, &opGroup)
+	err = r.TargetClient.Update(ctx, &opGroup)
	if err != nil {
		return nil, changed, fmt.Errorf("error updating the OperatorGroup: %w", err)
	}
@@ -766,7 +767,7 @@ func (r *OperatorPolicyReconciler) musthaveOpGroup(
 func (r *OperatorPolicyReconciler) createWithNamespace(
	ctx context.Context, policy *policyv1beta1.OperatorPolicy, object client.Object,
 ) error {
-	err := r.Create(ctx, object)
+	err := r.TargetClient.Create(ctx, object)
	if err == nil {
		return nil
	}
@@ -784,13 +785,13 @@ func (r *OperatorPolicyReconciler) createWithNamespace(
		},
	}
 
-	err = r.Create(ctx, &ns)
+	err = r.TargetClient.Create(ctx, &ns)
	if err != nil && !k8serrors.IsAlreadyExists(err) {
		return err
	}
 
	// Try creating the object again now that the namespace was created.
-	return r.Create(ctx, object)
+	return r.TargetClient.Create(ctx, object)
 }
 
 // isNamespaceNotFound detects if the input error from r.Create failed due to the specified namespace not existing.
@@ -899,7 +900,7 @@ func (r *OperatorPolicyReconciler) mustnothaveOpGroup(
		earlyConds = append(earlyConds, calculateComplianceCondition(policy))
	}
 
-	err := r.Delete(ctx, desiredOpGroup)
+	err := r.TargetClient.Delete(ctx, desiredOpGroup)
	if err != nil {
		return earlyConds, changed, fmt.Errorf("error deleting the OperatorGroup: %w", err)
	}
@@ -1036,7 +1037,7 @@ func (r *OperatorPolicyReconciler) musthaveSubscription(
		earlyConds = append(earlyConds, calculateComplianceCondition(policy))
	}
 
-	err = r.Update(ctx, foundSub)
+	err = r.TargetClient.Update(ctx, foundSub)
	if err != nil {
		return mergedSub, nil, changed, fmt.Errorf("error updating the Subscription: %w", err)
	}
@@ -1090,7 +1091,7 @@ func (r *OperatorPolicyReconciler) mustnothaveSubscription(
		earlyConds = append(earlyConds, calculateComplianceCondition(policy))
	}
 
-	err := r.Delete(ctx, foundUnstructSub)
+	err := r.TargetClient.Delete(ctx, foundUnstructSub)
	if err != nil {
		return foundSub, earlyConds, changed, fmt.Errorf("error deleting the Subscription: %w", err)
	}
@@ -1354,7 +1355,7 @@ func (r *OperatorPolicyReconciler) musthaveInstallPlan(
			return false, fmt.Errorf("error approving InstallPlan: %w", err)
		}
 
-		if err := r.Update(ctx, &approvableInstallPlans[0]); err != nil {
+		if err := r.TargetClient.Update(ctx, &approvableInstallPlans[0]); err != nil {
			return false, fmt.Errorf("error updating approved InstallPlan: %w", err)
		}
 
@@ -1482,7 +1483,7 @@ func (r *OperatorPolicyReconciler) mustnothaveCSV(
			continue
		}
 
-		err := r.Delete(ctx, &csvList[i])
+		err := r.TargetClient.Delete(ctx, &csvList[i])
		if err != nil {
			changed := updateStatus(policy, foundNotWantedCond("ClusterServiceVersion", csvNames...), relatedCSVs...)
 
@@ -1636,7 +1637,7 @@ func (r *OperatorPolicyReconciler) handleCRDs(
			continue
		}
 
-		err := r.Delete(ctx, &crdList[i])
+		err := r.TargetClient.Delete(ctx, &crdList[i])
		if err != nil {
			changed := updateStatus(policy, foundNotWantedCond("CustomResourceDefinition"), relatedCRDs...)
 
@@ -1755,7 +1756,7 @@ func (r *OperatorPolicyReconciler) mergeObjects(
	}
 
	if updateNeeded {
-		err := r.Update(ctx, existing, client.DryRunAll)
+		err := r.TargetClient.Update(ctx, existing, client.DryRunAll)
		if err != nil {
			if k8serrors.IsForbidden(err) {
				// This indicates the update would make a change, but the change is not allowed,
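The pattern above is consistent throughout the file: every write of an OLM resource (OperatorGroup, Subscription, InstallPlan, CSV, CRD) now goes through `TargetClient`, while policy reads and status updates keep using the client from the hub-side manager. A condensed sketch of that split, with `hostedReconciler` and `ensureOnTarget` as hypothetical stand-ins for the real `OperatorPolicyReconciler`:

package controllers

import (
	"context"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hostedReconciler (hypothetical) shows the client split: the embedded
// Client comes from the manager watching the cluster where policies live,
// while TargetClient points at the cluster named by --target-kubeconfig-path.
// In non-hosted mode the two are the same client.
type hostedReconciler struct {
	client.Client
	TargetClient client.Client
}

func (r *hostedReconciler) ensureOnTarget(ctx context.Context, obj client.Object) error {
	// Enforced resources must be created on the target cluster, not the hub.
	err := r.TargetClient.Create(ctx, obj)
	if err != nil && !k8serrors.IsAlreadyExists(err) {
		return err
	}

	return nil
}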
diff --git a/main.go b/main.go
index a17ad797..6a9e31df 100644
--- a/main.go
+++ b/main.go
@@ -322,6 +322,7 @@ func main() {
	var targetK8sClient kubernetes.Interface
	var targetK8sDynamicClient dynamic.Interface
	var targetK8sConfig *rest.Config
+	var targetClient client.Client
	var nsSelMgr manager.Manager // A separate controller-manager is needed in hosted mode
 
	if opts.targetKubeConfig == "" {
@@ -329,6 +330,7 @@ func main() {
		targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig)
		targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig)
		nsSelMgr = mgr
+		targetClient = mgr.GetClient()
	} else { // "Hosted mode"
		var err error
 
@@ -343,5 +345,11 @@ func main() {
		targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig)
		targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig)
 
+		targetClient, err = client.New(targetK8sConfig, client.Options{Scheme: scheme})
+		if err != nil {
+			log.Error(err, "Failed to create a client for the target cluster", "path", opts.targetKubeConfig)
+			os.Exit(1)
+		}
+
		// The managed cluster's API server is potentially not the same as the hosting cluster and it could be
		// offline already as part of the uninstall process. In this case, the manager's instantiation will fail.
@@ -428,7 +436,7 @@ func main() {
	if opts.enableOperatorPolicy {
		depReconciler, depEvents := depclient.NewControllerRuntimeSource()
 
-		watcher, err := depclient.New(cfg, depReconciler,
+		watcher, err := depclient.New(targetK8sConfig, depReconciler,
			&depclient.Options{
				DisableInitialReconcile: true,
				EnableCache:             true,
@@ -455,6 +463,7 @@ func main() {
			DynamicWatcher:   watcher,
			InstanceName:     instanceName,
			DefaultNamespace: opts.operatorPolDefaultNS,
+			TargetClient:     targetClient,
		}
 
		if err = OpReconciler.SetupWithManager(mgr, depEvents); err != nil {
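The branch above is the heart of hosted mode: with no `--target-kubeconfig-path`, `targetClient` and the dependency watcher simply reuse the one managed-cluster config; with the flag set, they are built from the target cluster's kubeconfig. A hedged distillation of that decision, where `resolveTargetConfig` is a hypothetical helper and not code from this patch:

package main

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// resolveTargetConfig (hypothetical) condenses the wiring choice in main():
// single-cluster mode returns the existing config unchanged, so target and
// managed clients coincide; hosted mode loads the target kubeconfig instead.
func resolveTargetConfig(targetKubeConfigPath string, cfg *rest.Config) (*rest.Config, error) {
	if targetKubeConfigPath == "" {
		return cfg, nil // single-cluster mode: target == managed
	}

	// Hosted mode: the target cluster is reached via its own kubeconfig.
	return clientcmd.BuildConfigFromFlags("", targetKubeConfigPath)
}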
diff --git a/test/e2e/case38_install_operator_test.go b/test/e2e/case38_install_operator_test.go
index 91b9b1a5..12e4ecb4 100644
--- a/test/e2e/case38_install_operator_test.go
+++ b/test/e2e/case38_install_operator_test.go
@@ -20,7 +20,7 @@ import (
	"open-cluster-management.io/config-policy-controller/test/utils"
 )
 
-var _ = Describe("Testing OperatorPolicy", Ordered, func() {
+var _ = Describe("Testing OperatorPolicy", Ordered, Label("hosted-as-well"), func() {
	const (
		opPolTestNS      = "operator-policy-testns"
		parentPolicyYAML = "../resources/case38_operator_install/parent-policy.yaml"
@@ -167,6 +167,19 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
		ConsistentlyWithOffset(1, checkFunc, consistentlyDuration, 1).Should(Succeed())
	}
 
+	preFunc := func() {
+		if IsHosted {
+			KubectlTarget("create", "ns", opPolTestNS)
+			DeferCleanup(func() {
+				KubectlTarget("delete", "ns", opPolTestNS)
+			})
+		}
+		utils.Kubectl("create", "ns", opPolTestNS)
+		DeferCleanup(func() {
+			utils.Kubectl("delete", "ns", opPolTestNS)
+		})
+	}
+
	Describe("Testing an all default operator policy", Ordered, func() {
		const (
			opPolYAML = "../resources/case38_operator_install/operator-policy-all-defaults.yaml"
@@ -174,6 +187,12 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			subName   = "project-quay"
		)
		BeforeAll(func() {
+			if IsHosted {
+				KubectlTarget("create", "ns", opPolTestNS)
+				DeferCleanup(func() {
+					KubectlTarget("delete", "ns", opPolTestNS)
+				})
+			}
			utils.Kubectl("create", "ns", opPolTestNS)
			DeferCleanup(func() {
				utils.Kubectl("delete", "ns", opPolTestNS)
@@ -206,7 +225,7 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			)
 
			By("Verifying the subscription has the correct defaults")
-			sub, err := clientManagedDynamic.Resource(gvrSubscription).Namespace(opPolTestNS).
+			sub, err := targetK8sDynamic.Resource(gvrSubscription).Namespace(opPolTestNS).
				Get(ctx, subName, metav1.GetOptions{})
			Expect(err).ToNot(HaveOccurred())
 
@@ -229,6 +248,13 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			subName   = "project-quay"
		)
		BeforeAll(func() {
+			if IsHosted {
+				KubectlTarget("create", "ns", opPolTestNS)
+				DeferCleanup(func() {
+					KubectlTarget("delete", "ns", opPolTestNS)
+				})
+			}
+
			utils.Kubectl("create", "ns", opPolTestNS)
			DeferCleanup(func() {
				utils.Kubectl("delete", "ns", opPolTestNS)
@@ -267,10 +293,7 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			extraOpGroupName = "extra-operator-group"
		)
		BeforeAll(func() {
-			utils.Kubectl("create", "ns", opPolTestNS)
-			DeferCleanup(func() {
-				utils.Kubectl("delete", "ns", opPolTestNS)
-			})
+			preFunc()
 
			createObjWithParent(parentPolicyYAML, parentPolicyName,
				opPolYAML, opPolTestNS, gvrPolicy, gvrOperatorPolicy)
@@ -325,7 +348,7 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			)
		})
		It("Should become NonCompliant when an extra OperatorGroup is added", func() {
-			utils.Kubectl("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
+			KubectlTarget("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
			check(
				opPolName,
				true,
@@ -360,8 +383,8 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
		It("Should warn about the OperatorGroup when it doesn't match the default", func() {
			utils.Kubectl("patch", "operatorpolicy", opPolName, "-n", opPolTestNS, "--type=json", "-p",
				`[{"op": "replace", "path": "/spec/remediationAction", "value": "inform"}]`)
-			utils.Kubectl("delete", "operatorgroup", "-n", opPolTestNS, "--all")
-			utils.Kubectl("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
+			KubectlTarget("delete", "operatorgroup", "-n", opPolTestNS, "--all")
+			KubectlTarget("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
			check(
				opPolName,
				false,
@@ -391,11 +414,12 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			opPolName = "oppol-no-group-enforce"
		)
		BeforeAll(func() {
+			preFunc()
+
			DeferCleanup(func() {
				utils.Kubectl(
					"delete", "-f", parentPolicyYAML, "-n", testNamespace, "--ignore-not-found", "--cascade=foreground",
				)
-				utils.Kubectl("delete", "ns", opPolTestNS, "--ignore-not-found")
			})
 
			createObjWithParent(
@@ -413,22 +437,19 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
	Describe("Testing OperatorGroup behavior when it is specified in the policy", Ordered, func() {
		const (
			opPolYAML            = "../resources/case38_operator_install/operator-policy-with-group.yaml"
			opPolName            = "oppol-with-group"
			incorrectOpGroupYAML = "../resources/case38_operator_install/incorrect-operator-group.yaml"
			incorrectOpGroupName = "incorrect-operator-group"
			scopedOpGroupYAML    = "../resources/case38_operator_install/scoped-operator-group.yaml"
			scopedOpGroupName    = "scoped-operator-group"
			extraOpGroupYAML     = "../resources/case38_operator_install/extra-operator-group.yaml"
			extraOpGroupName     = "extra-operator-group"
		)
 
		BeforeAll(func() {
-			utils.Kubectl("create", "ns", opPolTestNS)
-			DeferCleanup(func() {
-				utils.Kubectl("delete", "ns", opPolTestNS)
-			})
+			preFunc()
 
-			utils.Kubectl("apply", "-f", incorrectOpGroupYAML, "-n", opPolTestNS)
+			KubectlTarget("apply", "-f", incorrectOpGroupYAML, "-n", opPolTestNS)
 
			createObjWithParent(parentPolicyYAML, parentPolicyName,
				opPolYAML, opPolTestNS, gvrPolicy, gvrOperatorPolicy)
@@ -471,8 +492,8 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			)
		})
		It("Should match when the OperatorGroup is manually corrected", func() {
-			utils.Kubectl("delete", "operatorgroup", incorrectOpGroupName, "-n", opPolTestNS)
-			utils.Kubectl("apply", "-f", scopedOpGroupYAML, "-n", opPolTestNS)
+			KubectlTarget("delete", "operatorgroup", incorrectOpGroupName, "-n", opPolTestNS)
+			KubectlTarget("apply", "-f", scopedOpGroupYAML, "-n", opPolTestNS)
			check(
				opPolName,
				false,
@@ -498,7 +519,7 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			)
		})
		It("Should report a mismatch when the OperatorGroup is manually edited", func() {
-			utils.Kubectl("patch", "operatorgroup", scopedOpGroupName, "-n", opPolTestNS, "--type=json", "-p",
+			KubectlTarget("patch", "operatorgroup", scopedOpGroupName, "-n", opPolTestNS, "--type=json", "-p",
				`[{"op": "replace", "path": "/spec/targetNamespaces", "value": []}]`)
			check(
				opPolName,
@@ -552,7 +573,7 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
			)
		})
		It("Should become NonCompliant when an extra OperatorGroup is added", func() {
-			utils.Kubectl("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
+			KubectlTarget("apply", "-f", extraOpGroupYAML, "-n", opPolTestNS)
			check(
				opPolName,
				true,
@@ -593,9 +614,9 @@ var _ = Describe("Testing OperatorPolicy", Ordered, func() {
		)
 
		BeforeAll(func() {
-			utils.Kubectl("create", "ns", opPolTestNS)
+			KubectlTarget("create", "ns", opPolTestNS)
			DeferCleanup(func() {
-				utils.Kubectl("delete", "ns", opPolTestNS)
+				KubectlTarget("delete", "ns", opPolTestNS)
			})
 
			createObjWithParent(parentPolicyYAML, parentPolicyName,
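The test changes above hinge on `preFunc`: in hosted mode the namespace has to exist on both clusters, because the OperatorPolicy lives on the managed cluster while the Subscription, OperatorGroup, and CSVs land on the target cluster. A standalone sketch of that pattern, assuming Ginkgo v2; `setupTestNamespaces`, `kubectl`, and `kubectlTarget` are hypothetical stand-ins for the suite's `preFunc`, `utils.Kubectl`, and `KubectlTarget`:

package e2e

import (
	. "github.com/onsi/ginkgo/v2"
)

// setupTestNamespaces creates the test namespace on the target cluster when
// hosted, and always on the managed cluster. DeferCleanup registers the
// deletes to run when the enclosing container is torn down, so specs stay
// cluster-agnostic.
func setupTestNamespaces(isHosted bool, kubectl, kubectlTarget func(args ...string)) {
	if isHosted {
		kubectlTarget("create", "ns", "operator-policy-testns")
		DeferCleanup(func() {
			kubectlTarget("delete", "ns", "operator-policy-testns")
		})
	}

	kubectl("create", "ns", "operator-policy-testns")
	DeferCleanup(func() {
		kubectl("delete", "ns", "operator-policy-testns")
	})
}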
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 4e80feca..f3ec06b5 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -54,8 +54,14 @@ var (
	gvrInstallPlan           schema.GroupVersionResource
	gvrClusterServiceVersion schema.GroupVersionResource
	defaultImageRegistry     string
+	IsHosted                 bool
+	targetK8sClient          kubernetes.Interface
+	targetK8sDynamic         dynamic.Interface
+	KubectlTarget            func(args ...string)
 )
 
+const targetEnvName = "TARGET_KUBECONFIG_PATH"
+
 func TestE2e(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Config policy controller e2e Suite")
@@ -66,6 +72,11 @@ func init() {
	klog.InitFlags(nil)
	flag.StringVar(&kubeconfigManaged, "kubeconfig_managed", "../../kubeconfig_managed_e2e",
		"Location of the kubeconfig to use; defaults to current kubeconfig if set to an empty string")
+
+	flag.BoolVar(
+		&IsHosted, "is_hosted", false,
+		"Whether the tests run against a hosted-mode deployment",
+	)
 }
 
 var _ = BeforeSuite(func() {
@@ -156,6 +167,35 @@ var _ = BeforeSuite(func() {
		}, metav1.CreateOptions{})).NotTo(BeNil())
	}
	Expect(namespaces.Get(context.TODO(), testNamespace, metav1.GetOptions{})).NotTo(BeNil())
+
+	if IsHosted {
+		By("Checking that the " + targetEnvName + " environment variable is valid")
+		altKubeconfigPath := os.Getenv(targetEnvName)
+		Expect(altKubeconfigPath).ToNot(Equal(""))
+
+		targetK8sConfig, err := clientcmd.BuildConfigFromFlags("", altKubeconfigPath)
+		Expect(err).ToNot(HaveOccurred())
+
+		targetK8sClient, err = kubernetes.NewForConfig(targetK8sConfig)
+		Expect(err).ToNot(HaveOccurred())
+
+		targetK8sDynamic, err = dynamic.NewForConfig(targetK8sConfig)
+		Expect(err).ToNot(HaveOccurred())
+	} else {
+		targetK8sClient = clientManaged
+		targetK8sDynamic = clientManagedDynamic
+	}
+
+	KubectlTarget = func(args ...string) {
+		kubeconfig := "../../kubeconfig_managed_e2e"
+		if IsHosted {
+			kubeconfig = "../../kubeconfig_managed2_e2e"
+		}
+
+		args = append(args, "--kubeconfig="+kubeconfig)
+
+		utils.Kubectl(args...)
+	}
 })
 
 func NewKubeClient(url, kubeconfig, context string) kubernetes.Interface {
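The `KubectlTarget` closure defined above is the piece that glues the tests to the second cluster: when not hosted, the "target" clients and kubeconfig simply alias the managed cluster, so shared specs need no branching. Shown below as a standalone function for reference; `kubectlTarget` is a hypothetical rewrite of the suite's closure, with the relative kubeconfig paths matching the suite's conventions (the second file is produced by `make kind-additional-cluster`).

package e2e

import "os/exec"

// kubectlTarget runs kubectl pinned to whichever kubeconfig the suite is
// targeting: the managed cluster by default, or the second kind cluster in
// hosted mode.
func kubectlTarget(isHosted bool, args ...string) error {
	kubeconfig := "../../kubeconfig_managed_e2e"
	if isHosted {
		kubeconfig = "../../kubeconfig_managed2_e2e"
	}

	args = append(args, "--kubeconfig="+kubeconfig)

	return exec.Command("kubectl", args...).Run()
}

Per the Makefile changes above, a local hosted run should then look something like `make e2e-test IS_HOSTED=true` with `TARGET_KUBECONFIG_PATH` exported, though the exact invocation depends on how the kind clusters were bootstrapped.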