diff --git a/api/v1/configurationpolicy_types.go b/api/v1/configurationpolicy_types.go index d285bc03..ec41e287 100644 --- a/api/v1/configurationpolicy_types.go +++ b/api/v1/configurationpolicy_types.go @@ -103,7 +103,9 @@ func (t Target) String() string { return fmt.Sprintf(fmtSelectorStr, t.Include, t.Exclude, *t.MatchLabels, *t.MatchExpressions) } -// Configures the minimum elapsed time before a ConfigurationPolicy is reevaluated +// Configures the minimum elapsed time before a ConfigurationPolicy is reevaluated. If the policy +// spec is changed, or if the list of namespaces selected by the policy changes, the policy may be +// evaluated regardless of the settings here. type EvaluationInterval struct { //+kubebuilder:validation:Pattern=`^(?:(?:(?:[0-9]+(?:.[0-9])?)(?:h|m|s|(?:ms)|(?:us)|(?:ns)))|never)+$` // The minimum elapsed time before a ConfigurationPolicy is reevaluated when in the compliant state. Set this to diff --git a/controllers/configurationpolicy_controller.go b/controllers/configurationpolicy_controller.go index 06b86313..6e96c44b 100644 --- a/controllers/configurationpolicy_controller.go +++ b/controllers/configurationpolicy_controller.go @@ -120,6 +120,7 @@ type ConfigurationPolicyReconciler struct { TargetK8sClient kubernetes.Interface TargetK8sDynamicClient dynamic.Interface TargetK8sConfig *rest.Config + SelectorReconciler common.SelectorReconciler // Whether custom metrics collection is enabled EnableMetrics bool discoveryInfo @@ -149,6 +150,8 @@ func (r *ConfigurationPolicyReconciler) Reconcile(ctx context.Context, request c prometheus.Labels{"policy": fmt.Sprintf("%s/%s", request.Namespace, request.Name)}) _ = policyUserErrorsCounter.DeletePartialMatch(prometheus.Labels{"template": request.Name}) _ = policySystemErrorsCounter.DeletePartialMatch(prometheus.Labels{"template": request.Name}) + + r.SelectorReconciler.Stop(request.Name) } return reconcile.Result{}, nil @@ -236,7 +239,7 @@ func (r *ConfigurationPolicyReconciler) 
PeriodicallyExecConfigPolicies( for i := range policiesList.Items { policy := policiesList.Items[i] - if !shouldEvaluatePolicy(&policy, cleanupImmediately) { + if !r.shouldEvaluatePolicy(&policy, cleanupImmediately) { continue } @@ -346,7 +349,9 @@ func (r *ConfigurationPolicyReconciler) refreshDiscoveryInfo() error { // met, then that will also trigger an evaluation. If cleanupImmediately is true, then only policies // with finalizers will be ready for evaluation regardless of the last evaluation. // cleanupImmediately should be set true when the controller is getting uninstalled. -func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediately bool) bool { +func (r *ConfigurationPolicyReconciler) shouldEvaluatePolicy( + policy *policyv1.ConfigurationPolicy, cleanupImmediately bool, +) bool { log := log.WithValues("policy", policy.GetName()) // If it's time to clean up such as when the config-policy-controller is being uninstalled, only evaluate policies @@ -356,19 +361,19 @@ func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediate } if policy.ObjectMeta.DeletionTimestamp != nil { - log.V(2).Info("The policy has been deleted and is waiting for object cleanup. Will evaluate it now.") + log.V(1).Info("The policy has been deleted and is waiting for object cleanup. Will evaluate it now.") return true } if policy.Status.LastEvaluatedGeneration != policy.Generation { - log.V(2).Info("The policy has been updated. Will evaluate it now.") + log.V(1).Info("The policy has been updated. Will evaluate it now.") return true } if policy.Status.LastEvaluated == "" { - log.V(2).Info("The policy's status.lastEvaluated field is not set. Will evaluate it now.") + log.V(1).Info("The policy's status.lastEvaluated field is not set. 
Will evaluate it now.") return true } @@ -387,13 +392,23 @@ func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediate } else if policy.Status.ComplianceState == policyv1.NonCompliant && policy.Spec != nil { interval, err = policy.Spec.EvaluationInterval.GetNonCompliantInterval() } else { - log.V(2).Info("The policy has an unknown compliance. Will evaluate it now.") + log.V(1).Info("The policy has an unknown compliance. Will evaluate it now.") + + return true + } + + usesSelector := policy.Spec.NamespaceSelector.MatchLabels != nil || + policy.Spec.NamespaceSelector.MatchExpressions != nil || + len(policy.Spec.NamespaceSelector.Include) != 0 + + if usesSelector && r.SelectorReconciler.HasUpdate(policy.Name) { + log.V(1).Info("There was an update for this policy's namespaces. Will evaluate it now.") return true } if errors.Is(err, policyv1.ErrIsNever) { - log.Info("Skipping the policy evaluation due to the spec.evaluationInterval value being set to never") + log.V(1).Info("Skipping the policy evaluation due to the spec.evaluationInterval value being set to never") return false } else if err != nil { @@ -409,7 +424,7 @@ func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediate nextEvaluation := lastEvaluated.Add(interval) if nextEvaluation.Sub(time.Now().UTC()) > 0 { - log.Info("Skipping the policy evaluation due to the policy not reaching the evaluation interval") + log.V(1).Info("Skipping the policy evaluation due to the policy not reaching the evaluation interval") return false } @@ -473,11 +488,13 @@ func (r *ConfigurationPolicyReconciler) getObjectTemplateDetails( selector := plc.Spec.NamespaceSelector // If MatchLabels/MatchExpressions/Include were not provided, return no namespaces if selector.MatchLabels == nil && selector.MatchExpressions == nil && len(selector.Include) == 0 { + r.SelectorReconciler.Stop(plc.Name) + log.Info("namespaceSelector is empty. 
Skipping namespace retrieval.") } else { // If an error occurred in the NamespaceSelector, update the policy status and abort var err error - selectedNamespaces, err = common.GetSelectedNamespaces(r.TargetK8sClient, selector) + selectedNamespaces, err = r.SelectorReconciler.Get(plc.Name, plc.Spec.NamespaceSelector) if err != nil { errMsg := "Error filtering namespaces with provided namespaceSelector" log.Error( diff --git a/controllers/configurationpolicy_controller_test.go b/controllers/configurationpolicy_controller_test.go index 29e93437..020b39ba 100644 --- a/controllers/configurationpolicy_controller_test.go +++ b/controllers/configurationpolicy_controller_test.go @@ -954,6 +954,10 @@ func TestShouldEvaluatePolicy(t *testing.T) { }, } + r := &ConfigurationPolicyReconciler{ + SelectorReconciler: &fakeSR{}, + } + for _, test := range tests { test := test @@ -970,7 +974,7 @@ func TestShouldEvaluatePolicy(t *testing.T) { policy.ObjectMeta.DeletionTimestamp = test.deletionTimestamp policy.ObjectMeta.Finalizers = test.finalizers - if actual := shouldEvaluatePolicy(policy, test.cleanupImmediately); actual != test.expected { + if actual := r.shouldEvaluatePolicy(policy, test.cleanupImmediately); actual != test.expected { t.Fatalf("expected %v but got %v", test.expected, actual) } }, @@ -978,6 +982,19 @@ func TestShouldEvaluatePolicy(t *testing.T) { } } +type fakeSR struct{} + +func (r *fakeSR) Get(_ string, _ policyv1.Target) ([]string, error) { + return nil, nil +} + +func (r *fakeSR) HasUpdate(_ string) bool { + return false +} + +func (r *fakeSR) Stop(_ string) { +} + func TestShouldHandleSingleKeyFalse(t *testing.T) { t.Parallel() diff --git a/deploy/crds/kustomize/policy.open-cluster-management.io_configurationpolicies.yaml b/deploy/crds/kustomize/policy.open-cluster-management.io_configurationpolicies.yaml index 9d70f1d2..d4009433 100644 --- a/deploy/crds/kustomize/policy.open-cluster-management.io_configurationpolicies.yaml +++ 
b/deploy/crds/kustomize/policy.open-cluster-management.io_configurationpolicies.yaml @@ -43,7 +43,9 @@ spec: properties: evaluationInterval: description: Configures the minimum elapsed time before a ConfigurationPolicy - is reevaluated + is reevaluated. If the policy spec is changed, or if the list of + namespaces selected by the policy changes, the policy may be evaluated + regardless of the settings here. properties: compliant: description: The minimum elapsed time before a ConfigurationPolicy diff --git a/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml b/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml index 5512abf2..06cd6a7e 100644 --- a/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml +++ b/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml @@ -50,7 +50,9 @@ spec: properties: evaluationInterval: description: Configures the minimum elapsed time before a ConfigurationPolicy - is reevaluated + is reevaluated. If the policy spec is changed, or if the list of + namespaces selected by the policy changes, the policy may be evaluated + regardless of the settings here. 
properties: compliant: description: The minimum elapsed time before a ConfigurationPolicy diff --git a/main.go b/main.go index 9873e8cc..6e856599 100644 --- a/main.go +++ b/main.go @@ -11,6 +11,7 @@ import ( "os" "runtime" "strings" + "sync" "time" "github.com/go-logr/zapr" @@ -20,6 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" k8sruntime "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -28,6 +30,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" @@ -202,6 +205,18 @@ func main() { log.Info("Skipping restrictions on the ConfigurationPolicy cache because watchNamespace is empty") } + nsTransform := func(obj interface{}) (interface{}, error) { + ns := obj.(*corev1.Namespace) + guttedNS := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns.Name, + Labels: ns.Labels, + }, + } + + return guttedNS, nil + } + // Set default manager options options := manager.Options{ MetricsBindAddress: opts.metricsAddr, @@ -210,7 +225,12 @@ func main() { HealthProbeBindAddress: opts.probeAddr, LeaderElection: opts.enableLeaderElection, LeaderElectionID: "config-policy-controller.open-cluster-management.io", - NewCache: cache.BuilderWithOptions(cache.Options{SelectorsByObject: cacheSelectors}), + NewCache: cache.BuilderWithOptions(cache.Options{ + SelectorsByObject: cacheSelectors, + TransformByObject: map[client.Object]toolscache.TransformFunc{ + &corev1.Namespace{}: nsTransform, + }, + }), // Disable the cache for Secrets to avoid a watch getting created when the `policy-encryption-key` // Secret is 
retrieved. Special cache handling is done by the controller. ClientDisableCacheFor: []client.Object{&corev1.Secret{}}, @@ -252,12 +272,14 @@ func main() { var targetK8sClient kubernetes.Interface var targetK8sDynamicClient dynamic.Interface var targetK8sConfig *rest.Config + var nsSelMgr manager.Manager // A separate controller-manager is needed in hosted mode if opts.targetKubeConfig == "" { targetK8sConfig = cfg targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig) targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig) - } else { + nsSelMgr = mgr + } else { // "Hosted mode" var err error targetK8sConfig, err = clientcmd.BuildConfigFromFlags("", opts.targetKubeConfig) @@ -272,6 +294,18 @@ func main() { targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig) targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig) + nsSelMgr, err = manager.New(targetK8sConfig, manager.Options{ + NewCache: cache.BuilderWithOptions(cache.Options{ + TransformByObject: map[client.Object]toolscache.TransformFunc{ + &corev1.Namespace{}: nsTransform, + }, + }), + }) + if err != nil { + log.Error(err, "Unable to create manager from target kube config") + os.Exit(1) + } + log.Info( "Overrode the target Kubernetes cluster for policy evaluation and enforcement", "path", opts.targetKubeConfig, @@ -280,6 +314,14 @@ func main() { instanceName, _ := os.Hostname() // on an error, instanceName will be empty, which is ok + nsSelReconciler := common.NamespaceSelectorReconciler{ + Client: nsSelMgr.GetClient(), + } + if err = nsSelReconciler.SetupWithManager(nsSelMgr); err != nil { + log.Error(err, "Unable to create controller", "controller", "NamespaceSelector") + os.Exit(1) + } + reconciler := controllers.ConfigurationPolicyReconciler{ Client: mgr.GetClient(), DecryptionConcurrency: opts.decryptionConcurrency, @@ -290,6 +332,7 @@ func main() { TargetK8sClient: targetK8sClient, TargetK8sDynamicClient: targetK8sDynamicClient, TargetK8sConfig: targetK8sConfig, + 
SelectorReconciler: &nsSelReconciler, EnableMetrics: opts.enableMetrics, } if err = reconciler.SetupWithManager(mgr); err != nil { @@ -353,10 +396,44 @@ func main() { log.Info("Addon status reporting is not enabled") } - log.Info("Starting manager") + log.Info("Starting managers") + + var wg sync.WaitGroup + var errorExit bool + + wg.Add(1) + + go func() { + if err := mgr.Start(managerCtx); err != nil { + log.Error(err, "Problem running manager") + + managerCancel() + + errorExit = true + } + + wg.Done() + }() + + if opts.targetKubeConfig != "" { // "hosted mode" + wg.Add(1) + + go func() { + if err := nsSelMgr.Start(managerCtx); err != nil { + log.Error(err, "Problem running manager") + + managerCancel() + + errorExit = true + } + + wg.Done() + }() + } + + wg.Wait() - if err := mgr.Start(managerCtx); err != nil { - log.Error(err, "Problem running manager") + if errorExit { os.Exit(1) } } diff --git a/pkg/common/namespace_selection.go b/pkg/common/namespace_selection.go index 39332e07..804e7a3c 100644 --- a/pkg/common/namespace_selection.go +++ b/pkg/common/namespace_selection.go @@ -6,10 +6,23 @@ package common import ( "context" "fmt" + "reflect" + "sort" + "sync" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" policyv1 "open-cluster-management.io/config-policy-controller/api/v1" ) @@ -103,3 +116,220 @@ func GetAllNamespaces(client kubernetes.Interface, labelSelector metav1.LabelSel return namespacesNames, nil } + +// SelectorReconciler keeps a cache of NamespaceSelector 
results, which it should update when +// namespaces are created, deleted, or re-labeled. +type SelectorReconciler interface { + // Get returns the items matching the given Target for the given name. If no selection for that + // name and Target has been calculated, it will be calculated now. Otherwise, a cached value + // may be used. + Get(string, policyv1.Target) ([]string, error) + + // HasUpdate indicates when the cached selection for this name has been changed since the last + // time that Get was called for that name. + HasUpdate(string) bool + + // Stop tells the SelectorReconciler to stop updating the cached selection for the name. + Stop(string) +} + +type NamespaceSelectorReconciler struct { + Client client.Client + selections map[string]namespaceSelection + lock sync.RWMutex +} + +type namespaceSelection struct { + target policyv1.Target + namespaces []string + hasUpdate bool + err error +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NamespaceSelectorReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.selections = make(map[string]namespaceSelection) + + neverEnqueue := predicate.NewPredicateFuncs(func(o client.Object) bool { return false }) + + // Instead of reconciling for each Namespace, just reconcile once + // - that reconcile will do a list on all the Namespaces anyway. + mapToSingleton := func(_ client.Object) []reconcile.Request { + return []reconcile.Request{{NamespacedName: types.NamespacedName{ + Name: "NamespaceSelector", + }}} + } + + return ctrl.NewControllerManagedBy(mgr). + Named("NamespaceSelector"). + For( // This is a workaround because a `For` is required, but doesn't allow the enqueueing to be customized + &corev1.Namespace{}, + builder.WithPredicates(neverEnqueue)). + Watches( + &source.Kind{Type: &corev1.Namespace{}}, + handler.EnqueueRequestsFromMapFunc(mapToSingleton), + builder.WithPredicates(predicate.LabelChangedPredicate{})). 
+ Complete(r) +} + +// Reconcile runs whenever a namespace on the target cluster is created, deleted, or has a change in +// labels. It updates the cached selections for NamespaceSelectors that it knows about. +func (r *NamespaceSelectorReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { + log := logf.Log.WithValues("Reconciler", "NamespaceSelector") + oldSelections := make(map[string]namespaceSelection) + + r.lock.RLock() + + for name, selection := range r.selections { + oldSelections[name] = selection + } + + r.lock.RUnlock() + + // No selections to populate, just skip. + if len(r.selections) == 0 { + return ctrl.Result{}, nil + } + + namespaces := corev1.NamespaceList{} + + // This List will be from the cache + if err := r.Client.List(ctx, &namespaces); err != nil { + log.Error(err, "Unable to list namespaces from the cache") + + return ctrl.Result{}, err + } + + for name, oldSelection := range oldSelections { + newNamespaces, err := filter(namespaces, oldSelection.target) + if err != nil { + log.Error(err, "Unable to filter namespaces for policy", "name", name) + + r.update(name, namespaceSelection{ + target: oldSelection.target, + namespaces: newNamespaces, + hasUpdate: oldSelection.err == nil, // it has an update if the error state changed + err: err, + }) + + continue + } + + if !reflect.DeepEqual(newNamespaces, oldSelection.namespaces) { + log.V(2).Info("Updating selection from Reconcile", "policy", name, "selection", newNamespaces) + + r.update(name, namespaceSelection{ + target: oldSelection.target, + namespaces: newNamespaces, + hasUpdate: true, + err: nil, + }) + } + } + + return ctrl.Result{}, nil +} + +// Get returns the items matching the given Target for the given policy. If no selection for that +// policy and Target has been calculated, it will be calculated now. Otherwise, a cached value +// may be used. 
+func (r *NamespaceSelectorReconciler) Get(name string, t policyv1.Target) ([]string, error) { + log := logf.Log.WithValues("Reconciler", "NamespaceSelector") + + r.lock.Lock() + + // If found, and target has not been changed + if selection, found := r.selections[name]; found && selection.target.String() == t.String() { + selection.hasUpdate = false + r.selections[name] = selection + + r.lock.Unlock() + + return selection.namespaces, selection.err + } + + // unlock for now, the list filtering could take a non-trivial amount of time + r.lock.Unlock() + + // New, or the target has changed. + nsList := corev1.NamespaceList{} + + labelSelector := parseToLabelSelector(t) + + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return nil, fmt.Errorf("error parsing namespace LabelSelector: %w", err) + } + + // This List will be from the cache + if err := r.Client.List(context.TODO(), &nsList, &client.ListOptions{LabelSelector: selector}); err != nil { + log.Error(err, "Unable to list namespaces from the cache") + + return nil, err + } + + nsToMatch := make([]string, len(nsList.Items)) + for i, ns := range nsList.Items { + nsToMatch[i] = ns.Name + } + + selected, err := Matches(nsToMatch, t.Include, t.Exclude) + sort.Strings(selected) + + log.V(2).Info("Updating selection from Reconcile", "policy", name, "selection", selected) + + r.update(name, namespaceSelection{ + target: t, + namespaces: selected, + hasUpdate: false, + err: err, + }) + + return selected, err +} + +// HasUpdate indicates when the cached selection for this policy has been changed since the last +// time that Get was called for that policy. +func (r *NamespaceSelectorReconciler) HasUpdate(name string) bool { + r.lock.RLock() + defer r.lock.RUnlock() + + return r.selections[name].hasUpdate +} + +// Stop tells the SelectorReconciler to stop updating the cached selection for the name. 
+func (r *NamespaceSelectorReconciler) Stop(name string) { + r.lock.Lock() + defer r.lock.Unlock() + + delete(r.selections, name) +} + +func (r *NamespaceSelectorReconciler) update(name string, sel namespaceSelection) { + r.lock.Lock() + defer r.lock.Unlock() + + r.selections[name] = sel +} + +func filter(allNSList corev1.NamespaceList, t policyv1.Target) ([]string, error) { + labelSelector := parseToLabelSelector(t) + + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return nil, fmt.Errorf("error parsing namespace LabelSelector: %w", err) + } + + nsToFilter := make([]string, 0) + + for _, ns := range allNSList.Items { + if selector.Matches(labels.Set(ns.GetLabels())) { + nsToFilter = append(nsToFilter, ns.Name) + } + } + + namespaces, err := Matches(nsToFilter, t.Include, t.Exclude) + sort.Strings(namespaces) + + return namespaces, err +} diff --git a/test/e2e/case19_ns_selector_test.go b/test/e2e/case19_ns_selector_test.go index 3553123d..9186a062 100644 --- a/test/e2e/case19_ns_selector_test.go +++ b/test/e2e/case19_ns_selector_test.go @@ -4,144 +4,348 @@ package e2e import ( + "context" + "fmt" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "open-cluster-management.io/config-policy-controller/test/utils" ) -const ( - case19PolicyName string = "policy-configmap-selector-e2e" - case19PolicyYaml string = "../resources/case19_ns_selector/case19_cm_policy.yaml" - case19TemplatesName string = "configmap-selector-e2e" - case19TemplatesKind string = "ConfigMap" - case19PrereqYaml string = "../resources/case19_ns_selector/case19_cm_manifest.yaml" - case19PatchPrefix string = "[{\"op\":\"replace\",\"path\":\"/spec/namespaceSelector\",\"value\":" - case19PatchSuffix string = "}]" -) +const nsSelectorPatchFmt = `--patch=[{"op":"replace","path":"/spec/namespaceSelector","value":%s}]` -// Test setup for namespace selection policy tests: -// - Namespaces `case19-[1-5]-e2e`, each with a `name: ` label -// - Single deployed Configmap `configmap-selector-e2e` in namespace `case19-1-e2e` -// - Deployed policy should be compliant since it matches the single deployed ConfigMap -// - Policies are patched so that the namespace doesn't match and should be NonCompliant -var _ = Describe("Test object namespace selection", Ordered, func() { - // NamespaceSelector patches to test - resetPatch := "{\"include\":[\"case19-1-e2e\"]}" - allPatch := "{\"matchExpressions\":[{\"key\":\"name\",\"operator\":\"Exists\"}]}" - patches := map[string]struct { - patch string - message string - }{ - "no namespaceSelector specified": { - "{}", - "namespaced object " + case19TemplatesName + " of kind " + case19TemplatesKind + - " has no namespace specified" + - " from the policy namespaceSelector nor the object metadata", - }, - "a non-matching LabelSelector": { - "{\"matchLabels\":{\"name\":\"not-a-namespace\"}}", - "namespaced object " + case19TemplatesName + " of kind " + case19TemplatesKind + - " has no namespace specified" + - " from the policy namespaceSelector nor the object metadata", - }, 
- "LabelSelector and exclude": { - "{\"exclude\":[\"*-[3-4]-e2e\"],\"matchLabels\":{}," + - "\"matchExpressions\":[{\"key\":\"name\",\"operator\":\"Exists\"}]}", - "configmaps [configmap-selector-e2e] not found in namespaces: case19-2-e2e, case19-5-e2e", - }, - "empty LabelSelector and include/exclude": { - "{\"include\":[\"case19-[2-5]-e2e\"],\"exclude\":[\"*-[3-4]-e2e\"]," + - "\"matchLabels\":{},\"matchExpressions\":[]}", - "configmaps [configmap-selector-e2e] not found in namespaces: case19-2-e2e, case19-5-e2e", - }, - "LabelSelector": { - "{\"matchExpressions\":[{\"key\":\"name\",\"operator\":\"Exists\"}]}", - "configmaps [configmap-selector-e2e] not found in namespaces: case19-2-e2e, case19-3-e2e, " + - "case19-4-e2e, case19-5-e2e", - }, - "Malformed filepath in include": { - "{\"include\":[\"*-[a-z-*\"]}", - "Error filtering namespaces with provided namespaceSelector: " + - "error parsing 'include' pattern '*-[a-z-*': syntax error in pattern", - }, - "MatchExpressions with incorrect operator": { - "{\"matchExpressions\":[{\"key\":\"name\",\"operator\":\"Seriously\"}]}", - "Error filtering namespaces with provided namespaceSelector: " + - "error parsing namespace LabelSelector: \"Seriously\" is not a valid label selector operator", - }, - "MatchExpressions with missing values": { - "{\"matchExpressions\":[{\"key\":\"name\",\"operator\":\"In\",\"values\":[]}]}", - "Error filtering namespaces with provided namespaceSelector: " + - "error parsing namespace LabelSelector: " + - "values: Invalid value: []string(nil): for 'in', 'notin' operators, values set can't be empty", - }, - } - - It("creates prerequisite objects", func() { - utils.Kubectl("apply", "-f", case19PrereqYaml) - // Delete the last namespace so we can use it to test whether - // adding a namespace works as the final test - utils.Kubectl("delete", "namespace", "case19-6-e2e") - utils.Kubectl("apply", "-f", case19PolicyYaml, "-n", testNamespace) - plc := utils.GetWithTimeout(clientManagedDynamic, 
gvrConfigPolicy, - case19PolicyName, testNamespace, true, defaultTimeoutSeconds) - Expect(plc).NotTo(BeNil()) - }) +var _ = Describe("Test results of namespace selection", Ordered, func() { + const ( + prereqYaml string = "../resources/case19_ns_selector/case19_results_prereq.yaml" + policyYaml string = "../resources/case19_ns_selector/case19_results_policy.yaml" + policyName string = "selector-results-e2e" + + noMatchesMsg string = "namespaced object configmap-selector-e2e of kind ConfigMap has no " + + "namespace specified from the policy namespaceSelector nor the object metadata" + notFoundMsgFmt string = "configmaps [configmap-selector-e2e] not found in namespaces: %s" + filterErrMsgFmt string = "Error filtering namespaces with provided namespaceSelector: %s" + ) + + // Test setup for namespace selection policy tests: + // - Namespaces `case19a-[1-5]-e2e`, each with a `case19a: ` label + // - Single deployed Configmap `configmap-selector-e2e` in namespace `case19a-1-e2e` + // - Deployed policy should be compliant since it matches the single deployed ConfigMap + // - Policies are patched so that the namespace doesn't match and should be NonCompliant + BeforeAll(func() { + By("Applying prerequisites") + utils.Kubectl("apply", "-f", prereqYaml) + DeferCleanup(func() { + utils.Kubectl("delete", "-f", prereqYaml) + }) - It("should properly handle the namespaceSelector", func() { - for name, patch := range patches { - By("patching compliant policy " + case19PolicyName + " on the managed cluster") - utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", case19PolicyName, "--type=json", - "--patch="+case19PatchPrefix+resetPatch+case19PatchSuffix, - ) - Eventually(func() interface{} { - managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, - case19PolicyName, testNamespace, true, defaultTimeoutSeconds) - - return utils.GetComplianceState(managedPlc) - }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) - By("patching with " + name) 
- utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", case19PolicyName, "--type=json", - "--patch="+case19PatchPrefix+patch.patch+case19PatchSuffix, - ) - Eventually(func() interface{} { - managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, - case19PolicyName, testNamespace, true, defaultTimeoutSeconds) - - return utils.GetComplianceState(managedPlc) - }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) - Eventually(func() interface{} { - managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, - case19PolicyName, testNamespace, true, defaultTimeoutSeconds) - - return utils.GetStatusMessage(managedPlc) - }, defaultTimeoutSeconds, 1).Should(Equal(patch.message)) - } + utils.Kubectl("apply", "-f", policyYaml, "-n", testNamespace) + DeferCleanup(func() { + utils.Kubectl("delete", "-f", policyYaml, "-n", testNamespace) + }) }) - It("should handle when a matching labeled namespace is added", func() { - utils.Kubectl("apply", "-f", case19PrereqYaml) - By("patching with a patch for all namespaces") - utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", case19PolicyName, "--type=json", - "--patch="+case19PatchPrefix+allPatch+case19PatchSuffix, + DescribeTable("Checking results of different namespaceSelectors", func(patch string, message string) { + By("patching policy with the test selector") + utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", policyName, "--type=json", + fmt.Sprintf(nsSelectorPatchFmt, patch), ) Eventually(func() interface{} { managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, - case19PolicyName, testNamespace, true, defaultTimeoutSeconds) + policyName, testNamespace, true, defaultTimeoutSeconds) return utils.GetStatusMessage(managedPlc) - }, defaultTimeoutSeconds, 1).Should(Equal( - "configmaps [configmap-selector-e2e] not found in namespaces: case19-2-e2e, case19-3-e2e, case19-4-e2e, " + - "case19-5-e2e, case19-6-e2e", - )) + }, 
defaultTimeoutSeconds, 1).Should(Equal(message)) + }, + Entry("No namespaceSelector specified", + "{}", + noMatchesMsg), + Entry("LabelSelector and exclude", + `{"exclude":["*19a-[3-4]-e2e"],"matchExpressions":[{"key":"case19a","operator":"Exists"}]}`, + fmt.Sprintf(notFoundMsgFmt, "case19a-2-e2e, case19a-5-e2e"), + ), + Entry("A non-matching LabelSelector", + `{"matchLabels":{"name":"not-a-namespace"}}`, + noMatchesMsg), + Entry("Empty LabelSelector and include/exclude", + `{"include":["case19a-[2-5]-e2e"],"exclude":["*-[3-4]-e2e"],"matchLabels":{},"matchExpressions":[]}`, + fmt.Sprintf(notFoundMsgFmt, "case19a-2-e2e, case19a-5-e2e"), + ), + Entry("LabelSelector", + `{"matchExpressions":[{"key":"case19a","operator":"Exists"}]}`, + fmt.Sprintf(notFoundMsgFmt, "case19a-2-e2e, case19a-3-e2e, case19a-4-e2e, case19a-5-e2e"), + ), + Entry("Malformed filepath in include", + `{"include":["*-[a-z-*"]}`, + fmt.Sprintf(filterErrMsgFmt, "error parsing 'include' pattern '*-[a-z-*': syntax error in pattern"), + ), + Entry("MatchExpressions with incorrect operator", + `{"matchExpressions":[{"key":"name","operator":"Seriously"}]}`, + fmt.Sprintf(filterErrMsgFmt, "error parsing namespace LabelSelector: "+ + `"Seriously" is not a valid label selector operator`), + ), + Entry("MatchExpressions with missing values", + `{"matchExpressions":[{"key":"name","operator":"In","values":[]}]}`, + fmt.Sprintf(filterErrMsgFmt, "error parsing namespace LabelSelector: "+ + "values: Invalid value: []string(nil): for 'in', 'notin' operators, values set can't be empty"), + ), + ) +}) + +var _ = Describe("Test behavior of namespace selection as namespaces change", Ordered, func() { + const ( + prereqYaml string = "../resources/case19_ns_selector/case19_behavior_prereq.yaml" + policyYaml string = "../resources/case19_ns_selector/case19_behavior_policy.yaml" + policyName string = "selector-behavior-e2e" + + notFoundMsgFmt string = "configmaps [configmap-selector-e2e] not found in namespaces: %s" + ) + 
+ BeforeAll(func() { + By("Applying prerequisites") + utils.Kubectl("apply", "-f", prereqYaml) + // cleaned up in an AfterAll because that will cover other namespaces created in the tests + + utils.Kubectl("apply", "-f", policyYaml, "-n", testNamespace) + DeferCleanup(func() { + utils.Kubectl("delete", "-f", policyYaml, "-n", testNamespace) + }) + + By("Verifying initial compliance message") + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + return utils.GetStatusMessage(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal(fmt.Sprintf(notFoundMsgFmt, + "case19b-1-e2e, case19b-2-e2e"))) }) AfterAll(func() { - utils.Kubectl("delete", "-f", case19PrereqYaml) - policies := []string{ - case19PolicyName, - } - deleteConfigPolicies(policies) + utils.Kubectl("delete", "ns", "case19b-1-e2e", "--ignore-not-found") + utils.Kubectl("delete", "ns", "case19b-2-e2e", "--ignore-not-found") + utils.Kubectl("delete", "ns", "case19b-3-e2e", "--ignore-not-found") + utils.Kubectl("delete", "ns", "case19b-4-e2e", "--ignore-not-found") + utils.Kubectl("delete", "ns", "kube-case19b-e2e", "--ignore-not-found") + }) + + It("should evaluate when a matching labeled namespace is added", func() { + _, err := clientManaged.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "case19b-3-e2e", + Labels: map[string]string{ + "case19b": "case19b-3-e2e", + }, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + return utils.GetStatusMessage(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal(fmt.Sprintf(notFoundMsgFmt, + "case19b-1-e2e, case19b-2-e2e, case19b-3-e2e"))) + }) + + It("should not evaluate early if a non-matching namespace is 
added", func() { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + evalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(evalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + _, err = clientManaged.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{Name: "case19b-4-e2e"}, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + newEvalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newEvalTime + }, "40s", "3s").Should(Equal(evalTime)) + }) + + It("should evaluate when a namespace is labeled to match", func() { + utils.Kubectl("label", "ns", "case19b-4-e2e", "case19b=case19b-4-e2e") + + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + return utils.GetStatusMessage(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal(fmt.Sprintf(notFoundMsgFmt, + "case19b-1-e2e, case19b-2-e2e, case19b-3-e2e, case19b-4-e2e"))) + }) + + It("should evaluate when a matching namespace label is removed", func() { + utils.Kubectl("label", "ns", "case19b-3-e2e", "case19b-") + + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + return utils.GetStatusMessage(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal(fmt.Sprintf(notFoundMsgFmt, + "case19b-1-e2e, case19b-2-e2e, case19b-4-e2e"))) + }) + + 
It("should not evaluate when an excluded namespace is added", func() { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + evalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(evalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + _, err = clientManaged.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-case19b-e2e", + Labels: map[string]string{ + "case19b": "kube-case19b-e2e", + }, + }, + }, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + newEvalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newEvalTime + }, "40s", "3s").Should(Equal(evalTime)) + }) + + It("should not evaluate when a matched namespace is changed", func() { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + evalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(evalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + utils.Kubectl("label", "ns", "case19b-1-e2e", "extra-label=hello") + + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + newEvalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) 
+ + return newEvalTime + }, "40s", "3s").Should(Equal(evalTime)) + }) + + It("should not evaluate early if the namespace selector is empty", func() { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + evalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(evalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + By("Patching the configurationpolicy to remove the namespaceSelector") + utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", policyName, "--type=json", + `--patch=[{"op":"remove","path":"/spec/namespaceSelector"}]`) + + var newEvalTime string + + By("Waiting for the one evaluation after the spec changed") + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + var found bool + var err error + + newEvalTime, found, err = unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newEvalTime + }, "40s", "3s").ShouldNot(Equal(evalTime)) + + By("Verifying it does not evaluate again") + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + newestEvalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newestEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newestEvalTime + }, "40s", "3s").Should(Equal(newEvalTime)) + }) + + It("should not evaluate early when the namespace selector is not valid", func() { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, 
defaultTimeoutSeconds) + + evalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(evalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + By("Patching the configurationpolicy with an invalid namespaceSelector") + utils.Kubectl("patch", "--namespace=managed", "configurationpolicy", policyName, "--type=json", + fmt.Sprintf(nsSelectorPatchFmt, `{"matchExpressions":[{"key":"name","operator":"Seriously"}]}`)) + + var newEvalTime string + + By("Waiting for the one evaluation after the spec changed") + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + var found bool + var err error + + newEvalTime, found, err = unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newEvalTime + }, "40s", "3s").ShouldNot(Equal(evalTime)) + + By("Verifying it does not evaluate again") + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, + policyName, testNamespace, true, defaultTimeoutSeconds) + + newestEvalTime, found, err := unstructured.NestedString(managedPlc.Object, "status", "lastEvaluated") + Expect(newestEvalTime).ToNot(BeEmpty()) + Expect(found).To(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + + return newestEvalTime + }, "40s", "3s").Should(Equal(newEvalTime)) }) }) diff --git a/test/resources/case19_ns_selector/case19_behavior_policy.yaml b/test/resources/case19_ns_selector/case19_behavior_policy.yaml new file mode 100644 index 00000000..68e44eb9 --- /dev/null +++ b/test/resources/case19_ns_selector/case19_behavior_policy.yaml @@ -0,0 +1,22 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: selector-behavior-e2e +spec: + 
evaluationInterval: + compliant: 2h + noncompliant: 2h + namespaceSelector: + exclude: + - "kube-*" + matchExpressions: + - key: case19b + operator: Exists + remediationAction: inform + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: configmap-selector-e2e diff --git a/test/resources/case19_ns_selector/case19_behavior_prereq.yaml b/test/resources/case19_ns_selector/case19_behavior_prereq.yaml new file mode 100644 index 00000000..80846776 --- /dev/null +++ b/test/resources/case19_ns_selector/case19_behavior_prereq.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19b: case19b-1-e2e + name: case19b-1-e2e +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19b: case19b-2-e2e + name: case19b-2-e2e diff --git a/test/resources/case19_ns_selector/case19_cm_long_interval_policy.yaml b/test/resources/case19_ns_selector/case19_cm_long_interval_policy.yaml new file mode 100644 index 00000000..ee511bbd --- /dev/null +++ b/test/resources/case19_ns_selector/case19_cm_long_interval_policy.yaml @@ -0,0 +1,19 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-configmap-selector-e2e +spec: + namespaceSelector: + matchLabels: + sample: test + remediationAction: inform + evaluationInterval: + compliant: 2h + noncompliant: 2h + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: configmap-selector-e2e diff --git a/test/resources/case19_ns_selector/case19_cm_manifest.yaml b/test/resources/case19_ns_selector/case19_cm_manifest.yaml index efc40d1c..708a9e30 100644 --- a/test/resources/case19_ns_selector/case19_cm_manifest.yaml +++ b/test/resources/case19_ns_selector/case19_cm_manifest.yaml @@ -3,6 +3,7 @@ kind: Namespace metadata: labels: name: case19-1-e2e + sample: test name: case19-1-e2e --- apiVersion: v1 @@ -10,6 +11,7 @@ kind: Namespace 
metadata: labels: name: case19-2-e2e + sample: test name: case19-2-e2e --- apiVersion: v1 diff --git a/test/resources/case19_ns_selector/case19_cm_policy.yaml b/test/resources/case19_ns_selector/case19_results_policy.yaml similarity index 84% rename from test/resources/case19_ns_selector/case19_cm_policy.yaml rename to test/resources/case19_ns_selector/case19_results_policy.yaml index 4781efb5..f694728e 100644 --- a/test/resources/case19_ns_selector/case19_cm_policy.yaml +++ b/test/resources/case19_ns_selector/case19_results_policy.yaml @@ -1,11 +1,11 @@ apiVersion: policy.open-cluster-management.io/v1 kind: ConfigurationPolicy metadata: - name: policy-configmap-selector-e2e + name: selector-results-e2e spec: namespaceSelector: include: - - case19-1-e2e + - case19a-1-e2e remediationAction: inform object-templates: - complianceType: musthave diff --git a/test/resources/case19_ns_selector/case19_results_prereq.yaml b/test/resources/case19_ns_selector/case19_results_prereq.yaml new file mode 100644 index 00000000..c0a5f376 --- /dev/null +++ b/test/resources/case19_ns_selector/case19_results_prereq.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19a: case19a-1-e2e + name: case19a-1-e2e +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19a: case19a-2-e2e + name: case19a-2-e2e +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19a: case19a-3-e2e + name: case19a-3-e2e +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19a: case19a-4-e2e + name: case19a-4-e2e +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + case19a: case19a-5-e2e + name: case19a-5-e2e +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: configmap-selector-e2e + namespace: case19a-1-e2e