diff --git a/.github/workflows/kind.yml b/.github/workflows/kind.yml index fdf914a3..0ac69178 100644 --- a/.github/workflows/kind.yml +++ b/.github/workflows/kind.yml @@ -4,11 +4,11 @@ on: push: branches: - main - - release-2.3 + - release-2.[3-9] pull_request: branches: - main - - release-2.3 + - release-2.[3-9] defaults: run: diff --git a/Makefile b/Makefile index 169da6b3..06086fd8 100644 --- a/Makefile +++ b/Makefile @@ -215,6 +215,7 @@ install-crds: kubectl apply -f test/crds/securitycontextconstraints.security.openshift.io_crd.yaml kubectl apply -f test/crds/apiservers.config.openshift.io_crd.yaml kubectl apply -f test/crds/clusterclaims.cluster.open-cluster-management.io.yaml + kubectl apply -f test/crds/oauths.config.openshift.io_crd.yaml install-resources: @echo creating namespaces @@ -234,6 +235,9 @@ e2e-debug: kubectl get configurationpolicies.policy.open-cluster-management.io --all-namespaces kubectl describe pods -n $(KIND_NAMESPACE) kubectl logs $$(kubectl get pods -n $(KIND_NAMESPACE) -o name | grep $(IMG)) -n $(KIND_NAMESPACE) + kubectl get namespace open-cluster-management-agent-addon + kubectl get namespaces + kubectl get secrets -n open-cluster-management-agent-addon ############################################################ # e2e test coverage diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 04ef4b5a..9fae8fe9 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -52,7 +52,7 @@ func printVersion() { func main() { pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - var eventOnParent string + var eventOnParent, clusterName, hubConfigSecretNs, hubConfigSecretName string var frequency uint var enableLease bool pflag.UintVar(&frequency, "update-frequency", 10, @@ -61,6 +61,9 @@ func main() { "to also send status events on parent policy. 
options are: yes/no/ifpresent") pflag.BoolVar(&enableLease, "enable-lease", false, "If enabled, the controller will start the lease controller to report its status") + pflag.StringVar(&clusterName, "cluster-name", "acm-managed-cluster", "Name of the cluster") + pflag.StringVar(&hubConfigSecretNs, "hubconfig-secret-ns", "open-cluster-management-agent-addon", "Namespace for hub config kube-secret") + pflag.StringVar(&hubConfigSecretName, "hubconfig-secret-name", "policy-controller-hub-kubeconfig", "Name of the hub config kube-secret") pflag.Parse() @@ -128,6 +131,7 @@ func main() { } var generatedClient kubernetes.Interface = kubernetes.NewForConfigOrDie(mgr.GetConfig()) common.Initialize(&generatedClient, cfg) + policyStatusHandler.Initialize(cfg, client, &generatedClient, mgr, namespace, eventOnParent) // PeriodicallyExecConfigPolicies is the go-routine that periodically checks the policies go policyStatusHandler.PeriodicallyExecConfigPolicies(frequency, false) @@ -142,12 +146,22 @@ os.Exit(1) } } else { + log.Info("Starting lease controller to report status") leaseUpdater := lease.NewLeaseUpdater( generatedClient, "config-policy-controller", operatorNs, ) + + //set hubCfg on the lease updater if found + hubCfg, err := common.LoadHubConfig(hubConfigSecretNs, hubConfigSecretName) + if hubCfg != nil { + leaseUpdater = leaseUpdater.WithHubLeaseConfig(hubCfg, clusterName) + } else { + log.Error(err, "HubConfig not found, HubLeaseConfig not set") + } + go leaseUpdater.Start(ctx) } } else { diff --git a/deploy/operator.yaml b/deploy/operator.yaml index d0755add..1fcac024 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -20,6 +20,7 @@ spec: - config-policy-controller args: - "--enable-lease=true" + - "--hubconfig-secret-name=hub-kubeconfig" imagePullPolicy: Always env: - name: WATCH_NAMESPACE diff --git a/go.mod b/go.mod index c41cc350..ccbcc0ae 100644 --- a/go.mod +++ b/go.mod @@ -8,18 +8,17 @@ require ( github.com/google/go-cmp v0.5.2 github.com/onsi/ginkgo v1.14.1 github.com/onsi/gomega v1.10.2 - github.com/open-cluster-management/addon-framework v0.0.0-20210419013051-38730a847aff + github.com/open-cluster-management/addon-framework v0.0.0-20210621074027-a81f712c10c2 + github.com/open-cluster-management/go-template-utils v1.2.3 github.com/operator-framework/operator-sdk v0.19.4 - github.com/spf13/cast v1.3.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 - golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 k8s.io/api v0.20.5 k8s.io/apimachinery v0.20.5 k8s.io/client-go v12.0.0+incompatible k8s.io/klog v1.0.0 sigs.k8s.io/controller-runtime v0.6.2 - sigs.k8s.io/yaml v1.2.0 ) replace ( diff --git a/go.sum b/go.sum index 7847ea7e..18777ab1 100644 --- a/go.sum +++ b/go.sum @@ -697,10 +697,12 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/open-cluster-management/addon-framework v0.0.0-20210419013051-38730a847aff h1:ZFFgtkVySNuIQBO/DHxGodfHARzHqlnJgNFJdqRUZag= -github.com/open-cluster-management/addon-framework v0.0.0-20210419013051-38730a847aff/go.mod h1:mcpd6pc0j/L+WLFwV2MXHVMr+86ri2iUdTK2M8RHJ7U= +github.com/open-cluster-management/addon-framework v0.0.0-20210621074027-a81f712c10c2
h1:oHFveB+YtcfOF6zSTkGjgSWEHHkQUkwit7enNj5RRsI= +github.com/open-cluster-management/addon-framework v0.0.0-20210621074027-a81f712c10c2/go.mod h1:mcpd6pc0j/L+WLFwV2MXHVMr+86ri2iUdTK2M8RHJ7U= github.com/open-cluster-management/api v0.0.0-20210409125704-06f2aec1a73f h1:s6z3k0jV0ccoYDPJWMSqVNevO1UoQLYb8f7dYFALSNk= github.com/open-cluster-management/api v0.0.0-20210409125704-06f2aec1a73f/go.mod h1:ot+A1DWq+v1IV+e1S7nhIteYAmNByFgtazvzpoeAfRQ= +github.com/open-cluster-management/go-template-utils v1.2.3 h1:eeSayCDXV0IJAJjr083yuIY95NSQmQSIgTqeJ44GO2g= +github.com/open-cluster-management/go-template-utils v1.2.3/go.mod h1:+D8buOYN/VMVuTEd8WnnJQn+Z1oU4sT2OXbYZE+mIDk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -1063,8 +1065,9 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1144,8 +1147,12 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1348,8 +1355,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= diff --git a/pkg/common/kubeClient.go b/pkg/common/kubeClient.go index 1a131822..c4f908cd 100644 --- a/pkg/common/kubeClient.go +++ b/pkg/common/kubeClient.go @@ -4,8 +4,14 @@ package common import ( + "context" + base64 "encoding/base64" + "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "regexp" ) // KubeClient a k8s client used for k8s native resources @@ -14,8 +20,42 @@ var KubeClient *kubernetes.Interface // KubeConfig is the given kubeconfig at startup var KubeConfig *rest.Config -// Initialize to initialize some controller variables +// HubConfig is the kubeconfig of the hub cluster, once loaded +var HubConfig *rest.Config + +// Initialize to initialize some controller variables func Initialize(kClient *kubernetes.Interface, cfg *rest.Config) { + KubeClient = kClient KubeConfig = cfg } + +// LoadHubConfig builds (and caches) a rest.Config for the hub from the given kube-secret +func LoadHubConfig(namespace string, secretname string) (*rest.Config, error) { + + if HubConfig == nil { + + secretsClient := (*KubeClient).CoreV1().Secrets(namespace) + hubSecret, err := secretsClient.Get(context.TODO(), secretname, metav1.GetOptions{}) + + if err != nil { + glog.Errorf("Error Getting HubConfig Secret: %v", err) + return nil, err + } + + secretkconfig := string(hubSecret.Data["kubeconfig"]) + crt := base64.StdEncoding.EncodeToString(hubSecret.Data["tls.crt"]) + key := base64.StdEncoding.EncodeToString(hubSecret.Data["tls.key"]) + + re := regexp.MustCompile(`(client-certificate:\s+tls.crt)`) + secretkconfig = re.ReplaceAllString(secretkconfig, "client-certificate-data: "+crt) + + re = regexp.MustCompile(`(client-key:\s+tls.key)`) + secretkconfig = re.ReplaceAllString(secretkconfig, "client-key-data: "+key) + + HubConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(secretkconfig)) + if err != nil { + glog.Errorf("Error getting Rest config for Hub: %v", err) + return nil, err + } + } + return HubConfig, nil +}
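// The LoadHubConfig change above rewrites the addon's kubeconfig so that the file
// references tls.crt/tls.key become inline base64 data before a *rest.Config is
// built from it. A standalone sketch of that transformation follows; the
// kubeconfig snippet and certificate bytes are invented for illustration.
package main

import (
	"encoding/base64"
	"fmt"
	"regexp"
)

func main() {
	kubeconfig := `users:
- name: default-auth
  user:
    client-certificate: tls.crt
    client-key: tls.key`
	crt := base64.StdEncoding.EncodeToString([]byte("fake-cert-bytes"))
	key := base64.StdEncoding.EncodeToString([]byte("fake-key-bytes"))

	// Same substitutions LoadHubConfig applies with regexp.ReplaceAllString:
	// a file reference becomes an inline *-data field.
	kubeconfig = regexp.MustCompile(`client-certificate:\s+tls.crt`).
		ReplaceAllString(kubeconfig, "client-certificate-data: "+crt)
	kubeconfig = regexp.MustCompile(`client-key:\s+tls.key`).
		ReplaceAllString(kubeconfig, "client-key-data: "+key)

	// clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig)) can now build a hub
	// client without tls.crt/tls.key existing on disk.
	fmt.Println(kubeconfig)
}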
diff --git a/pkg/common/templates/clusterconfig_funcs.go b/pkg/common/templates/clusterconfig_funcs.go deleted file mode 100644 index 1d88dc19..00000000 --- a/pkg/common/templates/clusterconfig_funcs.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. -// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "context" - "github.com/golang/glog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// retrieve Spec value for the given clusterclaim -func fromClusterClaim(claimname string) (string, error) { - result := map[string]interface{}{} - - dclient, dclientErr := getDynamicClient( - "cluster.open-cluster-management.io/v1alpha1", - "ClusterClaim", - "", - ) - if dclientErr != nil { - return "", dclientErr - } - - getObj, getErr := dclient.Get(context.TODO(), claimname, metav1.GetOptions{}) - if getErr != nil { - glog.Errorf("Error retrieving clusterclaim : %v, %v", claimname, getErr) - return "", getErr - } - - result = getObj.UnstructuredContent() - - spec := result["spec"].(map[string]interface{}) - if _, ok := spec["value"]; ok { - return spec["value"].(string), nil - } - return "", nil -} diff --git a/pkg/common/templates/generic_lookup_func.go b/pkg/common/templates/generic_lookup_func.go deleted file mode 100644 index 67aa6587..00000000 --- a/pkg/common/templates/generic_lookup_func.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. -// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "context" - "github.com/golang/glog" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" -) - -func lookup(apiversion string, kind string, namespace string, rsrcname string) (map[string]interface{}, error) { - glog.V(2).Infof("lookup : %v, %v, %v, %v", apiversion, kind, namespace, rsrcname) - - result := map[string]interface{}{} - - //get dynamic Client for the given GVK and namespace - dclient, dclientErr := getDynamicClient(apiversion, kind, namespace) - if dclientErr != nil { - return result, dclientErr - } - - //if resourcename is set then get the specific resource - //else get list of all resources for that (gvk, ns) - - var lookupErr error - if rsrcname != "" { - getObj, getErr := dclient.Get(context.TODO(), rsrcname, metav1.GetOptions{}) - if getErr == nil { - result = getObj.UnstructuredContent() - } - lookupErr = getErr - } else { - listObj, listErr := dclient.List(context.TODO(), metav1.ListOptions{}) - if listErr == nil { - result = listObj.UnstructuredContent() - } - lookupErr = listErr - } - - if lookupErr != nil { - if apierrors.IsNotFound(lookupErr) { - lookupErr = nil - } - } - - glog.V(2).Infof("lookup result: %v", result) - return result, lookupErr -} - -//this func finds the GVR for given GVK and returns a namespaced dynamic client -func getDynamicClient(apiversion string, kind string, namespace string) (dynamic.ResourceInterface, error) { - - var dclient dynamic.ResourceInterface - gvk := schema.FromAPIVersionAndKind(apiversion, kind) - glog.V(2).Infof("GVK is: %v", gvk) - - // we have GVK but We need GVR i.e resourcename for kind inorder to create dynamicClient - // find ApiResource for given GVK - apiResource, findErr := findAPIResource(gvk) - if findErr != nil { - return nil, findErr - } - //make GVR from ApiResource - gvr := schema.GroupVersionResource{ - Group: apiResource.Group, - Version: apiResource.Version, - Resource: apiResource.Name, - } - glog.V(2).Infof("GVR is: %v", gvr) - - //get Dynamic Client - dclientIntf, dclientErr := dynamic.NewForConfig(kubeConfig) - if dclientErr != nil { - glog.Errorf("Failed to get dynamic client with err: %v", dclientErr) -
return nil, dclientErr - } - - //get Dynamic Client for GVR - dclientNsRes := dclientIntf.Resource(gvr) - - //get Dynamic Client for GVR for Namespace if namespaced - if apiResource.Namespaced && namespace != "" { - dclient = dclientNsRes.Namespace(namespace) - } else { - dclient = dclientNsRes - } - - glog.V(2).Infof("dynamic client: %v", dclient) - return dclient, nil -} - -func findAPIResource(gvk schema.GroupVersionKind) (metav1.APIResource, error) { - glog.V(2).Infof("GVK is: %v", gvk) - - apiResource := metav1.APIResource{} - - //check if an apiresource list is available already (i.e provided as input to templates) - //if not available use api discovery client to get api resource list - apiResList := kubeAPIResourceList - if apiResList == nil { - var ddErr error - apiResList, ddErr = discoverAPIResources() - if ddErr != nil { - return apiResource, ddErr - } - } - - //find apiResourcefor given GVK - var groupVersion string - if gvk.Group != "" { - groupVersion = gvk.Group + "/" + gvk.Version - } else { - groupVersion = gvk.Version - } - glog.V(2).Infof("GroupVersion is : %v", groupVersion) - - for _, apiResGroup := range apiResList { - if apiResGroup.GroupVersion == groupVersion { - for _, apiRes := range apiResGroup.APIResources { - if apiRes.Kind == gvk.Kind { - apiResource = apiRes - apiResource.Group = gvk.Group - apiResource.Version = gvk.Version - break - } - } - } - } - - glog.V(2).Infof("found APIResource : %v", apiResource) - return apiResource, nil -} - -// Configpolicycontroller sets the apiresource list on the template processor -// So this func shouldnt execute in the configpolicy flow -// including this just for completeness -func discoverAPIResources() ([]*metav1.APIResourceList, error) { - glog.V(2).Infof("discover APIResources") - - dd, ddErr := discovery.NewDiscoveryClientForConfig(kubeConfig) - if ddErr != nil { - glog.Errorf("Failed to create discovery client with err: %v", ddErr) - return nil, ddErr - } - apiresourcelist, apiresourcelistErr := dd.ServerResources() - if apiresourcelistErr != nil { - glog.Errorf("Failed to retrieve apiresourcelist with err: %v", apiresourcelistErr) - return nil, apiresourcelistErr - } - - glog.V(2).Infof("discovered APIResources : %v", apiresourcelist) - return apiresourcelist, nil -} diff --git a/pkg/common/templates/k8sresource_funcs.go b/pkg/common/templates/k8sresource_funcs.go deleted file mode 100644 index c1812250..00000000 --- a/pkg/common/templates/k8sresource_funcs.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. 
-// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "context" - base64 "encoding/base64" - "github.com/golang/glog" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// retrieves value of the key in the given Secret, namespace -func fromSecret(namespace string, secretname string, key string) (string, error) { - glog.V(2).Infof("fromSecret for namespace: %v, secretname: %v, key:%v", namespace, secretname, key) - - secretsClient := (*kubeClient).CoreV1().Secrets(namespace) - secret, getErr := secretsClient.Get(context.TODO(), secretname, metav1.GetOptions{}) - - if getErr != nil { - glog.Errorf("Error Getting secret: %v", getErr) - return "", getErr - } - glog.V(2).Infof("Secret is %v", secret) - - keyVal := secret.Data[key] - glog.V(2).Infof("Secret Key:%v, Value: %v", key, keyVal) - - // when using corev1 secret api, the data is returned decoded , - // re-encododing to be able to use it in the referencing secret - sEnc := base64.StdEncoding.EncodeToString(keyVal) - glog.V(2).Infof("encoded secret Key:%v, Value: %v", key, sEnc) - - return sEnc, nil -} - -// retrieves value for the key in the given Configmap, namespace -func fromConfigMap(namespace string, cmapname string, key string) (string, error) { - glog.V(2).Infof("fromConfigMap for namespace: %v, configmap name: %v, key:%v", namespace, cmapname, key) - - configmapsClient := (*kubeClient).CoreV1().ConfigMaps(namespace) - configmap, getErr := configmapsClient.Get(context.TODO(), cmapname, metav1.GetOptions{}) - - if getErr != nil { - glog.Errorf("Error getting configmap: %v", getErr) - return "", getErr - } - glog.V(2).Infof("Configmap is %v", configmap) - - keyVal := configmap.Data[key] - glog.V(2).Infof("Configmap Key:%v, Value: %v", key, keyVal) - - return keyVal, nil -} - -//convenience functions to base64 encode string values -//for setting in value in Referencing Secret resources -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} diff --git a/pkg/common/templates/k8sresource_funcs_test.go b/pkg/common/templates/k8sresource_funcs_test.go deleted file mode 100644 index 4126292b..00000000 --- a/pkg/common/templates/k8sresource_funcs_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. 
-// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "errors" - "strings" - "testing" -) - -func TestFromSecret(t *testing.T) { - testcases := []struct { - inputNs string - inputCMname string - inputKey string - expectedResult string - expectedErr error - }{ - {"testns", "testsecret", "secretkey1", "secretkey1Val", nil}, //green-path test - {"testns", "testsecret", "secretkey2", "secretkey2Val", nil}, //green-path test - {"testns", "idontexist", "secretkey1", "secretkey2Val", errors.New("secrets \"idontexist\" not found")}, //error : nonexistant secret - {"testns", "testsecret", "blah", "", nil}, //error : nonexistant key - } - - for _, test := range testcases { - - val, err := fromSecret(test.inputNs, test.inputCMname, test.inputKey) - - if err != nil { - if test.expectedErr == nil { - t.Fatalf(err.Error()) - } - if !strings.EqualFold(test.expectedErr.Error(), err.Error()) { - t.Fatalf("expected err: %s got err: %s", test.expectedErr, err) - } - } else { - if val != base64encode(test.expectedResult) { - t.Fatalf("expected : %s , got : %s", base64encode(test.expectedResult), val) - } - } - } -} - -func TestFromConfigMap(t *testing.T) { - testcases := []struct { - inputNs string - inputCMname string - inputKey string - expectedResult string - expectedErr error - }{ - {"testns", "testconfigmap", "cmkey1", "cmkey1Val", nil}, - {"testns", "testconfigmap", "cmkey2", "cmkey2Val", nil}, - {"testns", "idontexist", "cmkey1", "cmkey1Val", errors.New("configmaps \"idontexist\" not found")}, - {"testns", "testconfigmap", "idontexist", "", nil}, - } - - for _, test := range testcases { - - val, err := fromConfigMap(test.inputNs, test.inputCMname, test.inputKey) - - if err != nil { - if test.expectedErr == nil { - t.Fatalf(err.Error()) - } - if !strings.EqualFold(test.expectedErr.Error(), err.Error()) { - t.Fatalf("expected err: %s got err: %s", test.expectedErr, err) - } - } else { - if val != test.expectedResult { - t.Fatalf("expected : %s , got : %s", test.expectedResult, val) - } - } - } -} diff --git a/pkg/common/templates/templates.go b/pkg/common/templates/templates.go deleted file mode 100644 index a7b06cdf..00000000 --- a/pkg/common/templates/templates.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. 
-// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "github.com/golang/glog" - "github.com/spf13/cast" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "regexp" - "sigs.k8s.io/yaml" - "strconv" - "strings" - "text/template" -) - -var kubeClient *kubernetes.Interface -var kubeConfig *rest.Config -var kubeAPIResourceList []*metav1.APIResourceList - -func InitializeKubeClient(kClient *kubernetes.Interface, kConfig *rest.Config) { - kubeClient = kClient - kubeConfig = kConfig -} - -//If this is set, template processing will not try to rediscover -// the apiresourcesList needed for dynamic client/ gvk look -func SetAPIResources(apiresList []*metav1.APIResourceList) { - kubeAPIResourceList = apiresList -} - -// just does a simple check for {{ string to indicate if it has a template -func HasTemplate(templateStr string) bool { - glog.V(2).Infof("hasTemplate template str: %v", templateStr) - - hasTemplate := false - if strings.Contains(templateStr, "{{") { - hasTemplate = true - } - - glog.V(2).Infof("hasTemplate: %v", hasTemplate) - return hasTemplate -} - -// Main Template Processing func -func ResolveTemplate(tmplMap interface{}) (interface{}, error) { - - glog.V(2).Infof("ResolveTemplate for: %v", tmplMap) - - // Build Map of supported template functions - funcMap := template.FuncMap{ - "fromSecret": fromSecret, - "fromConfigMap": fromConfigMap, - "fromClusterClaim": fromClusterClaim, - "lookup": lookup, - "base64enc": base64encode, - "base64dec": base64decode, - "indent": indent, - "atoi": atoi, - "toInt": toInt, - "toBool": toBool, - } - - // create template processor and Initialize function map - tmpl := template.New("tmpl").Funcs(funcMap) - - //convert the interface to yaml to string - // ext.raw is jsonMarshalled data which the template processor is not accepting - // so marshalling unmarshalled(ext.raw) to yaml to string - - templateStr, err := toYAML(tmplMap) - if err != nil { - return "", err - } - glog.V(2).Infof("Initial template str to resolve : %v ", templateStr) - - //process for int or bool - if strings.Contains(templateStr, "toInt") || strings.Contains(templateStr, "toBool") { - templateStr = processForDataTypes(templateStr) - } - - tmpl, err = tmpl.Parse(templateStr) - if err != nil { - glog.Errorf("error parsing template map %v,\n template str %v,\n error: %v", tmplMap, templateStr, err) - return "", err - } - - var buf strings.Builder - err = tmpl.Execute(&buf, "") - if err != nil { - glog.Errorf("error executing the template map %v,\n template str %v,\n error: %v", tmplMap, templateStr, err) - return "", err - } - - resolvedTemplateStr := buf.String() - glog.V(2).Infof("resolved template str : %v ", resolvedTemplateStr) - //unmarshall before returning - - resolvedTemplateIntf, err := fromYAML(resolvedTemplateStr) - if err != nil { - return "", err - } - - return resolvedTemplateIntf, nil -} - -// fromYAML converts a YAML document into a map[string]interface{}. 
-func fromYAML(str string) (map[string]interface{}, error) { - m := map[string]interface{}{} - - if err := yaml.Unmarshal([]byte(str), &m); err != nil { - glog.Errorf("error parsing the YAML the template str %v , \n %v ", str, err) - return m, err - } - return m, nil -} - -// ftoYAML converts a map[string]interface{} to YAML document string -func toYAML(v interface{}) (string, error) { - data, err := yaml.Marshal(v) - if err != nil { - glog.Errorf("error parsing the YAML the template map %v , \n %v ", v, err) - return "", err - } - - return strings.TrimSuffix(string(data), "\n"), nil -} - -func processForDataTypes(str string) string { - - //the idea is to remove the quotes enclosing the template if it ends in toBool ot ToInt - //quotes around the resolved template forces the value to be a string.. - //so removal of these quotes allows yaml to process the datatype correctly.. - - // the below pattern searches for optional block scalars | or >.. followed by the quoted template , - // and replaces it with just the template txt thats inside in the quotes - // ex-1 key : "{{ "6" | toInt }}" .. is replaced with key : {{ "6" | toInt }} - // ex-2 key : | - // "{{ "true" | toBool }}" .. is replaced with key : {{ "true" | toBool }} - re := regexp.MustCompile(`:\s+(?:[\|>][-]?\s+)?(?:['|"]\s*)?({{.*?\s+\|\s+(?:toInt|toBool)\s*}})(?:\s*['|"])?`) - glog.V(2).Infof("\n Pattern: %v\n", re.String()) - - submatchall := re.FindAllStringSubmatch(str, -1) - glog.V(2).Infof("\n All Submatches:\n%v", submatchall) - - processeddata := re.ReplaceAllString(str, ": $1") - glog.V(2).Infof("\n processed data :\n%v", processeddata) - - return processeddata -} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - npad := "\n" + pad + strings.Replace(v, "\n", "\n"+pad, -1) - return strings.TrimSpace(npad) -} - -func toInt(v interface{}) int { - return cast.ToInt(v) -} - -func atoi(a string) int { - i, _ := strconv.Atoi(a) - return i -} - -func toBool(a string) bool { - b, _ := strconv.ParseBool(a) - return b -} diff --git a/pkg/common/templates/templates_test.go b/pkg/common/templates/templates_test.go deleted file mode 100644 index 3ba18bed..00000000 --- a/pkg/common/templates/templates_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright (c) 2020 Red Hat, Inc. 
-// Copyright Contributors to the Open Cluster Management project - -package templates - -import ( - "os" - "testing" - - "context" - "errors" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - fake "k8s.io/client-go/kubernetes/fake" - "strings" -) - -func TestMain(m *testing.M) { - - var simpleClient kubernetes.Interface = fake.NewSimpleClientset() - - //setup test resources - - testns := "testns" - var ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ - Name: testns, - }, - } - simpleClient.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}) - - //secret - secret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testsecret", - }, - Data: map[string][]byte{ - "secretkey1": []byte("secretkey1Val"), - "secretkey2": []byte("secretkey2Val"), - }, - Type: "Opaque", - } - simpleClient.CoreV1().Secrets(testns).Create(context.TODO(), &secret, metav1.CreateOptions{}) - - //configmap - configmap := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testconfigmap", - }, - Data: map[string]string{ - "cmkey1": "cmkey1Val", - "cmkey2": "cmkey2Val", - }, - } - simpleClient.CoreV1().ConfigMaps(testns).Create(context.TODO(), &configmap, metav1.CreateOptions{}) - - InitializeKubeClient(&simpleClient, nil) - - exitVal := m.Run() - os.Exit(exitVal) -} - -func TestResolveTemplate(t *testing.T) { - - const tmpl1 = `data: '{{ fromSecret "testns" "testsecret" "secretkey1" }}'` - testcases := []struct { - inputTmpl string - expectedResult string - expectedErr error - }{ - { - `data: '{{ fromSecret "testns" "testsecret" "secretkey1" }}'`, - "data: c2VjcmV0a2V5MVZhbA==", - nil, - }, - { - `param: '{{ fromConfigMap "testns" "testconfigmap" "cmkey1" }}'`, - "param: cmkey1Val", - nil, - }, - { - `config1: '{{ "testdata" | base64enc }}'`, - "config1: dGVzdGRhdGE=", - nil, - }, - { - `config2: '{{ "dGVzdGRhdGE=" | base64dec }}'`, - "config2: testdata", - nil, - }, - { - `test: '{{ blah "asdf" }}'`, - "", - errors.New("template: tmpl:1: function \"blah\" not defined"), - }, - } - - for _, test := range testcases { - - //unmarshall to Interface - tmplMap, _ := fromYAML(test.inputTmpl) - val, err := ResolveTemplate(tmplMap) - - if err != nil { - if test.expectedErr == nil { - t.Fatalf(err.Error()) - } - if !strings.EqualFold(test.expectedErr.Error(), err.Error()) { - t.Fatalf("expected err: %s got err: %s", test.expectedErr, err) - } - } else { - val, _ := toYAML(val) - if val != test.expectedResult { - t.Fatalf("expected : %s , got : %s", test.expectedResult, val) - } - } - } -} - -func TestHasTemplate(t *testing.T) { - testcases := []struct { - input string - result bool - }{ - {" I am a sample template ", false}, - {" I am a {{ sample }} template ", true}, - } - - for _, test := range testcases { - val := HasTemplate(test.input) - if val != test.result { - t.Fatalf("expected : %v , got : %v", test.result, val) - } - } -} -func TestAtoi(t *testing.T) { - testcases := []struct { - input string - result int - }{ - {"123", 123}, - } - - for _, test := range testcases { - val := atoi(test.input) - if val != test.result { - t.Fatalf("expected : %v , got : %v", test.result, val) - } - } -} - -func TestToBool(t *testing.T) { - testcases := []struct { - input string - result bool - }{ - {"1", true}, - {"blah", false}, - {"TRUE", true}, - {"F", false}, - {"true", true}, - {"false", false}, - } - - for _, test := range testcases { - val := toBool(test.input) - if val != test.result { - t.Fatalf("expected : %v , got : %v", 
test.result, val) - } - } -} - -func TestProcessForDataTypes(t *testing.T) { - testcases := []struct { - input string - expectedResult string - }{ - {`key : "{{ "1" | toBool }}"`, `key : {{ "1" | toBool }}`}, - {`key : | - "{{ "6" | toInt }}"`, - `key : {{ "6" | toInt }}`}, - {`key1 : "{{ "1" | toInt }}" - key2 : |- - {{ "test" | toBool | toInt }}`, - `key1 : {{ "1" | toInt }} - key2 : {{ "test" | toBool | toInt }}`}, - } - - for _, test := range testcases { - val := processForDataTypes(test.input) - if val != test.expectedResult { - t.Fatalf("expected : %v , got : %v", test.expectedResult, val) - } - } -} diff --git a/pkg/controller/configurationpolicy/configurationpolicy_controller.go b/pkg/controller/configurationpolicy/configurationpolicy_controller.go index ca433836..7a655e06 100644 --- a/pkg/controller/configurationpolicy/configurationpolicy_controller.go +++ b/pkg/controller/configurationpolicy/configurationpolicy_controller.go @@ -4,12 +4,11 @@ package configurationpolicy import ( - "bytes" "context" - "encoding/json" "fmt" "reflect" "sort" + "strconv" "strings" "sync" "time" @@ -18,15 +17,15 @@ import ( gocmp "github.com/google/go-cmp/cmp" policyv1 "github.com/open-cluster-management/config-policy-controller/pkg/apis/policy/v1" common "github.com/open-cluster-management/config-policy-controller/pkg/common" - templates "github.com/open-cluster-management/config-policy-controller/pkg/common/templates" + templates "github.com/open-cluster-management/go-template-utils/pkg/templates" corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -127,7 +126,6 @@ func Initialize(kubeconfig *rest.Config, clientset *kubernetes.Clientset, EventOnParent = strings.ToLower(eventParent) recorder, _ = common.CreateRecorder(*KubeClient, controllerName) config = kubeconfig - templates.InitializeKubeClient(kubeClient, kubeconfig) } //InitializeClient helper function to initialize kubeclient @@ -186,9 +184,11 @@ func (r *ReconcileConfigurationPolicy) Reconcile(request reconcile.Request) (rec return reconcile.Result{}, nil } -// PeriodicallyExecConfigPolicies always check status +// PeriodicallyExecConfigPolicies loops through all configurationpolicies in the target namespace and triggers +// template handling for each one. This function drives all the work the configuration policy controller does. func PeriodicallyExecConfigPolicies(freq uint, test bool) { - // var plcToUpdateMap map[string]*policyv1.ConfigurationPolicy + cachedApiResourceList := []*metav1.APIResourceList{} + cachedApiGroupsList := []*restmapper.APIGroupResources{} for { start := time.Now() printMap(availablePolicies.PolicyMap) @@ -205,14 +205,25 @@ func PeriodicallyExecConfigPolicies(freq uint, test bool) { //get resources once per cycle to avoid hanging dd := clientSet.Discovery() apiresourcelist, apiresourcelistErr := dd.ServerResources() + if len(apiresourcelist) > 0 { + cachedApiResourceList = append([]*metav1.APIResourceList{}, apiresourcelist...) + } skipLoop := false - if apiresourcelistErr != nil { + if apiresourcelistErr != nil && len(cachedApiResourceList) > 0 { + glog.Errorf("Error getting API resource list. 
Using cached list...") + apiresourcelist = cachedApiResourceList + } else if apiresourcelistErr != nil { skipLoop = true glog.Errorf("Failed to retrieve apiresourcelist with err: %v", apiresourcelistErr) } apigroups, apigroupsErr := restmapper.GetAPIGroupResources(dd) - - if !skipLoop && apigroupsErr != nil { + if len(apigroups) > 0 { + cachedApiGroupsList = append([]*restmapper.APIGroupResources{}, apigroups...) + } + if apigroupsErr != nil && len(cachedApiGroupsList) > 0 { + glog.Errorf("Error getting API groups list. Using cached list...") + apigroups = cachedApiGroupsList + } else if !skipLoop && apigroupsErr != nil { skipLoop = true glog.Errorf("Failed to retrieve apigroups with err: %v", apigroupsErr) @@ -223,6 +234,12 @@ //flattenedpolicylist only contains 1 of each policy instance for _, policy := range flattenedPolicyList { Mx.Lock() + // Deep copy the policy since even though handleObjectTemplates accepts a copy + // (i.e. not a pointer) of the policy, policy.Spec.ObjectTemplates is a slice of + // pointers, so any modifications to the objects in that slice will be reflected in + // the PolicyMap cache, which can have unintended side effects. + policy = (*policy).DeepCopy() + //handle each template in each policy handleObjectTemplates(*policy, apiresourcelist, apigroups) Mx.Unlock() } @@ -244,55 +261,11 @@ } } -func addConditionToStatus(plc *policyv1.ConfigurationPolicy, cond *policyv1.Condition, index int, - complianceState policyv1.ComplianceState) (updateNeeded bool) { - var update bool - if len((*plc).Status.CompliancyDetails) <= index { - (*plc).Status.CompliancyDetails = append((*plc).Status.CompliancyDetails, policyv1.TemplateStatus{ - ComplianceState: complianceState, - Conditions: []policyv1.Condition{}, - }) - } - if (*plc).Status.CompliancyDetails[index].ComplianceState != complianceState { - update = true - } - (*plc).Status.CompliancyDetails[index].ComplianceState = complianceState - - if !checkMessageSimilarity((*plc).Status.CompliancyDetails[index].Conditions, cond) { - conditions := AppendCondition((*plc).Status.CompliancyDetails[index].Conditions, cond, "", false) - (*plc).Status.CompliancyDetails[index].Conditions = conditions - update = true - } - return update -} - -func createViolation(plc *policyv1.ConfigurationPolicy, index int, reason string, message string) (result bool) { - var cond *policyv1.Condition - cond = &policyv1.Condition{ - Type: "violation", - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Reason: reason, - Message: message, - } - return addConditionToStatus(plc, cond, index, policyv1.NonCompliant) -} - -func createNotification(plc *policyv1.ConfigurationPolicy, index int, reason string, message string) (result bool) { - var cond *policyv1.Condition - cond = &policyv1.Condition{ - Type: "notification", - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Reason: reason, - Message: message, - } - return addConditionToStatus(plc, cond, index, policyv1.Compliant) -} -
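// The DeepCopy added to PeriodicallyExecConfigPolicies above matters because a Go
// struct copy still shares any slice-of-pointer backing storage, so edits made
// while handling templates would leak back into the PolicyMap cache. A toy
// illustration of that aliasing (the types are simplified stand-ins, not the
// real API types):
package main

import "fmt"

type objectTemplate struct{ Raw string }

type policy struct {
	ObjectTemplates []*objectTemplate
}

func main() {
	cached := policy{ObjectTemplates: []*objectTemplate{{Raw: "original"}}}

	shallow := cached // struct copy: the slice header and its pointers are shared
	shallow.ObjectTemplates[0].Raw = "mutated"
	fmt.Println(cached.ObjectTemplates[0].Raw) // "mutated" -- the cache changed too

	// A deep copy (what the generated DeepCopy method provides) clones the elements.
	deep := policy{}
	for _, t := range cached.ObjectTemplates {
		c := *t
		deep.ObjectTemplates = append(deep.ObjectTemplates, &c)
	}
	deep.ObjectTemplates[0].Raw = "safe"
	fmt.Println(cached.ObjectTemplates[0].Raw) // still "mutated": now unaffected
}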
+//handleObjectTemplates iterates through all policy templates in a given policy and processes them func handleObjectTemplates(plc policyv1.ConfigurationPolicy, apiresourcelist []*metav1.APIResourceList, apigroups []*restmapper.APIGroupResources) { fmt.Println(fmt.Sprintf("processing object templates for policy %s...", plc.GetName())) + //error if no remediationAction is specified plcNamespaces := getPolicyNamespaces(plc) if plc.Spec.RemediationAction == "" { message := "Policy does not have a RemediationAction specified" @@ -314,7 +287,13 @@ // initialize apiresources for template processing before starting objectTemplate processing // this is optional but since apiresourcelist is already available, // use this rather than re-discovering the list for generic-lookup - templates.SetAPIResources(apiresourcelist) + tmplResolverCfg := templates.Config{KubeAPIResourceList: apiresourcelist} + tmplResolver, err := templates.NewResolver(KubeClient, config, tmplResolverCfg) + if err != nil { + // Panic here since this error is unrecoverable + glog.Errorf("Failed to create a template resolver: %v", err) + panic(err) + } for indx, objectT := range plc.Spec.ObjectTemplates { nonCompliantObjects := map[string]map[string]interface{}{} @@ -328,12 +307,6 @@ //override policy namespaces if one is present in object template var unstruct unstructured.Unstructured unstruct.Object = make(map[string]interface{}) - var blob interface{} - ext := objectT.ObjectDefinition - if jsonErr := json.Unmarshal(ext.Raw, &blob); jsonErr != nil { - glog.Error(jsonErr) - return - } // Here appears to be a good place to hook in template processing // This is at the head of objectemplate processing @@ -343,10 +316,35 @@ // and execute template-processing only if there is a template pattern "{{" in it // to avoid unnecessary parsing when there is no template in the definition. - if templates.HasTemplate(string(ext.Raw)) { - resolvedblob, tplErr := templates.ResolveTemplate(blob) - if tplErr != nil { - update := createViolation(&plc, 0, "Error processing template", tplErr.Error()) + //if the disable-templates annotation exists and is true, do not process templates + annotations := plc.GetAnnotations() + disableTemplates := false + if disableAnnotation, ok := annotations["policy.open-cluster-management.io/disable-templates"]; ok { + glog.Infof("Found disable-templates annotation: %s", disableAnnotation) + disableTemplatesBool, err := strconv.ParseBool(disableAnnotation) + if err != nil { + glog.Errorf("Error parsing value for annotation disable-templates: %v", err) + } else { + disableTemplates = disableTemplatesBool + } + }
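// The disable-templates gate above, isolated into a runnable form (the helper
// and sample annotation maps are illustrative, not controller code):
package main

import (
	"fmt"
	"strconv"
)

func templatesDisabled(annotations map[string]string) bool {
	disableAnnotation, ok := annotations["policy.open-cluster-management.io/disable-templates"]
	if !ok {
		return false
	}
	parsed, err := strconv.ParseBool(disableAnnotation)
	if err != nil {
		return false // malformed values fall back to processing templates
	}
	return parsed
}

func main() {
	fmt.Println(templatesDisabled(nil)) // false
	fmt.Println(templatesDisabled(map[string]string{
		"policy.open-cluster-management.io/disable-templates": "true",
	})) // true
}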
+ if !disableTemplates { + + //first check to make sure there are no hub-templates with the delimiter {{hub + //if they exist, it means the template resolution on the hub did not succeed + if templates.HasTemplate(objectT.ObjectDefinition.Raw, "{{hub") { + glog.Error("Configuration Policy has hub-templates. Error occurred while processing hub-templates on the Hub Cluster.") + + //check to see if there is an annotation set to the hub error msg, + //if not, set a generic msg + hubTemplatesErrMsg, ok := annotations["policy.open-cluster-management.io/hub-templates-error"] + if !ok || hubTemplatesErrMsg == "" { + //set a generic msg + hubTemplatesErrMsg = "Error occurred while processing hub-templates, check the policy events for more details." + } + + update := createViolation(&plc, 0, "Error processing hub templates", hubTemplatesErrMsg) if update { recorder.Event(&plc, eventWarning, fmt.Sprintf(plcFmtStr, plc.GetName()), convertPolicyStatusToString(&plc)) addForUpdate(&plc) } return } - //marshal it back and set it on the objectTemplate so be used in processed further down - resolveddata, jsonErr := json.Marshal(resolvedblob) - if jsonErr != nil { - glog.Error(jsonErr) - return + if templates.HasTemplate(objectT.ObjectDefinition.Raw, "") { + resolvedTemplate, tplErr := tmplResolver.ResolveTemplate(objectT.ObjectDefinition.Raw, nil) + if tplErr != nil { + update := createViolation(&plc, 0, "Error processing template", tplErr.Error()) + if update { + recorder.Event(&plc, eventWarning, fmt.Sprintf(plcFmtStr, plc.GetName()), convertPolicyStatusToString(&plc)) + addForUpdate(&plc) + } + return + } + + // Set the resolved data for use in further processing + objectT.ObjectDefinition.Raw = []byte(resolvedTemplate) } - //Set the resolved data for use in further processing - objectT.ObjectDefinition.Raw = resolveddata - blob = resolvedblob } + var blob interface{} + if jsonErr := json.Unmarshal(objectT.ObjectDefinition.Raw, &blob); jsonErr != nil { + glog.Error(jsonErr) + return + } + + //pull metadata out of the object template unstruct.Object = blob.(map[string]interface{}) if md, ok := unstruct.Object["metadata"]; ok { metadata := md.(map[string]interface{}) @@ -380,6 +390,7 @@ numNonCompliant := 0 handled := false objNamespaced := false + //iterate through all namespaces the configurationpolicy is set on for _, ns := range relevantNamespaces { names, compliant, reason, objKind, related, update, namespaced := handleObjects(objectT, ns, indx, &plc, config, recorder, apiresourcelist, apigroups) @@ -398,7 +409,11 @@ } else { enforce = false if !compliant { - numNonCompliant += len(names) + if len(names) == 0 { + numNonCompliant++ + } else { + numNonCompliant += len(names) + } nonCompliantObjects[ns] = map[string]interface{}{ "names": names, "reason": reason, @@ -417,6 +432,8 @@ } } } + //violations for enforce configurationpolicies are already handled in handleObjects, so we only need to generate a violation +//if the remediationAction is set to inform if !handled && !enforce { objData := map[string]interface{}{ "indx": indx, @@ -434,6 +451,7 @@ checkRelatedAndUpdate(parentUpdate, plc, relatedObjects, oldRelated) } +//checks related objects field and triggers an update on the configurationpolicy if it has changed func checkRelatedAndUpdate(update bool, plc policyv1.ConfigurationPolicy, related, oldRelated []policyv1.RelatedObject) { sortUpdate := sortRelatedObjectsAndUpdate(&plc, related, oldRelated) @@ -442,6 +460,7 @@ } } +//helper function to check whether related objects have changed func sortRelatedObjectsAndUpdate(plc *policyv1.ConfigurationPolicy, related, oldRelated []policyv1.RelatedObject) (updateNeeded bool) { sort.SliceStable(related, func(i, j int) bool { @@ -469,6 +488,58 @@ func sortRelatedObjectsAndUpdate(plc 
*policyv1.ConfigurationPolicy, related, return update } +//helper function that appends a condition (violation or compliant) to the status of a configurationpolicy +func addConditionToStatus(plc *policyv1.ConfigurationPolicy, cond *policyv1.Condition, index int, + complianceState policyv1.ComplianceState) (updateNeeded bool) { + var update bool + if len((*plc).Status.CompliancyDetails) <= index { + (*plc).Status.CompliancyDetails = append((*plc).Status.CompliancyDetails, policyv1.TemplateStatus{ + ComplianceState: complianceState, + Conditions: []policyv1.Condition{}, + }) + } + if (*plc).Status.CompliancyDetails[index].ComplianceState != complianceState { + update = true + } + (*plc).Status.CompliancyDetails[index].ComplianceState = complianceState + + //only append the condition if it does not already appear in the status + if !checkMessageSimilarity((*plc).Status.CompliancyDetails[index].Conditions, cond) { + conditions := AppendCondition((*plc).Status.CompliancyDetails[index].Conditions, cond, "", false) + (*plc).Status.CompliancyDetails[index].Conditions = conditions + update = true + } + return update +} + +//helper function to create a violation condition and append it to the list of conditions in the status +func createViolation(plc *policyv1.ConfigurationPolicy, index int, reason string, message string) (result bool) { + var cond *policyv1.Condition + cond = &policyv1.Condition{ + Type: "violation", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + } + return addConditionToStatus(plc, cond, index, policyv1.NonCompliant) +} + +//helper function to create a compliant notification condition and append it to the list of conditions in the status +func createNotification(plc *policyv1.ConfigurationPolicy, index int, reason string, message string) (result bool) { + var cond *policyv1.Condition + cond = &policyv1.Condition{ + Type: "notification", + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + } + return addConditionToStatus(plc, cond, index, policyv1.Compliant) +} + +//createInformStatus updates the status field for a configurationpolicy with remediationAction=inform based on how many compliant +//and noncompliant objects are found when processing the templates in the configurationpolicy func createInformStatus(mustNotHave bool, numCompliant int, numNonCompliant int, compliantObjects map[string]map[string]interface{}, nonCompliantObjects map[string]map[string]interface{}, plc *policyv1.ConfigurationPolicy, objData map[string]interface{}) (updateNeeded bool) { @@ -481,36 +552,45 @@ if kind == "" { return } - if !mustNotHave && numCompliant == 0 { - //noncompliant; musthave and objects do not exist - update = createMustHaveStatus(desiredName, kind, nonCompliantObjects, namespaced, - plc, indx, compliant) - } - if mustNotHave && numNonCompliant > 0 { - //noncompliant; mustnothave and objects exist - update = createMustNotHaveStatus(kind, nonCompliantObjects, namespaced, plc, indx, compliant) - } - if !mustNotHave && numCompliant > 0 { - //compliant; musthave and objects exist - compliant = true - update = createMustHaveStatus("", kind, compliantObjects, namespaced, plc, indx, compliant) - } - if mustNotHave && numNonCompliant == 0 { - //compliant; mustnothave and no objects exist - compliant = true - update = createMustNotHaveStatus(kind, compliantObjects, namespaced, plc, indx, compliant) + 
if mustNotHave { + if numNonCompliant > 0 { // We want no resources, but some were found + //noncompliant; mustnothave and objects exist + update = createMustNotHaveStatus(kind, nonCompliantObjects, namespaced, plc, indx, compliant) + } else if numNonCompliant == 0 { + //compliant; mustnothave and no objects exist + compliant = true + update = createMustNotHaveStatus(kind, compliantObjects, namespaced, plc, indx, compliant) + } + } else { // !mustNotHave (musthave) + if numCompliant == 0 && numNonCompliant == 0 { // Special case: No resources found is NonCompliant + //noncompliant; musthave and objects do not exist + update = createMustHaveStatus(desiredName, kind, nonCompliantObjects, namespaced, + plc, indx, compliant) + } else if numNonCompliant > 0 { + //noncompliant; musthave and some objects do not exist + update = createMustHaveStatus(desiredName, kind, nonCompliantObjects, namespaced, plc, indx, compliant) + } else { // Found only compliant resources (numCompliant > 0 and no NonCompliant) + //compliant; musthave and objects exist + compliant = true + update = createMustHaveStatus("", kind, compliantObjects, namespaced, plc, indx, compliant) + } } + if update { //update parent policy with violation eventType := eventNormal if !compliant { eventType = eventWarning } - recorder.Event(plc, eventType, fmt.Sprintf(plcFmtStr, plc.GetName()), convertPolicyStatusToString(plc)) + if recorder != nil { + recorder.Event(plc, eventType, fmt.Sprintf(plcFmtStr, plc.GetName()), convertPolicyStatusToString(plc)) + } } return update } +//handleObjects controls the processing of each individual object template within a configurationpolicy func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int, policy *policyv1.ConfigurationPolicy, config *rest.Config, recorder record.EventRecorder, apiresourcelist []*metav1.APIResourceList, apigroups []*restmapper.APIGroupResources) (objNameList []string, compliant bool, reason string, @@ -523,6 +603,7 @@ func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int namespaced := true needUpdate := false ext := objectT.ObjectDefinition + //map raw object to a resource, generate a violation if resource cannot be found mapping, mappingUpdate := getMapping(apigroups, ext, policy, index) if mapping == nil { return nil, false, "", "", nil, (needUpdate || mappingUpdate), namespaced @@ -541,7 +622,7 @@ func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int if metaNamespace != "" { namespace = metaNamespace } - dclient, rsrc, namespaced := getClientRsrc(mapping, apiresourcelist) + dclient, rsrc, namespaced := getResourceAndDynamicClient(mapping, apiresourcelist) if namespaced && namespace == "" { //namespaced but none specified, generate violation updateStatus := createViolation(policy, index, "K8s missing namespace", @@ -558,12 +639,13 @@ func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int } return nil, false, "", "", nil, needUpdate, namespaced } - if name != "" { + if name != "" { //named object, so checking just for the existence of the specific object exists = objectExists(namespaced, namespace, name, rsrc, unstruct, dclient) objNames = append(objNames, name) - } else if kind != "" { + } else if kind != "" { //no name, so we are checking for the existence of any object of this kind objNames = append(objNames, getNamesOfKind(unstruct, rsrc, namespaced, namespace, dclient, strings.ToLower(string(objectT.ComplianceType)))...) 
+ //we do not support enforce on unnamed templates remediation = "inform" if len(objNames) == 0 { exists = false @@ -573,6 +655,7 @@ func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int rsrcKind = "" reason = "" // if the compliance is calculated by the handleSingleObj function, do not override the setting + // we do this because the message string for single objects is different than for multiple complianceCalculated := false if len(objNames) == 1 { name = objNames[0] @@ -618,6 +701,7 @@ func handleObjects(objectT *policyv1.ObjectTemplate, namespace string, index int return objNames, compliant, reason, rsrcKind, relatedObjects, needUpdate, namespaced } +//generateSingleObjReason is a helper function to create a compliant/noncompliant message for a named object func generateSingleObjReason(objShouldExist bool, compliant bool, exists bool) (rsn string) { reason := "" if objShouldExist && compliant { @@ -634,6 +718,8 @@ func generateSingleObjReason(objShouldExist bool, compliant bool, exists bool) ( return reason } +// handleSingleObj takes in an object template (for a named object) and its data and determines whether the object on the cluster +// is compliant or not func handleSingleObj(policy *policyv1.ConfigurationPolicy, remediation policyv1.RemediationAction, exists bool, objShouldExist bool, rsrc schema.GroupVersionResource, dclient dynamic.Interface, objectT *policyv1.ObjectTemplate, data map[string]interface{}) (objNameList []string, compliance bool, rsrcKind string, shouldUpdate bool) { @@ -688,8 +774,9 @@ func handleSingleObj(policy *policyv1.ConfigurationPolicy, remediation policyv1. processingErr := false specViolation := false + // object exists and the template requires it to exist, so we need to check specific fields to see if we have a match if exists { - updated, throwSpecViolation, msg, pErr := updateTemplate( + updated, throwSpecViolation, msg, pErr := checkAndUpdateResource( strings.ToLower(string(objectT.ComplianceType)), data, remediation, rsrc, dclient, unstruct.Object["kind"].(string), nil) if !updated && throwSpecViolation { @@ -731,7 +818,9 @@ func handleSingleObj(policy *policyv1.ConfigurationPolicy, remediation policyv1. 
return nil, compliant, "", false } -func getClientRsrc(mapping *meta.RESTMapping, apiresourcelist []*metav1.APIResourceList) (dclient dynamic.Interface, +// getResourceAndDynamicClient creates a dynamic client to query resources and pulls the groupVersionResource +// for an object from its mapping, as well as checking whether the resource is namespaced or cluster-level +func getResourceAndDynamicClient(mapping *meta.RESTMapping, apiresourcelist []*metav1.APIResourceList) (dclient dynamic.Interface, rsrc schema.GroupVersionResource, namespaced bool) { namespaced = false restconfig := config @@ -744,6 +833,7 @@ func getClientRsrc(mapping *meta.RESTMapping, apiresourcelist []*metav1.APIResou glog.Fatal(err) } + // check all resources in the list of resources on the cluster to get a match for the mapping rsrc = mapping.Resource for _, apiresourcegroup := range apiresourcelist { if apiresourcegroup.GroupVersion == join(mapping.GroupVersionKind.Group, "/", mapping.GroupVersionKind.Version) { @@ -759,6 +849,7 @@ func getClientRsrc(mapping *meta.RESTMapping, apiresourcelist []*metav1.APIResou return dclient, rsrc, namespaced } +// getMapping takes in a raw object, decodes it, and maps it to an existing group/kind func getMapping(apigroups []*restmapper.APIGroupResources, ext runtime.RawExtension, policy *policyv1.ConfigurationPolicy, index int) (mapping *meta.RESTMapping, update bool) { updateNeeded := false @@ -766,6 +857,7 @@ func getMapping(apigroups []*restmapper.APIGroupResources, ext runtime.RawExtens glog.V(9).Infof("reading raw object: %v", string(ext.Raw)) _, gvk, err := unstructured.UnstructuredJSONScheme.Decode(ext.Raw, nil, nil) if err != nil { + // generate violation if object cannot be decoded and update the configpolicy decodeErr := fmt.Sprintf("Decoding error, please check your policy file!"+ " Aborting handling the object template at index [%v] in policy `%v` with error = `%v`", index, policy.Name, err) @@ -789,9 +881,11 @@ func getMapping(apigroups []*restmapper.APIGroupResources, ext runtime.RawExtens } return nil, true } + //initializes a mapping between Kind and APIVersion to a resource name mapping, err = restmapper.RESTMapping(gvk.GroupKind(), gvk.Version) mappingErrMsg := "" if err != nil { + //if the restmapper fails to find a mapping to a resource, generate a violation and update the configpolicy prefix := "no matches for kind \"" startIdx := strings.Index(err.Error(), prefix) if startIdx == -1 { @@ -830,6 +924,7 @@ func getMapping(apigroups []*restmapper.APIGroupResources, ext runtime.RawExtens } } if updateNeeded { + //generate an event on the configurationpolicy if a violation is created recorder.Event(policy, eventWarning, fmt.Sprintf(plcFmtStr, policy.GetName()), errMsg) } return nil, updateNeeded @@ -860,12 +955,15 @@ func getDetails(unstruct unstructured.Unstructured) (name string, kind string, n return name, kind, namespace } +//buildNameList is a helper function to pull names of resources that match an objectTemplate from a list of resources func buildNameList(unstruct unstructured.Unstructured, complianceType string, resList *unstructured.UnstructuredList) (kindNameList []string) { for i := range resList.Items { uObj := resList.Items[i] match := true for key := range unstruct.Object { + //if any key in the object generates a mismatch, the object does not match the template and we + //do not add its name to the list errorMsg, updateNeeded, _, skipped := handleSingleKey(key, unstruct, &uObj, complianceType) if !skipped { if errorMsg != "" || updateNeeded { @@ 
-1002,23 +1100,23 @@ func getAllNamespaces() (list []string) {
 	return namespacesNames
 }
 
+//checkMessageSimilarity decides whether to append a new condition to a configurationPolicy status
+//based on whether it is too similar to the previous one
 func checkMessageSimilarity(conditions []policyv1.Condition, cond *policyv1.Condition) bool {
 	same := true
 	lastIndex := len(conditions)
 	if lastIndex > 0 {
 		oldCond := conditions[lastIndex-1]
 		if !IsSimilarToLastCondition(oldCond, *cond) {
-			// objectT.Status.Conditions = AppendCondition(objectT.Status.Conditions, cond, "object", false)
 			same = false
 		}
 	} else {
-		// objectT.Status.Conditions = AppendCondition(objectT.Status.Conditions, cond, "object", false)
 		same = false
 	}
 	return same
 }
 
-// objectExists returns true if the object is found
+// objectExists fetches the object with the dynamic client and returns true if it is found
 func objectExists(namespaced bool, namespace string, name string, rsrc schema.GroupVersionResource,
 	unstruct unstructured.Unstructured, dclient dynamic.Interface) (result bool) {
 	exists := false
@@ -1133,12 +1231,14 @@ func deleteObject(namespaced bool, namespace string, name string, rsrc schema.Gr
 	return deleted, err
 }
 
-func mergeSpecs(x1, x2 interface{}, ctype string) (interface{}, error) {
-	data1, err := json.Marshal(x1)
+//mergeSpecs is a wrapper for the recursive function that merges two maps. It marshals the objects into JSON
+//to make sure they are valid objects before calling the merge function
+func mergeSpecs(templateVal, existingVal interface{}, ctype string) (interface{}, error) {
+	data1, err := json.Marshal(templateVal)
 	if err != nil {
 		return nil, err
 	}
-	data2, err := json.Marshal(x2)
+	data2, err := json.Marshal(existingVal)
 	if err != nil {
 		return nil, err
 	}
@@ -1155,109 +1255,136 @@ func mergeSpecs(x1, x2 interface{}, ctype string) (interface{}, error) {
 	return mergeSpecsHelper(j1, j2, ctype), nil
 }
 
-func mergeSpecsHelper(x1, x2 interface{}, ctype string) interface{} {
-	switch x1 := x1.(type) {
+// mergeSpecsHelper merges data from the existing object on the cluster into the template value wherever the
+// template does not specify a field. This way, comparing the merged object to the one that exists on the cluster
+// will tell you whether the existing object is compliant with the template. This function uses recursion to check
+// mismatches in nested objects and is the basis for most comparisons the controller makes.
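+//
+// For example, with a musthave template (hypothetical values, for illustration only):
+//
+//	template: {"spec": {"replicas": 3}}
+//	existing: {"spec": {"replicas": 3, "paused": false}}
+//	merged:   {"spec": {"replicas": 3, "paused": false}}
+//
+// The merged result equals the existing object, so the existing object would count as compliant here.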
+func mergeSpecsHelper(templateVal, existingVal interface{}, ctype string) interface{} {
+	switch templateVal := templateVal.(type) {
 	case map[string]interface{}:
-		x2, ok := x2.(map[string]interface{})
+		existingVal, ok := existingVal.(map[string]interface{})
 		if !ok {
-			return x1
-		}
-		for k, v2 := range x2 {
-			if v1, ok := x1[k]; ok {
-				x1[k] = mergeSpecsHelper(v1, v2, ctype)
+			//if one field is a map and the other isn't, don't bother merging - just returning the template value
+			//will still evaluate as noncompliant
+			return templateVal
+		}
+		//otherwise, iterate through all fields in the template object and merge in missing values from the existing object
+		for k, v2 := range existingVal {
+			if v1, ok := templateVal[k]; ok {
+				templateVal[k] = mergeSpecsHelper(v1, v2, ctype)
 			} else {
-				x1[k] = v2
+				templateVal[k] = v2
 			}
 		}
-	case []interface{}:
-		x2, ok := x2.([]interface{})
+	case []interface{}: //list nested in map
+		if !isSorted(templateVal) {
+			//arbitrary sort on template value for easier comparison
+			sort.Slice(templateVal, func(i, j int) bool {
+				return fmt.Sprintf("%v", templateVal[i]) < fmt.Sprintf("%v", templateVal[j])
+			})
+		}
+		existingVal, ok := existingVal.([]interface{})
 		if !ok {
-			return x1
+			//if one field is a list and the other isn't, don't bother merging
+			return templateVal
 		}
-		if len(x2) > len(x1) {
-			if ctype != "mustonlyhave" {
-				return mergeArrays(x1, x2, ctype)
-			}
-			return x1
-		}
-		if len(x2) > 0 {
-			_, ok := x2[0].(map[string]interface{})
-			if ok {
-				for idx, v2 := range x2 {
-					v1 := x1[idx]
-					x1[idx] = mergeSpecsHelper(v1, v2, ctype)
-				}
-			} else {
-				if ctype != "mustonlyhave" {
-					return mergeArrays(x1, x2, ctype)
-				}
-				return x1
-			}
-		} else {
-			return x1
+		if len(existingVal) > 0 {
+			//if both values are non-empty lists, we need to merge in the extra data in the existing
+			//object to do a proper compare
+			return mergeArrays(templateVal, existingVal, ctype)
 		}
 	case nil:
-		x2, ok := x2.(map[string]interface{})
+		//if the template value is nil, pull data from the existing object, since the template does not care about it
+		existingVal, ok := existingVal.(map[string]interface{})
 		if ok {
-			return x2
+			return existingVal
 		}
 	}
-	_, ok := x1.(string)
+	_, ok := templateVal.(string)
 	if !ok {
-		return x1
+		return templateVal
 	}
-	return strings.TrimSpace(x1.(string))
+	return templateVal.(string)
 }
 
+// isSorted is a helper function that checks whether an array is sorted
+func isSorted(arr []interface{}) (result bool) {
+	//sort a copy, not the input, so the caller's slice is left untouched
+	arrCopy := append([]interface{}{}, arr...)
+	sort.Slice(arrCopy, func(i, j int) bool {
+		return fmt.Sprintf("%v", arrCopy[i]) < fmt.Sprintf("%v", arrCopy[j])
+	})
+	return fmt.Sprint(arrCopy) == fmt.Sprint(arr)
+}
+
+// mergeArrays is a helper function that takes a list from the existing object and merges in all the data that is
+// different in the template. This way, comparing the merged object to the one that exists on the cluster will tell
+// you whether the existing object is compliant with the template
 func mergeArrays(new []interface{}, old []interface{}, ctype string) (result []interface{}) {
 	if ctype == "mustonlyhave" {
 		return new
 	}
-
 	newCopy := append([]interface{}{}, new...)
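+	//for example (hypothetical values): merging existing ["b", "b", "c"] into template ["a", "b"]
+	//yields a merged list containing "a", "b", "b", and "c" (in no guaranteed order): the unmatched
+	//second "b" and the "c" are appended so the merged result can equal the existing object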
- indexesSkipped := map[int]bool{} + idxWritten := map[int]bool{} for i := range newCopy { - indexesSkipped[i] = false + idxWritten[i] = false } - + //create a set with a key for each unique item in the list + oldItemSet := map[string]map[string]interface{}{} for _, val2 := range old { - found := false - for newIdx, val1 := range newCopy { - matches := false - if ctype != "mustonlyhave" { - var mergedObj interface{} - switch val2 := val2.(type) { - case map[string]interface{}: - mergedObj, _ = compareSpecs(val1.(map[string]interface{}), val2, ctype) - default: - mergedObj = val1 - } - if reflect.DeepEqual(mergedObj, val2) && !indexesSkipped[newIdx] { - found = true - matches = true - } - if matches && ctype != "mustonlyhave" && !indexesSkipped[newIdx] { - new[newIdx] = mergedObj - indexesSkipped[newIdx] = true - } + if entry, ok := oldItemSet[fmt.Sprint(val2)]; ok { + oldItemSet[fmt.Sprint(val2)]["count"] = entry["count"].(int) + 1 + } else { + oldItemSet[fmt.Sprint(val2)] = map[string]interface{}{ + "count": 1, + "value": val2, + } + } + } - } else if reflect.DeepEqual(val1, val2) && !indexesSkipped[newIdx] { - found = true - indexesSkipped[newIdx] = true + for _, data := range oldItemSet { + count := 0 + reqCount := data["count"] + val2 := data["value"] + //for each list item in the existing array, iterate through the template array and try to find a match + for newIdx, val1 := range newCopy { + if idxWritten[newIdx] { + continue + } + var mergedObj interface{} + switch val2 := val2.(type) { + case map[string]interface{}: + //use map compare helper function to check equality on lists of maps + mergedObj, _ = compareSpecs(val1.(map[string]interface{}), val2, ctype) + default: + mergedObj = val1 + } + //if a match is found, this field is already in the template, so we can skip it in future checks + if equalObjWithSort(mergedObj, val2) { + count = count + 1 + new[newIdx] = mergedObj + idxWritten[newIdx] = true } } - if !found { - new = append(new, val2) + //if an item in the existing object cannot be found in the template, we add it to the template array + //to produce the merged array + if count < reqCount.(int) { + for i := 0; i < (reqCount.(int) - count); i++ { + new = append(new, val2) + } } } return new } +// compareLists is a wrapper function that creates a merged list for musthave and returns the template list for mustonlyhave func compareLists(newList []interface{}, oldList []interface{}, ctype string) (updatedList []interface{}, err error) { if ctype != "mustonlyhave" { return mergeArrays(newList, oldList, ctype), nil } - //mustonlyhave + //mustonlyhave scenario: go through existing list and merge everything in order, then add all other template items in order afterward mergedList := []interface{}{} for idx, item := range newList { if idx < len(oldList) { @@ -1273,11 +1400,13 @@ func compareLists(newList []interface{}, oldList []interface{}, ctype string) (u return mergedList, nil } +// compareSpecs is a wrapper function that creates a merged map for mustHave and returns the template map for mustonlyhave func compareSpecs(newSpec map[string]interface{}, oldSpec map[string]interface{}, ctype string) (updatedSpec map[string]interface{}, err error) { if ctype == "mustonlyhave" { return newSpec, nil } + //if compliance type is musthave, create merged object to compare on merged, err := mergeSpecs(newSpec, oldSpec, ctype) if err != nil { return merged.(map[string]interface{}), err @@ -1285,6 +1414,8 @@ func compareSpecs(newSpec map[string]interface{}, oldSpec map[string]interface{} 
return merged.(map[string]interface{}), nil } +// handleSingleKey checks whether a key/value pair in an object template matches with that in the existing +// resource on the cluster func handleSingleKey(key string, unstruct unstructured.Unstructured, existingObj *unstructured.Unstructured, complianceType string) (errormsg string, update bool, merged interface{}, skip bool) { var err error @@ -1293,7 +1424,9 @@ func handleSingleKey(key string, unstruct unstructured.Unstructured, existingObj newObj := formatTemplate(unstruct, key) oldObj := existingObj.UnstructuredContent()[key] typeErr := "" - //merge changes into new spec + // We will compare the existing field to a "merged" field which has the fields in the template merged into the existing + // object to avoid erroring on fields that are not in the template but have been automatically added to the resource. For + // the mustOnlyHave complianceType, this object is identical to the field in the template. var mergedObj interface{} switch newObj := newObj.(type) { case []interface{}: @@ -1316,7 +1449,7 @@ func handleSingleKey(key string, unstruct unstructured.Unstructured, existingObj typeErr = fmt.Sprintf("Error merging changes into key \"%s\": object type of template and existing do not match", key) } - default: + default: // if field is not an object, just do a basic compare mergedObj = newObj } if typeErr != "" { @@ -1327,43 +1460,21 @@ func handleSingleKey(key string, unstruct unstructured.Unstructured, existingObj return message, false, mergedObj, false } if key == "metadata" { + // filter out autogenerated annotations that have caused compare issues in the past oldObj = formatMetadata(oldObj.(map[string]interface{})) mergedObj = formatMetadata(mergedObj.(map[string]interface{})) } - //check if merged spec has changed - nJSON, err := json.Marshal(mergedObj) - if err != nil { - message := fmt.Sprintf(convertJSONError, key, err) - return message, false, mergedObj, false - } - oJSON, err := json.Marshal(oldObj) - if err != nil { - message := fmt.Sprintf(convertJSONError, key, err) - return message, false, mergedObj, false - } - switch mergedObj := mergedObj.(type) { - case (map[string]interface{}): - if oldObj == nil || !checkFieldsWithSort(mergedObj, oldObj.(map[string]interface{})) { - updateNeeded = true - } - case ([]map[string]interface{}): - if oldObj == nil || !checkListFieldsWithSort(mergedObj, oldObj.([]map[string]interface{})) { - updateNeeded = true - } - case ([]interface{}): - if oldObj == nil || !checkListsMatch(mergedObj, oldObj.([]interface{})) { - updateNeeded = true - } - default: - if !reflect.DeepEqual(nJSON, oJSON) { - updateNeeded = true - } + //sort objects before checking equality to ensure that the merged object and the existing one are not in different orders + if !equalObjWithSort(mergedObj, oldObj) { + updateNeeded = true } return "", updateNeeded, mergedObj, false } return "", false, nil, true } +// handleKeys is a helper function that calls handleSingleKey to check if each field in the template matches the object. 
+// If it finds a mismatch and the remediationAction is enforce, it will update the object with the data from the template
 func handleKeys(unstruct unstructured.Unstructured, existingObj *unstructured.Unstructured,
 	remediation policyv1.RemediationAction, complianceType string, typeStr string, name string,
 	res dynamic.ResourceInterface) (success bool, throwSpecViolation bool, message string,
@@ -1371,6 +1482,7 @@ func handleKeys(unstruct unstructured.Unstructured, existingObj *unstructured.Un
 	var err error
 	for key := range unstruct.Object {
 		isStatus := key == "status"
+		//check key for mismatch
 		errorMsg, updateNeeded, mergedObj, skipped := handleSingleKey(key, unstruct, existingObj, complianceType)
 		if errorMsg != "" {
 			return false, false, errorMsg, true
@@ -1380,6 +1492,7 @@ func handleKeys(unstruct unstructured.Unstructured, existingObj *unstructured.Un
 		}
 		mapMtx := sync.RWMutex{}
 		mapMtx.Lock()
+		//only look at labels and annotations for metadata - configurationPolicies do not update other metadata fields
 		if key == "metadata" {
 			existingObj.UnstructuredContent()["metadata"].(map[string]interface{})["annotations"] =
 				mergedObj.(map[string]interface{})["annotations"]
@@ -1393,7 +1506,7 @@ func handleKeys(unstruct unstructured.Unstructured, existingObj *unstructured.Un
 		if (strings.ToLower(string(remediation)) == strings.ToLower(string(policyv1.Inform))) || isStatus {
 			return false, true, "", false
 		}
-		//enforce
+		//update the resource if the remediationAction is enforce
 		glog.V(4).Infof("Updating %v template `%v`...", typeStr, name)
 		_, err = res.Update(context.TODO(), existingObj, metav1.UpdateOptions{})
 		if errors.IsNotFound(err) {
@@ -1410,7 +1523,9 @@
 	return false, false, "", false
 }
 
-func updateTemplate(
+// checkAndUpdateResource checks each individual key of a resource and passes it to handleKeys to see if it
+// matches the template and updates it if the remediationAction is enforce
+func checkAndUpdateResource(
 	complianceType string, metadata map[string]interface{}, remediation policyv1.RemediationAction,
 	rsrc schema.GroupVersionResource, dclient dynamic.Interface, typeStr string,
 	parent *policyv1.ConfigurationPolicy) (success bool, throwSpecViolation bool,
@@ -1435,7 +1550,7 @@ func updateTemplate(
 	return false, false, "", false
 }
 
-// AppendCondition check and appends conditions
+// AppendCondition checks and appends conditions to the policy status
 func AppendCondition(conditions []policyv1.Condition, newCond *policyv1.Condition, resourceType string,
 	resolved ...bool) (conditionsRes []policyv1.Condition) {
 	defer recoverFlow()
@@ -1456,37 +1571,6 @@ func AppendCondition(conditions []policyv1.Condition, newCond *policyv1.Conditio
 	return conditions
 }
 
-func getRoleNames(list []rbacv1.Role) []string {
-	roleNames := []string{}
-	for _, n := range list {
-		roleNames = append(roleNames, n.Name)
-	}
-	return roleNames
-}
-
-func listToRoleMap(rlist []rbacv1.Role) map[string]rbacv1.Role {
-	roleMap := make(map[string]rbacv1.Role)
-	for _, role := range rlist {
-		roleN := []string{role.Name, role.Namespace}
-		roleNamespace := strings.Join(roleN, "-")
-		roleMap[roleNamespace] = role
-	}
-	return roleMap
-}
-
-func createKeyValuePairs(m map[string]string) string {
-
-	if m == nil {
-		return ""
-	}
-	b := new(bytes.Buffer)
-	for key, value := range m {
-		fmt.Fprintf(b, "%s=%s,", key, value)
-	}
-	s := strings.TrimSuffix(b.String(), ",")
-	return s
-}
-
 //IsSimilarToLastCondition checks the diff, so that we don't keep updating with the same info
 func IsSimilarToLastCondition(oldCond policyv1.Condition, newCond policyv1.Condition) bool {
 	if reflect.DeepEqual(oldCond.Status, newCond.Status) &&
@@ -1498,6 +1582,7 @@ func IsSimilarToLastCondition(oldCond policyv1.Condition, newCond policyv1.Condi
 	return false
 }
 
+//addForUpdate calculates the compliance status of a configurationPolicy and updates its status field if needed
 func addForUpdate(policy *policyv1.ConfigurationPolicy) {
 	compliant := true
 	for index := range policy.Spec.ObjectTemplates {
@@ -1519,10 +1604,11 @@ func addForUpdate(policy *policyv1.ConfigurationPolicy) {
 	})
 	if err != nil {
 		log.Error(err, err.Error())
-		// time.Sleep(100) //giving enough time to sync
 	}
 }
 
+//updatePolicyStatus updates the status of the configurationPolicy if new conditions are added and generates an event
+//on the parent policy with the compliance decision
 func updatePolicyStatus(policies map[string]*policyv1.ConfigurationPolicy) (*policyv1.ConfigurationPolicy, error) {
 	for _, instance := range policies { // policies is a map where: key = plc.Name, value = pointer to plc
 		err := reconcilingAgent.client.Status().Update(context.TODO(), instance)
@@ -1540,6 +1626,8 @@ func updatePolicyStatus(policies map[string]*policyv1.ConfigurationPolicy) (*pol
 	return nil, nil
 }
 
+//handleRemovingPolicy removes a configurationPolicy from the list of configurationPolicies that the controller is
+//processing
 func handleRemovingPolicy(name string) {
 	for k, v := range availablePolicies.PolicyMap {
 		if v.Name == name {
@@ -1548,6 +1636,7 @@ func handleRemovingPolicy(name string) {
 	}
 }
 
+//handleAddingPolicy adds a configurationPolicy to the list of configurationPolicies that the controller is processing
 func handleAddingPolicy(plc *policyv1.ConfigurationPolicy) error {
 	allNamespaces, err := common.GetAllNamespaces()
 	if err != nil {
diff --git a/pkg/controller/configurationpolicy/configurationpolicy_controller_test.go b/pkg/controller/configurationpolicy/configurationpolicy_controller_test.go
index 1f045888..73ccb9df 100644
--- a/pkg/controller/configurationpolicy/configurationpolicy_controller_test.go
+++ b/pkg/controller/configurationpolicy/configurationpolicy_controller_test.go
@@ -232,15 +232,21 @@ func TestCompareSpecs(t *testing.T) {
 	}
 	assert.Equal(t, reflect.DeepEqual(merged, mergedExpected), true)
 	spec1 = map[string]interface{}{
-		"containers": map[string]string{
+		"containers": map[string]interface{}{
 			"image": "nginx1.7.9",
 			"test":  "1111",
+			"timestamp": map[string]int64{
+				"seconds": 1631796491,
+			},
 		},
 	}
 	spec2 = map[string]interface{}{
-		"containers": map[string]string{
+		"containers": map[string]interface{}{
 			"image": "nginx1.7.9",
 			"name":  "nginx",
+			"timestamp": map[string]int64{
+				"seconds": 1631796491,
+			},
 		},
 	}
 	merged, err = compareSpecs(spec1, spec2, "musthave")
@@ -248,10 +254,16 @@
 		t.Fatalf("compareSpecs: (%v)", err)
 	}
 	mergedExpected = map[string]interface{}{
-		"containers": map[string]string{
+		"containers": map[string]interface{}{
 			"image": "nginx1.7.9",
 			"name":  "nginx",
 			"test":  "1111",
+			// This verifies that the type of the number has not changed as part of compare specs.
+			// With standard JSON marshaling and unmarshaling, an int64 would be converted to a
+			// float64; this check ensures that does not happen.
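+			// A minimal standalone illustration of that conversion with encoding/json
+			// (hypothetical code, not part of this test):
+			//
+			//	var out map[string]interface{}
+			//	_ = json.Unmarshal([]byte(`{"seconds": 1631796491}`), &out)
+			//	fmt.Printf("%T\n", out["seconds"]) // float64, not int64
+			//
+			//	dec := json.NewDecoder(strings.NewReader(`{"seconds": 1631796491}`))
+			//	dec.UseNumber()
+			//	_ = dec.Decode(&out)
+			//	fmt.Printf("%T\n", out["seconds"]) // json.Number, which preserves the digits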
+ "timestamp": map[string]int64{ + "seconds": 1631796491, + }, }, } assert.Equal(t, reflect.DeepEqual(fmt.Sprint(merged), fmt.Sprint(mergedExpected)), true) @@ -320,7 +332,7 @@ func TestCompareLists(t *testing.T) { mergedExpected = []interface{}{ map[string]interface{}{ "apiGroups": []string{ - "extensions", "apps", + "apps", "extensions", }, "resources": []string{ "deployments", @@ -572,3 +584,95 @@ func newRuleTemplate(verbs, apiGroups, resources, nonResourceURLs string, compli }, } } + +func TestCreateInformStatus(t *testing.T) { + policy := &policiesv1alpha1.ConfigurationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Spec: policiesv1alpha1.ConfigurationPolicySpec{ + Severity: "low", + NamespaceSelector: policiesv1alpha1.Target{ + Include: []string{"test1", "test2"}, + }, + RemediationAction: "inform", + ObjectTemplates: []*policiesv1alpha1.ObjectTemplate{ + &policiesv1alpha1.ObjectTemplate{ + ComplianceType: "musthave", + ObjectDefinition: runtime.RawExtension{}, + }, + }, + }, + } + objNamespaced := true + objData := map[string]interface{}{ + "indx": 0, + "kind": "Secret", + "desiredName": "myobject", + "namespaced": objNamespaced, + } + mustNotHave := false + numCompliant := 0 + numNonCompliant := 1 + var nonCompliantObjects map[string]map[string]interface{} = make(map[string]map[string]interface{}) + var compliantObjects map[string]map[string]interface{} = make(map[string]map[string]interface{}) + nonCompliantObjects["test1"] = map[string]interface{}{ + "names": []string{"myobject"}, + "reason": "my reason", + } + + // Test 1 NonCompliant resource + createInformStatus(mustNotHave, numCompliant, numNonCompliant, + compliantObjects, nonCompliantObjects, policy, objData) + assert.True(t, policy.Status.CompliancyDetails[0].ComplianceState == policiesv1alpha1.NonCompliant) + + nonCompliantObjects["test2"] = map[string]interface{}{ + "names": []string{"myobject"}, + "reason": "my reason", + } + numNonCompliant = 2 + + // Test 2 NonCompliant resources + createInformStatus(mustNotHave, numCompliant, numNonCompliant, + compliantObjects, nonCompliantObjects, policy, objData) + assert.True(t, policy.Status.CompliancyDetails[0].ComplianceState == policiesv1alpha1.NonCompliant) + + delete(nonCompliantObjects, "test1") + delete(nonCompliantObjects, "test2") + + // Test 0 resources + numNonCompliant = 0 + createInformStatus(mustNotHave, numCompliant, numNonCompliant, + compliantObjects, nonCompliantObjects, policy, objData) + assert.True(t, policy.Status.CompliancyDetails[0].ComplianceState == policiesv1alpha1.NonCompliant) + + compliantObjects["test1"] = map[string]interface{}{ + "names": []string{"myobject"}, + "reason": "my reason", + } + numCompliant = 1 + nonCompliantObjects["test2"] = map[string]interface{}{ + "names": []string{"myobject"}, + "reason": "my reason", + } + numNonCompliant = 1 + + // Test 1 compliant and 1 noncompliant resource NOTE: This use case is the new behavior change! 
+	createInformStatus(mustNotHave, numCompliant, numNonCompliant,
+		compliantObjects, nonCompliantObjects, policy, objData)
+	assert.True(t, policy.Status.CompliancyDetails[0].ComplianceState == policiesv1alpha1.NonCompliant)
+
+	compliantObjects["test2"] = map[string]interface{}{
+		"names":  []string{"myobject"},
+		"reason": "my reason",
+	}
+	numCompliant = 2
+	numNonCompliant = 0
+	delete(nonCompliantObjects, "test2")
+
+	// Test 2 compliant resources
+	createInformStatus(mustNotHave, numCompliant, numNonCompliant,
+		compliantObjects, nonCompliantObjects, policy, objData)
+	assert.True(t, policy.Status.CompliancyDetails[0].ComplianceState == policiesv1alpha1.Compliant)
+}
diff --git a/pkg/controller/configurationpolicy/configurationpolicy_utils.go b/pkg/controller/configurationpolicy/configurationpolicy_utils.go
index bcc225ea..a9a2e378 100644
--- a/pkg/controller/configurationpolicy/configurationpolicy_utils.go
+++ b/pkg/controller/configurationpolicy/configurationpolicy_utils.go
@@ -5,6 +5,7 @@ package configurationpolicy
 
 import (
 	"fmt"
+	"reflect"
 	"sort"
 	"strings"
 
@@ -68,23 +69,61 @@ func updateRelatedObjectsStatus(list []policyv1.RelatedObject,
 	return list
 }
 
+//equalObjWithSort is a wrapper function that calls the correct equality check depending on the type of
+//the objects being compared
+func equalObjWithSort(mergedObj interface{}, oldObj interface{}) (areEqual bool) {
+	switch mergedObj := mergedObj.(type) {
+	case (map[string]interface{}):
+		if oldObj == nil || !checkFieldsWithSort(mergedObj, oldObj.(map[string]interface{})) {
+			return false
+		}
+	case ([]interface{}):
+		if oldObj == nil || !checkListsMatch(mergedObj, oldObj.([]interface{})) {
+			return false
+		}
+	default:
+		if !reflect.DeepEqual(fmt.Sprint(mergedObj), fmt.Sprint(oldObj)) {
+			return false
+		}
+	}
+	return true
+}
+
+//checkFieldsWithSort is a check for maps that uses an arbitrary sort to ensure it is
+//comparing the right values
 func checkFieldsWithSort(mergedObj map[string]interface{}, oldObj map[string]interface{}) (matches bool) {
 	//needed to compare lists, since merge messes up the order
+	if len(mergedObj) < len(oldObj) {
+		return false
+	}
 	match := true
 	for i, mVal := range mergedObj {
 		switch mVal := mVal.(type) {
+		case (map[string]interface{}):
+			//if field is a map, recurse to check for a match
+			oVal, ok := oldObj[i].(map[string]interface{})
+			if !ok {
+				match = false
+				break
+			} else if !checkFieldsWithSort(mVal, oVal) {
+				match = false
+			}
+		case ([]map[string]interface{}):
+			//if field is a list of maps, use checkListFieldsWithSort to check for a match
+			oVal, ok := oldObj[i].([]map[string]interface{})
+			if !ok {
+				match = false
+				break
+			} else if !checkListFieldsWithSort(mVal, oVal) {
+				match = false
+			}
 		case ([]interface{}):
+			//if field is a generic list, sort and iterate through them to make sure each value matches
 			oVal, ok := oldObj[i].([]interface{})
 			if !ok {
 				match = false
 				break
 			}
-			sort.Slice(oVal, func(i, j int) bool {
-				return fmt.Sprintf("%v", oVal[i]) < fmt.Sprintf("%v", oVal[j])
-			})
-			sort.Slice(mVal, func(x, y int) bool {
-				return fmt.Sprintf("%v", mVal[x]) < fmt.Sprintf("%v", mVal[y])
-			})
 			if len(mVal) != len(oVal) {
 				match = false
 			} else {
@@ -92,15 +131,8 @@ func checkFieldsWithSort(mergedObj map[string]interface{}, oldObj map[string]int
 					match = false
 				}
 			}
-		case (map[string]interface{}):
-			oVal, ok := oldObj[i].(map[string]interface{})
-			if !ok {
-				match = false
-				break
-			} else if !checkFieldsWithSort(mVal, oVal) {
-				match = false
-			}
 		default:
+			//if field is not an object, just do a basic compare to check for a match
 			oVal := oldObj[i]
 			if fmt.Sprint(oVal) != fmt.Sprint(mVal) {
 				match = false
@@ -110,6 +142,8 @@ func checkFieldsWithSort(mergedObj map[string]interface{}, oldObj map[string]int
 	return match
 }
 
+//checkListFieldsWithSort is a check for lists of maps that uses an arbitrary sort to ensure it is
+//comparing the right values
 func checkListFieldsWithSort(mergedObj []map[string]interface{}, oldObj []map[string]interface{}) (matches bool) {
 	sort.Slice(oldObj, func(i, j int) bool {
 		return fmt.Sprintf("%v", oldObj[i]) < fmt.Sprintf("%v", oldObj[j])
@@ -125,17 +159,12 @@ func checkListFieldsWithSort(mergedObj []map[string]interface{}, oldObj []map[st
 	for i, mVal := range mergedItem {
 		switch mVal := mVal.(type) {
 		case ([]interface{}):
+			//if a map in the list contains a nested list, sort and check for equality
 			oVal, ok := oldItem[i].([]interface{})
 			if !ok {
 				match = false
 				break
 			}
-			sort.Slice(oVal, func(i, j int) bool {
-				return fmt.Sprintf("%v", oVal[i]) < fmt.Sprintf("%v", oVal[j])
-			})
-			sort.Slice(mVal, func(x, y int) bool {
-				return fmt.Sprintf("%v", mVal[x]) < fmt.Sprintf("%v", mVal[y])
-			})
 			if len(mVal) != len(oVal) {
 				match = false
 			} else {
@@ -144,10 +173,12 @@ func checkListFieldsWithSort(mergedObj []map[string]interface{}, oldObj []map[st
 			}
 		}
 		case (map[string]interface{}):
+			//if a map in the list contains another map, check fields for equality
 			if !checkFieldsWithSort(mVal, oldItem[i].(map[string]interface{})) {
 				match = false
 			}
 		default:
+			//if the field in the map is not an object, just do a generic check
 			oVal := oldItem[i]
 			if fmt.Sprint(oVal) != fmt.Sprint(mVal) {
 				match = false
@@ -158,6 +189,7 @@ func checkListFieldsWithSort(mergedObj []map[string]interface{}, oldObj []map[st
 	return match
 }
 
+//checkListsMatch is a generic list check that uses an arbitrary sort to ensure it is comparing the right values
 func checkListsMatch(oldVal []interface{}, mergedVal []interface{}) (m bool) {
 	oVal := append([]interface{}{}, oldVal...)
 	mVal := append([]interface{}{}, mergedVal...)
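+	//both copies are sorted before the element-wise compare, so logically equal lists match
+	//regardless of order. For example (hypothetical values):
+	//	["b", "a"] vs ["a", "b"] -> match
+	//	["a"] vs ["a", "a"]      -> no match, since the lengths differ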
@@ -175,8 +207,17 @@ func checkListsMatch(oldVal []interface{}, mergedVal []interface{}) (m bool) { return false } for idx, oNestedVal := range oVal { - if fmt.Sprint(oNestedVal) != fmt.Sprint(mVal[idx]) { - match = false + switch oNestedVal := oNestedVal.(type) { + case (map[string]interface{}): + //if list contains maps, recurse on those maps to check for a match + if !checkFieldsWithSort(mVal[idx].(map[string]interface{}), oNestedVal) { + match = false + } + default: + //otherwise, just do a generic check + if fmt.Sprint(oNestedVal) != fmt.Sprint(mVal[idx]) { + match = false + } } } return match @@ -267,11 +308,6 @@ func createResourceNameStr(names []string, namespace string, namespaced bool) (n //createMustHaveStatus generates a status for a musthave/mustonlyhave policy func createMustHaveStatus(desiredName string, kind string, complianceObjects map[string]map[string]interface{}, namespaced bool, plc *policyv1.ConfigurationPolicy, indx int, compliant bool) (update bool) { - // Noncompliant with no resources -- return violation immediately - if !compliant && desiredName == "" { - message := fmt.Sprintf("No instances of `%v` found as specified", kind) - return createViolation(plc, indx, "K8s does not have a `must have` object", message) - } // Parse discovered resources nameList := []string{} sortedNamespaces := []string{} @@ -279,6 +315,17 @@ func createMustHaveStatus(desiredName string, kind string, complianceObjects map sortedNamespaces = append(sortedNamespaces, n) } sort.Strings(sortedNamespaces) + + // Noncompliant with no resources -- return violation immediately + if !compliant && desiredName == "" { + message := fmt.Sprintf("No instances of `%v` found as specified", kind) + if len(sortedNamespaces) > 0 { + message = fmt.Sprintf("No instances of `%v` found as specified in namespaces: %v", + kind, strings.Join(sortedNamespaces, ", ")) + } + return createViolation(plc, indx, "K8s does not have a `must have` object", message) + } + for i := range sortedNamespaces { ns := sortedNamespaces[i] names := complianceObjects[ns]["names"].([]string) diff --git a/test/crds/oauths.config.openshift.io_crd.yaml b/test/crds/oauths.config.openshift.io_crd.yaml new file mode 100644 index 00000000..b3f4d452 --- /dev/null +++ b/test/crds/oauths.config.openshift.io_crd.yaml @@ -0,0 +1,431 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + items: + description: IdentityProvider provides identities for users authenticating using credentials + properties: + basicAuth: + description: basicAuth contains configuration options for the BasicAuth IdP + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the remote URL to connect to + type: string + type: object + github: + description: github enables user authentication using GitHub credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations are allowed to log in + items: + type: string + type: array + teams: + description: teams optionally restricts which teams are allowed to log in. Format is /. + items: + type: string + type: array + type: object + gitlab: + description: gitlab enables user authentication using GitLab credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the oauth server base URL + type: string + type: object + google: + description: google enables user authentication using Google credentials + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + hostedDomain: + description: hostedDomain is the optional Google App domain (e.g. 
"mycompany.com") to restrict logins to + type: string + type: object + htpasswd: + description: htpasswd enables user authentication using an HTPasswd file to validate credentials + properties: + fileData: + description: fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + keystone: + description: keystone enables user authentication using keystone password credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + url: + description: url is the remote URL to connect to + type: string + type: object + ldap: + description: ldap enables user authentication using LDAP credentials + properties: + attributes: + description: attributes maps LDAP attributes to identities + properties: + email: + description: email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + items: + type: string + type: array + id: + description: id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. 
LDAP standard identity attribute is "dn" + items: + type: string + type: array + name: + description: name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + items: + type: string + type: array + preferredUsername: + description: preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + items: + type: string + type: array + type: object + bindDN: + description: bindDN is an optional DN to bind with during the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + insecure: + description: 'insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true` When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + type: object + mappingMethod: + description: mappingMethod determines how identities from this provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. 
If empty, the default system roots are used. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + claims: + description: claims mappings + properties: + email: + description: email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + items: + type: string + type: array + name: + description: name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity + items: + type: string + type: array + preferredUsername: + description: preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim + items: + type: string + type: array + type: object + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + extraAuthorizeParameters: + additionalProperties: + type: string + description: extraAuthorizeParameters are any custom parameters to add to the authorize request. + type: object + extraScopes: + description: extraScopes are any scopes to request in addition to the standard "openid" scope. + items: + type: string + type: array + issuer: + description: issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + type: string + type: object + requestHeader: + description: requestHeader enables user authentication using request header credentials + properties: + ca: + description: ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + challengeURL: + description: challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative. + items: + type: string + type: array + emailHeaders: + description: emailHeaders is the set of headers to check for the email address + items: + type: string + type: array + headers: + description: headers is the set of headers to check for identity information + items: + type: string + type: array + loginURL: + description: loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when login is set to true. + type: string + nameHeaders: + description: nameHeaders is the set of headers to check for the display name + items: + type: string + type: array + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers to check for the preferred username + items: + type: string + type: array + type: object + type: + description: type identifies the identity provider type for this entry. + type: string + type: object + type: array + templates: + description: templates allow you to customize pages like the login page. + properties: + error: + description: error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + login: + description: login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + providerSelection: + description: providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + type: object + tokenConfig: + description: tokenConfig contains options for authorization and access tokens + properties: + accessTokenInactivityTimeout: + description: accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. 
The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as "5m", "1.5h" or "2h45m". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. + type: string + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' + format: int32 + type: integer + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of access tokens + format: int32 + type: integer + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/e2e/case12_list_compare_test.go b/test/e2e/case12_list_compare_test.go index 9ff458fc..39288510 100644 --- a/test/e2e/case12_list_compare_test.go +++ b/test/e2e/case12_list_compare_test.go @@ -4,6 +4,8 @@ package e2e import ( + "time" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/open-cluster-management/config-policy-controller/test/utils" @@ -19,6 +21,39 @@ const case12ConfigPolicyNameRoleEnforce string = "policy-role-create-listinspec" const case12RoleInformYaml string = "../resources/case12_list_compare/case12_role_inform.yaml" const case12RoleEnforceYaml string = "../resources/case12_list_compare/case12_role_create.yaml" +const case12RoleToPatch string = "topatch-role-configpolicy" +const case12RoleToPatchYaml string = "../resources/case12_list_compare/case12_role_create_small.yaml" +const case12RolePatchEnforce string = "patch-role-configpolicy" +const case12RolePatchEnforceYaml string = "../resources/case12_list_compare/case12_role_patch.yaml" +const case12RolePatchInform string = "patch-role-configpolicy-inform" +const case12RolePatchInformYaml string = "../resources/case12_list_compare/case12_role_patch_inform.yaml" + +const case12OauthCreate string = "policy-idp-create" +const case12OauthPatch string = "policy-idp-patch" +const case12OauthVerify string = "policy-idp-verify" +const case12OauthCreateYaml string = "../resources/case12_list_compare/case12_oauth_create.yaml" +const case12OauthPatchYaml string = "../resources/case12_list_compare/case12_oauth_patch.yaml" +const case12OauthVerifyYaml string = "../resources/case12_list_compare/case12_oauth_verify.yaml" + +const case12SingleItemListCreate string = "policy-htpasswd-single" +const case12SingleItemListPatch string = "policy-htpasswd-single" +const case12SingleItemListInform string = "policy-htpasswd-single-inform" +const case12SingleItemListCreateYaml string = "../resources/case12_list_compare/case12_oauth_single_create.yaml" +const case12SingleItemListPatchYaml string = "../resources/case12_list_compare/case12_oauth_single_patch.yaml" +const case12SingleItemListInformYaml string = "../resources/case12_list_compare/case12_oauth_single_inform.yaml" + +const case12SmallerListExistingCreate string = "policy-htpasswd-less" +const case12SmallerListExistingPatch string = "policy-htpasswd-less" +const case12SmallerListExistingInform string = 
"policy-htpasswd-less-inform" +const case12SmallerListExistingCreateYaml string = "../resources/case12_list_compare/case12_oauth_less_create.yaml" +const case12SmallerListExistingPatchYaml string = "../resources/case12_list_compare/case12_oauth_less_patch.yaml" +const case12SmallerListExistingInformYaml string = "../resources/case12_list_compare/case12_oauth_less_inform.yaml" + +const case12WhitespaceListCreate string = "policy-pod-whitespace-env" +const case12WhitespaceListInform string = "policy-pod-whitespace-env-inform" +const case12WhitespaceListCreateYaml string = "../resources/case12_list_compare/case12_whitespace_create.yaml" +const case12WhitespaceDeployment string = "envvar-whitespace" + var _ = Describe("Test list handling for musthave", func() { Describe("Create a policy with a nested list on managed cluster in ns:"+testNamespace, func() { It("should be created properly on the managed cluster", func() { @@ -58,4 +93,137 @@ var _ = Describe("Test list handling for musthave", func() { }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) }) }) + Describe("Create and patch a role on managed cluster in ns:"+testNamespace, func() { + It("should be created properly on the managed cluster", func() { + By("Creating " + case12RoleToPatch + " and " + case12RolePatchEnforce + " on managed") + utils.Kubectl("apply", "-f", case12RoleToPatchYaml, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RoleToPatch, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RoleToPatch, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("apply", "-f", case12RolePatchEnforceYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RolePatchEnforce, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RolePatchEnforce, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("apply", "-f", case12RolePatchInformYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RolePatchInform, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12RolePatchInform, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + }) + Describe("Create and patch an oauth object on managed cluster in ns:"+testNamespace, func() { + It("should be created properly on the managed cluster", func() { + By("Creating " + case12OauthCreate + " and " + case12OauthPatch + " on managed") + utils.Kubectl("apply", "-f", case12OauthCreateYaml, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthCreate, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthCreate, testNamespace, true, defaultTimeoutSeconds) + 
return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("delete", "-f", case12OauthCreateYaml, "-n", testNamespace) + + utils.Kubectl("apply", "-f", case12OauthPatchYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthPatch, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthPatch, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("apply", "-f", case12OauthVerifyYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthVerify, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12OauthVerify, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + + It("should handle lists with just one object properly on the managed cluster", func() { + By("Creating " + case12SingleItemListCreate + " and " + case12SingleItemListPatch + " on managed") + utils.Kubectl("apply", "-f", case12SingleItemListCreateYaml, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListCreate, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListCreate, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + + utils.Kubectl("apply", "-f", case12SingleItemListPatchYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListPatch, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListPatch, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("apply", "-f", case12SingleItemListInformYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListInform, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SingleItemListInform, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + + It("should handle lists with fewer items in the existing object than in the template", func() { + By("Creating " + case12SmallerListExistingCreate + " and " + case12SmallerListExistingPatch + " on managed") + utils.Kubectl("apply", "-f", case12SmallerListExistingCreateYaml, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingCreate, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc :=
utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingCreate, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + + utils.Kubectl("apply", "-f", case12SmallerListExistingPatchYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingPatch, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingPatch, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + utils.Kubectl("apply", "-f", case12SmallerListExistingInformYaml, "-n", testNamespace) + plc = utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingInform, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12SmallerListExistingInform, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + }) + Describe("Create a deployment object with env vars on managed cluster in ns:"+testNamespace, func() { + It("should only add the list item with prefix and suffix whitespace once", func() { + By("Creating " + case12WhitespaceListCreate + " and " + case12WhitespaceListInform + " on managed") + utils.Kubectl("apply", "-f", case12WhitespaceListCreateYaml, "-n", testNamespace) + + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12WhitespaceListCreate, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12WhitespaceListCreate, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + // Ensure it remains compliant for a while, long enough for multiple enforce checks/attempts to have occurred. + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case12WhitespaceListCreate, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, time.Second*20, 1).Should(Equal("Compliant")) + + // Verify that the container list and its environment variable list are correct (there are no duplicates) + deploy := utils.GetWithTimeout(clientManagedDynamic, gvrDeployment, case12WhitespaceDeployment, "default", true, defaultTimeoutSeconds) + Expect(deploy).NotTo(BeNil()) + tmplSpec := deploy.Object["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{}) + containers := tmplSpec["containers"].([]interface{}) + Expect(len(containers)).To(Equal(1)) + envvars := containers[0].(map[string]interface{})["env"].([]interface{}) + Expect(len(envvars)).To(Equal(1)) + }) + }) }) diff --git a/test/e2e/case13_templatization_test.go b/test/e2e/case13_templatization_test.go index a549e39a..4a81fbba 100644 --- a/test/e2e/case13_templatization_test.go +++ b/test/e2e/case13_templatization_test.go @@ -4,9 +4,14 @@ package e2e import ( + "context" + "time" + . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/open-cluster-management/config-policy-controller/test/utils" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const case13Secret string = "e2esecret" @@ -38,6 +43,9 @@ const case13UnterminatedYaml string = "../resources/case13_templatization/case13 const case13WrongArgs string = "policy-pod-create-wrong-args" const case13WrongArgsYaml string = "../resources/case13_templatization/case13_wrong_args.yaml" +const case13UpdateRefObject = "policy-update-referenced-object" +const case13UpdateRefObjectYaml = "../resources/case13_templatization/case13_update_referenced_object.yaml" + var _ = Describe("Test templatization", func() { Describe("Create a secret and pull data from it into a configurationPolicy", func() { It("should be created properly on the managed cluster", func() { @@ -153,4 +161,79 @@ var _ = Describe("Test templatization", func() { }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) }) }) + // Though Bugzilla bug #2007575 references a different incorrect behavior, it's the same + // underlying bug and this behavior is easier to test. + Describe("RHBZ#2007575: Test that the template updates when a referenced resource object is updated", func() { + const configMapName = "configmap-update-referenced-object" + const configMapReplName = configMapName + "-repl" + It("Should have the expected ConfigMap created", func() { + By("Creating the ConfigMap to reference") + configMap := corev1.ConfigMap{ + ObjectMeta: v1.ObjectMeta{ + Name: configMapName, + }, + Data: map[string]string{"message": "Hello Raleigh!"}, + } + _, err := clientManaged.CoreV1().ConfigMaps("default").Create( + context.TODO(), &configMap, v1.CreateOptions{}, + ) + Expect(err).To(BeNil()) + By("Creating the configuration policy that references the ConfigMap") + utils.Kubectl("apply", "-f", case13UpdateRefObjectYaml, "-n", testNamespace) + + By("Verifying that the policy is compliant") + Eventually( + func() interface{} { + managedPlc := utils.GetWithTimeout( + clientManagedDynamic, + gvrConfigPolicy, + case13UpdateRefObject, + testNamespace, + true, + defaultTimeoutSeconds, + ) + return utils.GetComplianceState(managedPlc) + }, + defaultTimeoutSeconds, + 1, + ).Should(Equal("Compliant")) + + By("Verifying that the replicated ConfigMap has the expected data") + replConfigMap, err := clientManaged.CoreV1().ConfigMaps("default").Get( + context.TODO(), configMapReplName, v1.GetOptions{}, + ) + Expect(err).To(BeNil()) + Expect(replConfigMap.Data["message"]).To(Equal("Hello Raleigh!\n")) + + By("Sleeping 30 seconds to ensure PeriodicallyExecConfigPolicies has rerun twice") + time.Sleep(30 * time.Second) + + By("Updating the referenced ConfigMap") + configMap.Data["message"] = "Hello world!"
+ _, err = clientManaged.CoreV1().ConfigMaps("default").Update( + context.TODO(), &configMap, v1.UpdateOptions{}, + ) + Expect(err).To(BeNil()) + + By("Verifying that the replicated ConfigMap has the updated data") + Eventually( + func() interface{} { + replConfigMap, err := clientManaged.CoreV1().ConfigMaps("default").Get( + context.TODO(), configMapReplName, v1.GetOptions{}, + ) + if err != nil { + return "" + } + return replConfigMap.Data["message"] + }, + defaultTimeoutSeconds, + 1, + ).Should(Equal("Hello world!\n")) + }) + It("Should clean up", func() { + utils.Kubectl("delete", "configurationpolicy", case13UpdateRefObject, "-n", testNamespace) + utils.Kubectl("delete", "configmap", configMapName, "-n", "default") + utils.Kubectl("delete", "configmap", configMapReplName, "-n", "default") + }) + }) }) diff --git a/test/e2e/case14_selection_test.go b/test/e2e/case14_selection_test.go new file mode 100644 index 00000000..fee19e2e --- /dev/null +++ b/test/e2e/case14_selection_test.go @@ -0,0 +1,108 @@ +// Copyright (c) 2020 Red Hat, Inc. +// Copyright Contributors to the Open Cluster Management project + +package e2e + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/open-cluster-management/config-policy-controller/test/utils" +) + +const case14PolicyNamed string = "../resources/case14_namespaces/case14_limitrange_named.yaml" +const case14PolicyUnnamed string = "../resources/case14_namespaces/case14_limitrange_unnamed.yaml" +const case14PolicyNamedName string = "policy-named-limitrange" +const case14PolicyUnnamedName string = "policy-unnamed-limitrange" +const case14LimitRangeFile string = "../resources/case14_namespaces/case14_limitrange.yaml" +const case14LimitRangeName string = "container-mem-limit-range" + +var _ = Describe("Test policy compliance with namespace selection", func() { + Describe("Create a named limitrange policy on managed cluster in ns: "+testNamespace, func() { + It("should be created properly on the managed cluster", func() { + By("Creating " + case14PolicyNamed + " on managed") + utils.Kubectl("apply", "-f", case14PolicyNamed, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) + }) + It("should stay noncompliant when limitrange is in one matching namespace", func() { + By("Creating limitrange " + case14LimitRangeName + " on range1") + utils.Kubectl("apply", "-f", case14LimitRangeFile, "-n", "range1") + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, time.Second*20, 
1).Should(Equal("NonCompliant")) + }) + It("should be compliant with limitrange in all matching namespaces", func() { + By("Creating " + case14LimitRangeName + " on range2") + utils.Kubectl("apply", "-f", case14LimitRangeFile, "-n", "range2") + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyNamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + It("Clean up named policy and limitrange", func() { + By("Deleting " + case14LimitRangeName + " on range1") + utils.Kubectl("delete", "-f", case14LimitRangeFile, "-n", "range1") + By("Deleting " + case14LimitRangeName + " on range2") + utils.Kubectl("delete", "-f", case14LimitRangeFile, "-n", "range2") + By("Deleting " + case14PolicyNamed + " on managed") + utils.Kubectl("delete", "-f", case14PolicyNamed, "-n", testNamespace) + }) + It("should be created properly on the managed cluster", func() { + By("Creating " + case14PolicyUnnamed + " on managed") + utils.Kubectl("apply", "-f", case14PolicyUnnamed, "-n", testNamespace) + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) + }) + It("should stay noncompliant when limitrange is in one matching namespace", func() { + By("Creating limitrange " + case14LimitRangeName + " on range1") + utils.Kubectl("apply", "-f", case14LimitRangeFile, "-n", "range1") + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) + Consistently(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, time.Second*20, 1).Should(Equal("NonCompliant")) + }) + It("should be compliant with limitrange in all matching namespaces", func() { + By("Creating " + case14LimitRangeName + " on range2") + utils.Kubectl("apply", "-f", case14LimitRangeFile, "-n", "range2") + plc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + Expect(plc).NotTo(BeNil()) + Eventually(func() interface{} { + managedPlc := utils.GetWithTimeout(clientManagedDynamic, gvrConfigPolicy, case14PolicyUnnamedName, testNamespace, true, defaultTimeoutSeconds) + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + }) + It("Clean up unnamed policy and limitrange", func() { + By("Deleting " + case14LimitRangeName + " on range1") + utils.Kubectl("delete", "-f", 
case14LimitRangeFile, "-n", "range1") + By("Deleting " + case14LimitRangeName + " on range2") + utils.Kubectl("delete", "-f", case14LimitRangeFile, "-n", "range2") + By("Deleting " + case14PolicyUnnamed + " on managed") + utils.Kubectl("delete", "-f", case14PolicyUnnamed, "-n", testNamespace) + }) + }) +}) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index f6a6f8fe..126f604d 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -49,6 +49,7 @@ var ( gvrSecret schema.GroupVersionResource gvrClusterClaim schema.GroupVersionResource gvrConfigMap schema.GroupVersionResource + gvrDeployment schema.GroupVersionResource defaultImageRegistry string ) @@ -75,21 +76,25 @@ var _ = BeforeSuite(func() { gvrSCC = schema.GroupVersionResource{Group: "security.openshift.io", Version: "v1", Resource: "securitycontextconstraints"} gvrSecret = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} gvrClusterClaim = schema.GroupVersionResource{Group: "cluster.open-cluster-management.io", Version: "v1alpha1", Resource: "clusterclaims"} + gvrDeployment = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} clientManaged = NewKubeClient("", kubeconfigManaged, "") clientManagedDynamic = NewKubeClientDynamic("", kubeconfigManaged, "") defaultImageRegistry = "quay.io/open-cluster-management" testNamespace = "managed" + testNamespaces := []string{testNamespace, "range1", "range2"} defaultTimeoutSeconds = 60 - By("Create Namespace if needed") + By("Create Namespaces if needed") namespaces := clientManaged.CoreV1().Namespaces() - if _, err := namespaces.Get(context.TODO(), testNamespace, metav1.GetOptions{}); err != nil && errors.IsNotFound(err) { - Expect(namespaces.Create(context.TODO(), &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespace, - }, - }, metav1.CreateOptions{})).NotTo(BeNil()) + for _, ns := range testNamespaces { + if _, err := namespaces.Get(context.TODO(), ns, metav1.GetOptions{}); err != nil && errors.IsNotFound(err) { + Expect(namespaces.Create(context.TODO(), &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + }, metav1.CreateOptions{})).NotTo(BeNil()) + } + Expect(namespaces.Get(context.TODO(), ns, metav1.GetOptions{})).NotTo(BeNil()) } - Expect(namespaces.Get(context.TODO(), testNamespace, metav1.GetOptions{})).NotTo(BeNil()) }) func NewKubeClient(url, kubeconfig, context string) kubernetes.Interface { diff --git a/test/resources/case12_list_compare/case12_oauth_create.yaml b/test/resources/case12_list_compare/case12_oauth_create.yaml new file mode 100644 index 00000000..9371a64b --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_create.yaml @@ -0,0 +1,21 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-idp-create +spec: + object-templates: + #Set up Identity Providers + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: [] + remediationAction: enforce + severity: high diff --git a/test/resources/case12_list_compare/case12_oauth_less_create.yaml b/test/resources/case12_list_compare/case12_oauth_less_create.yaml new file mode 100644 index 00000000..2992a7f0 --- /dev/null +++ 
b/test/resources/case12_list_compare/case12_oauth_less_create.yaml @@ -0,0 +1,34 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-less +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster3 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-5 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-4 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_less_inform.yaml b/test/resources/case12_list_compare/case12_oauth_less_inform.yaml new file mode 100644 index 00000000..a47b69f3 --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_less_inform.yaml @@ -0,0 +1,52 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-less-inform +spec: + remediationAction: inform + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster3 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-1 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-2 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-3 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-4 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-5 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_less_patch.yaml b/test/resources/case12_list_compare/case12_oauth_less_patch.yaml new file mode 100644 index 00000000..b6312440 --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_less_patch.yaml @@ -0,0 +1,40 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-less +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster3 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-3 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-2 + htpasswd: + fileData: + name: htpasswd-platform-team-secret 
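+ # "claim" is the default mappingMethod; it maps each identity one-to-one to a user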
+ mappingMethod: claim + type: HTPasswd + - name: htpasswd-1 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_patch.yaml b/test/resources/case12_list_compare/case12_oauth_patch.yaml new file mode 100644 index 00000000..3eb70531 --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_patch.yaml @@ -0,0 +1,43 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-idp-patch +spec: + object-templates: + #Set up Identity Providers + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: ping + mappingMethod: add + openID: + claims: + name: + - name + email: + - email + preferredUsername: + - preferred_username + clientID: REDACTED + clientSecret: + name: ping-client-secret + extraScopes: [] + issuer: https://localhost:3000 + type: OpenID + - name: htpasswd + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + remediationAction: enforce + severity: high diff --git a/test/resources/case12_list_compare/case12_oauth_single_create.yaml b/test/resources/case12_list_compare/case12_oauth_single_create.yaml new file mode 100644 index 00000000..f1000abe --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_single_create.yaml @@ -0,0 +1,28 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-single +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster2 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-single + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_single_inform.yaml b/test/resources/case12_list_compare/case12_oauth_single_inform.yaml new file mode 100644 index 00000000..e3ef8ebf --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_single_inform.yaml @@ -0,0 +1,34 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-single-inform +spec: + remediationAction: inform + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster2 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-single + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + - name: htpasswd-single-2 + htpasswd: + fileData: + name: 
htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_single_patch.yaml b/test/resources/case12_list_compare/case12_oauth_single_patch.yaml new file mode 100644 index 00000000..7fe8bee4 --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_single_patch.yaml @@ -0,0 +1,28 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-htpasswd-single +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster2 + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: htpasswd-single-2 + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd \ No newline at end of file diff --git a/test/resources/case12_list_compare/case12_oauth_verify.yaml b/test/resources/case12_list_compare/case12_oauth_verify.yaml new file mode 100644 index 00000000..1103f011 --- /dev/null +++ b/test/resources/case12_list_compare/case12_oauth_verify.yaml @@ -0,0 +1,43 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-idp-verify +spec: + object-templates: + #Set up Identity Providers + - complianceType: musthave + objectDefinition: + apiVersion: config.openshift.io/v1 + kind: OAuth + metadata: + name: cluster + annotations: + include.release.openshift.io/self-managed-high-availability: 'true' + include.release.openshift.io/single-node-developer: 'true' + release.openshift.io/create-only: 'true' + spec: + identityProviders: + - name: ping + mappingMethod: add + openID: + claims: + name: + - name + email: + - email + preferredUsername: + - preferred_username + clientID: REDACTED + clientSecret: + name: ping-client-secret + extraScopes: [] + issuer: https://localhost:3000 + type: OpenID + - name: htpasswd + htpasswd: + fileData: + name: htpasswd-platform-team-secret + mappingMethod: claim + type: HTPasswd + remediationAction: inform + severity: high diff --git a/test/resources/case12_list_compare/case12_role_create_small.yaml b/test/resources/case12_list_compare/case12_role_create_small.yaml new file mode 100644 index 00000000..818b4dd2 --- /dev/null +++ b/test/resources/case12_list_compare/case12_role_create_small.yaml @@ -0,0 +1,28 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: topatch-role-configpolicy +spec: + remediationAction: enforce + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: topatch + namespace: default + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/test/resources/case12_list_compare/case12_role_patch.yaml b/test/resources/case12_list_compare/case12_role_patch.yaml new file mode 100644 index 00000000..5f817236 --- /dev/null +++ b/test/resources/case12_list_compare/case12_role_patch.yaml @@ -0,0 +1,43 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: patch-role-configpolicy +spec: + 
remediationAction: enforce + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: topatch + namespace: default + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/status + - persistentvolumes + - persistentvolumes/status + verbs: + - create + - get + - list + - delete + - watch + - patch + - update diff --git a/test/resources/case12_list_compare/case12_role_patch_inform.yaml b/test/resources/case12_list_compare/case12_role_patch_inform.yaml new file mode 100644 index 00000000..6ae4e15c --- /dev/null +++ b/test/resources/case12_list_compare/case12_role_patch_inform.yaml @@ -0,0 +1,43 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: patch-role-configpolicy-inform +spec: + remediationAction: inform + object-templates: + - complianceType: mustonlyhave + objectDefinition: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: topatch + namespace: default + rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/status + - persistentvolumes + - persistentvolumes/status + verbs: + - create + - get + - list + - delete + - watch + - patch + - update diff --git a/test/resources/case12_list_compare/case12_whitespace_create.yaml b/test/resources/case12_list_compare/case12_whitespace_create.yaml new file mode 100644 index 00000000..53cda0bf --- /dev/null +++ b/test/resources/case12_list_compare/case12_whitespace_create.yaml @@ -0,0 +1,34 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-pod-whitespace-env +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: apps/v1 + kind: Deployment + metadata: + name: envvar-whitespace + labels: + test: envvar-whitespace + spec: + replicas: 0 + selector: + matchLabels: + test: envvar-whitespace + template: + metadata: + labels: + test: envvar-whitespace + spec: + containers: + - image: nginx:1.7.9 + name: nginx + env: + - name: DEMO_GREETING + value: " \t hello with tricky whitespace \n " diff --git a/test/resources/case13_templatization/case13_update_referenced_object.yaml b/test/resources/case13_templatization/case13_update_referenced_object.yaml new file mode 100644 index 00000000..49b86736 --- /dev/null +++ b/test/resources/case13_templatization/case13_update_referenced_object.yaml @@ -0,0 +1,20 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-update-referenced-object +spec: + remediationAction: enforce + namespaceSelector: + exclude: ["kube-*"] + include: ["default"] + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: configmap-update-referenced-object-repl + namespace: default + data: + message: | + {{ fromConfigMap "default" "configmap-update-referenced-object" "message" }} diff --git a/test/resources/case14_namespaces/case14_limitrange.yaml b/test/resources/case14_namespaces/case14_limitrange.yaml new file mode 100644 index 
00000000..efab4a26 --- /dev/null +++ b/test/resources/case14_namespaces/case14_limitrange.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: container-mem-limit-range +spec: + limits: + - default: + ephemeral-storage: '0' + defaultRequest: + ephemeral-storage: '0' + type: Container \ No newline at end of file diff --git a/test/resources/case14_namespaces/case14_limitrange_named.yaml b/test/resources/case14_namespaces/case14_limitrange_named.yaml new file mode 100644 index 00000000..f6458eb8 --- /dev/null +++ b/test/resources/case14_namespaces/case14_limitrange_named.yaml @@ -0,0 +1,33 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-named-limitrange +spec: + namespaceSelector: + exclude: + - open-cluster-management* + - kube-* + - openshift* + - hive + - default + - local-cluster + - acm* + - e2e* + include: + - 'range*' + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: LimitRange + metadata: + name: container-mem-limit-range + spec: + limits: + - default: + ephemeral-storage: '0' + defaultRequest: + ephemeral-storage: '0' + type: Container + remediationAction: inform + severity: high diff --git a/test/resources/case14_namespaces/case14_limitrange_unnamed.yaml b/test/resources/case14_namespaces/case14_limitrange_unnamed.yaml new file mode 100644 index 00000000..b5e7cbdf --- /dev/null +++ b/test/resources/case14_namespaces/case14_limitrange_unnamed.yaml @@ -0,0 +1,32 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: policy-unnamed-limitrange +spec: + namespaceSelector: + exclude: + - open-cluster-management* + - kube-* + - openshift* + - hive + - default + - local-cluster + - acm* + - e2e* + - stackrox* + include: + - 'range*' + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: v1 + kind: LimitRange + spec: + limits: + - default: + ephemeral-storage: '0' + defaultRequest: + ephemeral-storage: '0' + type: Container + remediationAction: inform + severity: high
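+ # metadata.name is intentionally omitted from the objectDefinition above, so this policy is satisfied by any LimitRange with this spec in each selected namespace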