diff --git a/api/v1/configurationpolicy_types.go b/api/v1/configurationpolicy_types.go index a38a42de..ed1077ae 100644 --- a/api/v1/configurationpolicy_types.go +++ b/api/v1/configurationpolicy_types.go @@ -195,6 +195,8 @@ type ObjectTemplate struct { MetadataComplianceType MetadataComplianceType `json:"metadataComplianceType,omitempty"` + RecreateOption RecreateOption `json:"recreateOption,omitempty"` + // ObjectDefinition defines required fields for the object // +kubebuilder:pruning:PreserveUnknownFields ObjectDefinition runtime.RawExtension `json:"objectDefinition"` @@ -340,6 +342,19 @@ func (c ComplianceType) IsMustNotHave() bool { // +kubebuilder:validation:Enum=MustHave;Musthave;musthave;MustOnlyHave;Mustonlyhave;mustonlyhave type MetadataComplianceType string +// RecreateOption describes the condition when to delete and recreate an object when an update is required. IfRequired +// will recreate the object when updating an immutable field. Always will always recreate the object if a mismatch is +// detected. RecreateOption has no effect when the remediationAction is inform. IfRequired has no effect on clusters +// without dry run update support. Default is None. +// +kubebuilder:validation:Enum=None;IfRequired;Always +type RecreateOption string + +const ( + None RecreateOption = "None" + IfRequired RecreateOption = "IfRequired" + Always RecreateOption = "Always" +) + // RelatedObject is the list of objects matched by this Policy resource. type RelatedObject struct { // diff --git a/controllers/configurationpolicy_controller.go b/controllers/configurationpolicy_controller.go index c4b1d51f..03335ace 100644 --- a/controllers/configurationpolicy_controller.go +++ b/controllers/configurationpolicy_controller.go @@ -408,6 +408,19 @@ func (r *ConfigurationPolicyReconciler) shouldEvaluatePolicy( return true } + // If there was a timeout during a recreate, immediately evaluate the policy regardless of the evaluation interval. 
+ if policy.Status.ComplianceState == policyv1.NonCompliant { + for _, details := range policy.Status.CompliancyDetails { + for _, condition := range details.Conditions { + if condition.Reason == "K8s update template error" && strings.Contains( + condition.Message, "timed out waiting for the object to delete during recreate", + ) { + return true + } + } + } + } + lastEvaluated, err := time.Parse(time.RFC3339, policy.Status.LastEvaluated) if err != nil { log.Error(err, "The policy has an invalid status.lastEvaluated value. Will evaluate it now.") @@ -1769,9 +1782,11 @@ func (r *ConfigurationPolicyReconciler) handleSingleObj( if exists && obj.shouldExist { log.V(2).Info("The object already exists. Verifying the object fields match what is desired.") - var throwSpecViolation, triedUpdate, updatedObj bool + var throwSpecViolation, triedUpdate bool var msg, diff string + var updatedObj *unstructured.Unstructured + created := false uid := string(obj.existingObj.GetUID()) if evaluated, compliant := r.alreadyEvaluated(obj.policy, obj.existingObj); evaluated { @@ -1791,6 +1806,11 @@ func (r *ConfigurationPolicyReconciler) handleSingleObj( throwSpecViolation, msg, diff, triedUpdate, updatedObj = r.checkAndUpdateResource( obj, objectT, remediation, ) + + if updatedObj != nil && string(updatedObj.GetUID()) != uid { + uid = string(updatedObj.GetUID()) + created = true + } } if triedUpdate && !strings.Contains(msg, "Error validating the object") { @@ -1798,8 +1818,6 @@ func (r *ConfigurationPolicyReconciler) handleSingleObj( result.events = append(result.events, objectTmplEvalEvent{false, reasonWantFoundNoMatch, ""}) } - created := false - if throwSpecViolation { var resultReason, resultMsg string @@ -1820,7 +1838,7 @@ func (r *ConfigurationPolicyReconciler) handleSingleObj( } else { // it is a must have and it does exist, so it is compliant if remediation.IsEnforce() { - if updatedObj { + if updatedObj != nil { result.events = append(result.events, objectTmplEvalEvent{true, 
reasonUpdateSuccess, ""}) } else { result.events = append(result.events, objectTmplEvalEvent{true, reasonWantFoundExists, ""}) @@ -2569,7 +2587,13 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( obj singleObject, objectT *policyv1.ObjectTemplate, remediation policyv1.RemediationAction, -) (throwSpecViolation bool, message string, diff string, updateNeeded bool, updateSucceeded bool) { +) ( + throwSpecViolation bool, + message string, + diff string, + updateNeeded bool, + updatedObj *unstructured.Unstructured, +) { complianceType := strings.ToLower(string(objectT.ComplianceType)) mdComplianceType := strings.ToLower(string(objectT.MetadataComplianceType)) @@ -2597,7 +2621,7 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( if obj.existingObj == nil { log.Info("Skipping update: Previous object retrieval from the API server failed") - return false, "", "", false, false + return false, "", "", false, nil } var res dynamic.ResourceInterface @@ -2615,10 +2639,11 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( obj.desiredObj, obj.existingObj, existingObjectCopy, complianceType, mdComplianceType, !r.DryRunSupported, ) if message != "" { - return true, message, "", true, false + return true, message, "", true, nil } recordDiff := objectT.RecordDiffWithDefault() + var needsRecreate bool if updateNeeded { mismatchLog := "Detected value mismatch" @@ -2631,7 +2656,7 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( if err := r.validateObject(obj.existingObj); err != nil { message := fmt.Sprintf("Error validating the object %s, the error is `%v`", obj.name, err) - return true, message, "", updateNeeded, false + return true, message, "", updateNeeded, nil } } @@ -2646,16 +2671,6 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( DryRun: []string{metav1.DryRunAll}, }) if err != nil { - // If an inform policy and the update is forbidden (i.e. 
modifying Pod spec fields), then return - // noncompliant since that confirms some fields don't match. - if k8serrors.IsForbidden(err) { - log.Info(fmt.Sprintf("Dry run update failed with error: %s", err.Error())) - - r.setEvaluatedObject(obj.policy, obj.existingObj, false) - - return true, "", "", false, false - } - // If it's a conflict, refetch the object and try again. if k8serrors.IsConflict(err) { log.Info("The object was updating during the evaluation. Trying again.") @@ -2668,32 +2683,70 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( } } - message := getUpdateErrorMsg(err, obj.existingObj.GetKind(), obj.name) - if message == "" { - message = fmt.Sprintf( - "Error issuing a dry run update request for the object `%v`, the error is `%v`", - obj.name, - err, - ) + // Handle all errors not related to updating immutable fields here + if !k8serrors.IsInvalid(err) { + message := getUpdateErrorMsg(err, obj.existingObj.GetKind(), obj.name) + if message == "" { + message = fmt.Sprintf( + "Error issuing a dry run update request for the object `%v`, the error is `%v`", + obj.name, + err, + ) + } + + // If the user specifies an unknown or invalid field, it comes back as a bad request. + if k8serrors.IsBadRequest(err) { + r.setEvaluatedObject(obj.policy, obj.existingObj, false) + } + + return true, message, "", updateNeeded, nil } - return true, message, "", updateNeeded, false - } + // If an update is invalid (i.e. modifying Pod spec fields), then return noncompliant since that + // confirms some fields don't match and can't be fixed with an update. If a recreate option is + // specified, then the update may proceed when enforced. 
+ needsRecreate = true + recreateOption := objectT.RecreateOption - removeFieldsForComparison(dryRunUpdatedObj) + if isInform || !(recreateOption == policyv1.Always || recreateOption == policyv1.IfRequired) { + log.Info(fmt.Sprintf("Dry run update failed with error: %s", err.Error())) - if reflect.DeepEqual(dryRunUpdatedObj.Object, existingObjectCopy.Object) { - log.Info( - "A mismatch was detected but a dry run update didn't make any changes. Assuming the object is " + - "compliant.", - ) + r.setEvaluatedObject(obj.policy, obj.existingObj, false) - r.setEvaluatedObject(obj.policy, obj.existingObj, true) + // Remove noisy fields such as managedFields from the diff + removeFieldsForComparison(existingObjectCopy) + removeFieldsForComparison(obj.existingObj) - return false, "", "", false, false - } + diff = handleDiff(log, recordDiff, true, existingObjectCopy, obj.existingObj) + + if !isInform { + // Don't include the error message in the compliance status because that can be very long. The + // user can check the diff or the logs for more information. + message = fmt.Sprintf( + `%s cannot be updated, likely due to immutable fields not matching, you may `+ + `set spec["object-templates"][].recreateOption to recreate the object`, + getMsgPrefix(&obj), + ) + } - diff = handleDiff(log, recordDiff, isInform, existingObjectCopy, dryRunUpdatedObj) + return true, message, diff, false, nil + } + } else { + removeFieldsForComparison(dryRunUpdatedObj) + + if reflect.DeepEqual(dryRunUpdatedObj.Object, existingObjectCopy.Object) { + log.Info( + "A mismatch was detected but a dry run update didn't make any changes. 
Assuming the object " + + "is compliant.", + ) + + r.setEvaluatedObject(obj.policy, obj.existingObj, true) + + return false, "", "", false, nil + } + + diff = handleDiff(log, recordDiff, isInform, existingObjectCopy, dryRunUpdatedObj) + } } else if recordDiff == policyv1.RecordDiffLog || (isInform && recordDiff == policyv1.RecordDiffInStatus) { // Generate and log the diff for when dryrun is unsupported (i.e. OCP v3.11) mergedObjCopy := obj.existingObj.DeepCopy() @@ -2706,18 +2759,56 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( if isInform { r.setEvaluatedObject(obj.policy, obj.existingObj, false) - return true, "", diff, false, false + return true, "", diff, false, nil } // If it's not inform (i.e. enforce), update the object - log.Info("Updating the object based on the template definition") + var err error + + // At this point, if a recreate is needed, we know the user opted in, otherwise, the dry run update + // failed and would have returned before now. + if needsRecreate || objectT.RecreateOption == policyv1.Always { + log.Info("Deleting and recreating the object based on the template definition") + + err = res.Delete(context.TODO(), obj.name, metav1.DeleteOptions{}) + if err != nil && !k8serrors.IsNotFound(err) { + message = fmt.Sprintf(`%s failed to delete when recreating with the error %v`, getMsgPrefix(&obj), err) + + return true, message, "", updateNeeded, nil + } + + start := time.Now() + + for { + updatedObj, err = res.Create(context.TODO(), &obj.desiredObj, metav1.CreateOptions{}) + if !k8serrors.IsAlreadyExists(err) { + // If there is no error or the error is unexpected, break for the error handling below + break + } + + if time.Since(start) > time.Second*10 { + message = fmt.Sprintf( + `%s timed out waiting for the object to delete during recreate, will retry on the next `+ + `policy evaluation`, + getMsgPrefix(&obj), + ) + + return true, message, "", updateNeeded, nil + } + + time.Sleep(time.Second) + } + } else { + 
log.Info("Updating the object based on the template definition") + + updatedObj, err = res.Update(context.TODO(), obj.existingObj, metav1.UpdateOptions{ + FieldValidation: metav1.FieldValidationStrict, + }) + } - updatedObj, err := res.Update(context.TODO(), obj.existingObj, metav1.UpdateOptions{ - FieldValidation: metav1.FieldValidationStrict, - }) if err != nil { if k8serrors.IsConflict(err) { - log.Info("The object updating during the evaluation. Trying again.") + log.Info("The object updated during the evaluation. Trying again.") rv, getErr := res.Get(context.TODO(), obj.existingObj.GetName(), metav1.GetOptions{}) if getErr == nil { @@ -2727,19 +2818,23 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( } } + action := "update" + + if needsRecreate || objectT.RecreateOption == policyv1.Always { + action = "recreate" + } + message := getUpdateErrorMsg(err, obj.existingObj.GetKind(), obj.name) if message == "" { - message = fmt.Sprintf("Error updating the object `%v`, the error is `%v`", obj.name, err) + message = fmt.Sprintf("%s failed to %s with the error `%v`", getMsgPrefix(&obj), action, err) } - return true, message, diff, updateNeeded, false + return true, message, diff, updateNeeded, nil } if !statusMismatch { r.setEvaluatedObject(obj.policy, updatedObj, true) } - - updateSucceeded = true } else { if throwSpecViolation && recordDiff != policyv1.RecordDiffNone { // The spec didn't require a change but throwSpecViolation indicates the status didn't match. 
Handle @@ -2754,7 +2849,17 @@ func (r *ConfigurationPolicyReconciler) checkAndUpdateResource( r.setEvaluatedObject(obj.policy, obj.existingObj, !throwSpecViolation) } - return throwSpecViolation, "", diff, updateNeeded, updateSucceeded + return throwSpecViolation, "", diff, updateNeeded, updatedObj +} + +func getMsgPrefix(obj *singleObject) string { + var namespaceMsg string + + if obj.namespaced { + namespaceMsg = fmt.Sprintf(" in namespace %s", obj.namespace) + } + + return fmt.Sprintf(`%s [%s]%s`, obj.gvr.Resource, obj.name, namespaceMsg) } // handleDiff will generate the diff and then log it or return it based on the input recordDiff value. If recordDiff diff --git a/deploy/crds/kustomize_configurationpolicy/policy.open-cluster-management.io_configurationpolicies.yaml b/deploy/crds/kustomize_configurationpolicy/policy.open-cluster-management.io_configurationpolicies.yaml index db5e43da..0ab25173 100644 --- a/deploy/crds/kustomize_configurationpolicy/policy.open-cluster-management.io_configurationpolicies.yaml +++ b/deploy/crds/kustomize_configurationpolicy/policy.open-cluster-management.io_configurationpolicies.yaml @@ -177,6 +177,17 @@ spec: - InStatus - None type: string + recreateOption: + description: |- + RecreateOption describes the condition when to delete and recreate an object when an update is required. IfRequired + will recreate the object when updating an immutable field. Always will always recreate the object if a mismatch is + detected. RecreateOption has no effect when the remediationAction is inform. IfRequired has no effect on clusters + without dry run update support. Default is None. 
+ enum: + - None + - IfRequired + - Always + type: string required: - complianceType - objectDefinition diff --git a/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml b/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml index fd8a9b11..3944a91f 100644 --- a/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml +++ b/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml @@ -184,6 +184,17 @@ spec: - InStatus - None type: string + recreateOption: + description: |- + RecreateOption describes the condition when to delete and recreate an object when an update is required. IfRequired + will recreate the object when updating an immutable field. Always will always recreate the object if a mismatch is + detected. RecreateOption has no effect when the remediationAction is inform. IfRequired has no effect on clusters + without dry run update support. Default is None. + enum: + - None + - IfRequired + - Always + type: string required: - complianceType - objectDefinition diff --git a/test/e2e/case40_recreate_option_test.go b/test/e2e/case40_recreate_option_test.go new file mode 100644 index 00000000..b535fa16 --- /dev/null +++ b/test/e2e/case40_recreate_option_test.go @@ -0,0 +1,226 @@ +// Copyright Contributors to the Open Cluster Management project + +package e2e + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "open-cluster-management.io/config-policy-controller/test/utils" +) + +var _ = Describe("Recreate options", Ordered, func() { + const ( + deploymentYAML = "../resources/case40_recreate_option/deployment.yaml" + configMapYAML = "../resources/case40_recreate_option/configmap-with-finalizer.yaml" + policyNoRecreateYAML = "../resources/case40_recreate_option/policy-no-recreate-options.yaml" + policyAlwaysRecreateYAML = "../resources/case40_recreate_option/policy-always-recreate-option.yaml" + ) + + AfterAll(func(ctx SpecContext) { + deleteConfigPolicies([]string{"case40"}) + + By("Deleting the case40 Deployment in the default namespace") + utils.Kubectl("-n", "default", "delete", "deployment", "case40", "--ignore-not-found") + + By("Deleting the case40 ConfigMap in the default namespace") + configmap, err := clientManagedDynamic.Resource(gvrConfigMap).Namespace("default").Get( + ctx, "case40", metav1.GetOptions{}, + ) + if k8serrors.IsNotFound(err) { + return + } + + Expect(err).ToNot(HaveOccurred()) + + if len(configmap.GetFinalizers()) != 0 { + utils.Kubectl( + "-n", "default", + "patch", + "configmap", + "case40", + "--type", + "json", + `-p=[{"op":"remove","path":"/metadata/finalizers"}]`, + ) + } + + utils.Kubectl("-n", "default", "delete", "configmap", "case40", "--ignore-not-found") + }) + + It("should fail to update due to immutable fields not matching", func() { + By("Creating the case40 Deployment in the default namespace") + utils.Kubectl("create", "-f", deploymentYAML) + + By("Creating the case40 ConfigurationPolicy with different selectors on the Deployment") + utils.Kubectl("-n", testNamespace, "create", "-f", policyNoRecreateYAML) + + By("Verifying the ConfigurationPolicy is NonCompliant") + var managedPlc *unstructured.Unstructured + + Eventually(func() interface{} { + 
managedPlc = utils.GetWithTimeout( + clientManagedDynamic, + gvrConfigPolicy, + "case40", + testNamespace, + true, + defaultTimeoutSeconds, + ) + + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant")) + + By("Verifying the diff is present") + relatedObjects, _, err := unstructured.NestedSlice(managedPlc.Object, "status", "relatedObjects") + Expect(err).ToNot(HaveOccurred()) + Expect(relatedObjects).To(HaveLen(1)) + + diff, _, _ := unstructured.NestedString(relatedObjects[0].(map[string]interface{}), "properties", "diff") + Expect(diff).To(ContainSubstring("- app: case40\n+ app: case40-2\n")) + + expected := `deployments [case40] in namespace default cannot be updated, likely due to immutable fields not ` + `matching, you may set spec["object-templates"][].recreateOption to recreate the object` + Expect(utils.GetStatusMessage(managedPlc)).To(Equal(expected)) + }) + + It("should update the immutable fields when recreateOption=IfRequired", func(ctx SpecContext) { + By("Setting recreateOption=IfRequired on the case40 ConfigurationPolicy") + deployment, err := clientManagedDynamic.Resource(gvrDeployment).Namespace("default").Get( + ctx, "case40", metav1.GetOptions{}, + ) + Expect(err).ToNot(HaveOccurred()) + + uid := deployment.GetUID() + + utils.Kubectl( + "-n", testNamespace, "patch", "configurationpolicy", "case40", "--type=json", "-p", + `[{ "op": "replace", "path": "/spec/object-templates/0/recreateOption", "value": "IfRequired" }]`, + ) + + By("Verifying the ConfigurationPolicy is Compliant") + var managedPlc *unstructured.Unstructured + + Eventually(func() interface{} { + managedPlc = utils.GetWithTimeout( + clientManagedDynamic, + gvrConfigPolicy, + "case40", + testNamespace, + true, + defaultTimeoutSeconds, + ) + + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + + By("Verifying the diff is not set") + relatedObjects, _, err := 
unstructured.NestedSlice(managedPlc.Object, "status", "relatedObjects") + Expect(err).ToNot(HaveOccurred()) + Expect(relatedObjects).To(HaveLen(1)) + + relatedObject := relatedObjects[0].(map[string]interface{}) + + diff, _, _ := unstructured.NestedString(relatedObject, "properties", "diff") + Expect(diff).To(BeEmpty()) + + propsUID, _, _ := unstructured.NestedString(relatedObject, "properties", "uid") + Expect(propsUID).ToNot(BeEmpty()) + + createdByPolicy, _, _ := unstructured.NestedBool(relatedObject, "properties", "createdByPolicy") + Expect(createdByPolicy).To(BeTrue()) + + deployment, err = clientManagedDynamic.Resource(gvrDeployment).Namespace("default").Get( + ctx, "case40", metav1.GetOptions{}, + ) + Expect(err).ToNot(HaveOccurred()) + + By("Verifying the Deployment was recreated") + Expect(deployment.GetUID()).ToNot(Equal(uid), "Expected a new UID on the Deployment after it got recreated") + Expect(propsUID).To( + BeEquivalentTo(deployment.GetUID()), "Expect the object properties UID to match the new Deployment", + ) + + selector, _, _ := unstructured.NestedString(deployment.Object, "spec", "selector", "matchLabels", "app") + Expect(selector).To(Equal("case40-2")) + + deleteConfigPolicies([]string{"case40"}) + }) + + It("should timeout on the delete when there is a finalizer", func(ctx SpecContext) { + By("Creating the case40 ConfigMap in the default namespace") + utils.Kubectl("create", "-f", configMapYAML) + + configmap, err := clientManagedDynamic.Resource(gvrConfigMap).Namespace("default").Get( + ctx, "case40", metav1.GetOptions{}, + ) + Expect(err).ToNot(HaveOccurred()) + + uid := configmap.GetUID() + + By("Creating the case40 ConfigurationPolicy with different data on the ConfigMap") + utils.Kubectl("-n", testNamespace, "create", "-f", policyAlwaysRecreateYAML) + + expected := `configmaps [case40] in namespace default timed out waiting for the object to delete during ` + + `recreate, will retry on the next policy evaluation` + + By("Verifying the 
ConfigurationPolicy is NonCompliant") + var managedPlc *unstructured.Unstructured + + Eventually(func(g Gomega) { + managedPlc = utils.GetWithTimeout( + clientManagedDynamic, + gvrConfigPolicy, + "case40", + testNamespace, + true, + defaultTimeoutSeconds, + ) + + g.Expect(utils.GetComplianceState(managedPlc)).To(Equal("NonCompliant")) + g.Expect(utils.GetStatusMessage(managedPlc)).To(Equal(expected)) + }, defaultTimeoutSeconds, 1).Should(Succeed()) + + By("Removing the finalizer on the ConfigMap") + utils.Kubectl( + "-n", "default", + "patch", + "configmap", + "case40", + "--type", + "json", + `-p=[{"op":"remove","path":"/metadata/finalizers"}]`, + ) + + By("Verifying the ConfigurationPolicy is Compliant") + Eventually(func() interface{} { + managedPlc = utils.GetWithTimeout( + clientManagedDynamic, + gvrConfigPolicy, + "case40", + testNamespace, + true, + defaultTimeoutSeconds, + ) + + return utils.GetComplianceState(managedPlc) + }, defaultTimeoutSeconds, 1).Should(Equal("Compliant")) + + By("Verifying the ConfigMap was recreated") + + configmap, err = clientManagedDynamic.Resource(gvrConfigMap).Namespace("default").Get( + ctx, "case40", metav1.GetOptions{}, + ) + Expect(err).ToNot(HaveOccurred()) + + Expect(configmap.GetUID()).ToNot(Equal(uid), "Expected a new UID on the ConfigMap after it got recreated") + + city, _, _ := unstructured.NestedString(configmap.Object, "data", "city") + Expect(city).To(Equal("Raleigh")) + + deleteConfigPolicies([]string{"case40"}) + }) +}) diff --git a/test/resources/case40_recreate_option/configmap-with-finalizer.yaml b/test/resources/case40_recreate_option/configmap-with-finalizer.yaml new file mode 100644 index 00000000..c1bed2ca --- /dev/null +++ b/test/resources/case40_recreate_option/configmap-with-finalizer.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: case40 + namespace: default + finalizers: + - policy.open-cluster-management.io/case40 +data: + city: Durham + state: NC diff --git 
a/test/resources/case40_recreate_option/deployment.yaml b/test/resources/case40_recreate_option/deployment.yaml new file mode 100644 index 00000000..d054962e --- /dev/null +++ b/test/resources/case40_recreate_option/deployment.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: case40 + namespace: default +spec: + replicas: 0 + selector: + matchLabels: + app: case40 + strategy: + type: Recreate + template: + metadata: + labels: + app: case40 + spec: + containers: + - args: + - do-not-start + command: + - config-policy-controller + image: quay.io/open-cluster-management/config-policy-controller:latest + imagePullPolicy: IfNotPresent + name: case40 + restartPolicy: Always diff --git a/test/resources/case40_recreate_option/policy-always-recreate-option.yaml b/test/resources/case40_recreate_option/policy-always-recreate-option.yaml new file mode 100644 index 00000000..834e7a09 --- /dev/null +++ b/test/resources/case40_recreate_option/policy-always-recreate-option.yaml @@ -0,0 +1,19 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: case40 +spec: + pruneObjectBehavior: DeleteAll + remediationAction: enforce + object-templates: + - complianceType: musthave + recreateOption: Always + objectDefinition: + apiVersion: v1 + kind: ConfigMap + metadata: + name: case40 + namespace: default + data: + city: Raleigh + state: NC diff --git a/test/resources/case40_recreate_option/policy-no-recreate-options.yaml b/test/resources/case40_recreate_option/policy-no-recreate-options.yaml new file mode 100644 index 00000000..de11b761 --- /dev/null +++ b/test/resources/case40_recreate_option/policy-no-recreate-options.yaml @@ -0,0 +1,36 @@ +apiVersion: policy.open-cluster-management.io/v1 +kind: ConfigurationPolicy +metadata: + name: case40 +spec: + pruneObjectBehavior: DeleteAll + remediationAction: enforce + object-templates: + - complianceType: musthave + objectDefinition: + apiVersion: apps/v1 + kind: Deployment 
+ metadata: + name: case40 + namespace: default + spec: + replicas: 0 + selector: + matchLabels: + app: case40-2 + strategy: + type: Recreate + template: + metadata: + labels: + app: case40-2 + spec: + containers: + - args: + - do-not-start + command: + - config-policy-controller + image: quay.io/open-cluster-management/config-policy-controller:latest + imagePullPolicy: IfNotPresent + name: case40 + restartPolicy: Always