Fix bug where user error validation was being ignored
The template-name format emitted by template-sync on errors was changed so that the policy status sync regex can read it, and policy template evaluation was changed to evaluate erroneous policy templates instead of ignoring them.

Signed-off-by: Jeffrey Luo <jeluo@redhat.com>
JeffeyL authored and openshift-merge-robot committed Sep 7, 2023
1 parent 2df23d8 commit 03bd852
Showing 5 changed files with 196 additions and 8 deletions.
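The fix hinges on the two controllers agreeing on a fallback template name when a template cannot be decoded. A minimal sketch of that convention, where fallbackTemplateName is an illustrative helper that does not exist in the repo:

package main

import "fmt"

// fallbackTemplateName mirrors the convention this commit aligns on: when a
// policy template cannot be decoded, both template-sync (emitting the error
// event) and status-sync (matching that event to a status detail) derive the
// template name from the template's index rather than from the undecodable
// object.
func fallbackTemplateName(index int) string {
	return fmt.Sprintf("template-%v", index)
}

func main() {
	// Both controllers compute the same string, so the event and the
	// status detail line up.
	fmt.Println(fallbackTemplateName(0)) // template-0
}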
14 changes: 10 additions & 4 deletions controllers/statussync/policy_status_sync.go
@@ -215,17 +215,23 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, request reconcile.Requ
 
 	reqLogger.Info("Updating status for policy templates")
 
-	for _, policyT := range instance.Spec.PolicyTemplates {
+	for i, policyT := range instance.Spec.PolicyTemplates {
+		existingDpt := &policiesv1.DetailsPerTemplate{}
+
+		var tName string
+
 		object, _, err := unstructured.UnstructuredJSONScheme.Decode(policyT.ObjectDefinition.Raw, nil, nil)
 		if err != nil {
 			// failed to decode PolicyTemplate, skipping it
 			reqLogger.Error(err, "Failed to decode policy template, skipping it")
 
-			break
+			existingDpt.ComplianceState = policiesv1.NonCompliant
+			newStatus.Details = append(newStatus.Details, existingDpt)
+			tName = fmt.Sprintf("template-%v", i) // template-sync emits this name on error
+		} else {
+			tName = object.(metav1.Object).GetName()
 		}
 
-		tName := object.(metav1.Object).GetName()
-		existingDpt := &policiesv1.DetailsPerTemplate{}
 		// retrieve existingDpt from instance.status.details field
 		found := false
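The behavioral change in this hunk is that a decode failure no longer breaks out of the template loop. A simplified sketch of the new control flow, using stand-in types rather than the controller's actual structs:

package main

import "fmt"

type detail struct {
	name      string
	compliant bool
}

func main() {
	// "" stands in for a template whose objectDefinition fails to decode.
	templates := []string{"", "good-template"}

	var details []detail

	for i, name := range templates {
		if name == "" {
			// New behavior: record a NonCompliant entry under the fallback
			// name and keep going. The old code did `break` here, so this
			// template and every one after it got no status entry at all.
			details = append(details, detail{name: fmt.Sprintf("template-%v", i), compliant: false})

			continue
		}

		details = append(details, detail{name: name, compliant: true})
	}

	fmt.Printf("%+v\n", details) // every template now has a status entry
}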
8 changes: 4 additions & 4 deletions controllers/templatesync/template_sync.go
@@ -173,7 +173,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, request reconcile.Requ
 		reqLogger.Info(msg)
 
 		for tIndex := range instance.Spec.PolicyTemplates {
-			_ = r.emitTemplateError(ctx, instance, tIndex, fmt.Sprintf("[template %v]", tIndex), false, msg)
+			_ = r.emitTemplateError(ctx, instance, tIndex, fmt.Sprintf("template-%v", tIndex), false, msg)
 		}
 
 		return reconcile.Result{}, nil
@@ -276,7 +276,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, request reconcile.Requ
 			resultError = err
 			errMsg := fmt.Sprintf("Failed to decode policy template with err: %s", err)
 
-			_ = r.emitTemplateError(ctx, instance, tIndex, fmt.Sprintf("[template %v]", tIndex), false, errMsg)
+			_ = r.emitTemplateError(ctx, instance, tIndex, fmt.Sprintf("template-%v", tIndex), false, errMsg)
 
 			reqLogger.Error(resultError, "Failed to decode the policy template", "templateIndex", tIndex)
@@ -326,7 +326,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, request reconcile.Requ
 				errMsg := fmt.Sprintf("Failed to decode policy template with err: %s", resultError)
 
 				_ = r.emitTemplateError(ctx, instance, tIndex,
-					fmt.Sprintf("[template %v]", tIndex), isClusterScoped, errMsg)
+					fmt.Sprintf("template-%v", tIndex), isClusterScoped, errMsg)
 
 				reqLogger.Error(resultError, "Failed to decode the policy template", "templateIndex", tIndex)
@@ -356,7 +356,7 @@ func (r *PolicyReconciler) Reconcile(ctx context.Context, request reconcile.Requ
 			resultError = k8serrors.NewBadRequest(errMsg)
 
 			_ = r.emitTemplateError(ctx, instance, tIndex,
-				fmt.Sprintf("[template %v]", tIndex), isClusterScoped, errMsg)
+				fmt.Sprintf("template-%v", tIndex), isClusterScoped, errMsg)
 
 			reqLogger.Error(resultError, "Failed to process the policy template", "templateIndex", tIndex)
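For reference, the "NonCompliant; template-error;" substring that the e2e test below matches on is recorded against this renamed template reference. A rough sketch of the message shape; the exact assembly inside emitTemplateError is assumed here, not quoted from the repo:

package main

import "fmt"

func main() {
	tIndex := 0
	errMsg := "Failed to decode policy template with err: Object 'Kind' is missing in ..."

	// After this commit the fallback name is "template-0" rather than
	// "[template 0]", a form the status-sync regex can read.
	tName := fmt.Sprintf("template-%v", tIndex)

	// Assumed message shape; the real event text is assembled by
	// emitTemplateError and copied into status history by status-sync.
	fmt.Printf("%s: NonCompliant; template-error; %s\n", tName, errMsg)
}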
84 changes: 84 additions & 0 deletions test/e2e/case22_user_validation_error_test.go
@@ -0,0 +1,84 @@
// Copyright (c) 2023 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project

package e2e

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"open-cluster-management.io/config-policy-controller/test/utils"
	policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
	policyUtils "open-cluster-management.io/governance-policy-propagator/test/utils"
)
var _ = Describe("Test proper status handling on user validation error", Ordered, func() {
	case22ErrPolicyName := "case22-err"
	case22ErrYaml := "../resources/case22_user_validation_error/case22-test-err.yaml"
	case22CorrectPolicyName := "case22-correct"
	case22CorrectYaml := "../resources/case22_user_validation_error/case22-test-correct.yaml"

	cleanup := func() {
		By("Deleting test policies on hub cluster in ns:" + clusterNamespaceOnHub)
		// Clean up and ignore any errors (in case the policies were already deleted)
		_, _ = kubectlHub("delete", "-f", case22ErrYaml, "-n", clusterNamespaceOnHub, "--ignore-not-found")
		_, _ = kubectlHub("delete", "-f", case22CorrectYaml, "-n", clusterNamespaceOnHub, "--ignore-not-found")
		opt := metav1.ListOptions{}
		policyUtils.ListWithTimeout(clientHubDynamic, gvrPolicy, opt, 0, true, defaultTimeoutSeconds)
		policyUtils.ListWithTimeout(clientManagedDynamic, gvrPolicy, opt, 0, true, defaultTimeoutSeconds)
	}

	AfterAll(cleanup)

	It("Verifies NonCompliant status for non-decodable policy", func() {
		hubApplyPolicy(case22ErrPolicyName, case22ErrYaml)

		By("Waiting for " + case22ErrPolicyName + " to become NonCompliant")
		Eventually(func() interface{} {
			plc := utils.GetWithTimeout(
				clientHubDynamic, gvrPolicy,
				case22ErrPolicyName, clusterNamespaceOnHub,
				true, defaultTimeoutSeconds,
			)

			return utils.GetComplianceState(plc)
		}, defaultTimeoutSeconds, 1).Should(Equal("NonCompliant"))
	})
It("Verifies that validation errors are shown", func() {
By("Checking message on " + case22ErrPolicyName)
var plc *policiesv1.Policy
Eventually(func(g Gomega) interface{} {
managedPlc := utils.GetWithTimeout(
clientManagedDynamic,
gvrPolicy,
case22ErrPolicyName,
clusterNamespace,
true,
defaultTimeoutSeconds)
err := runtime.DefaultUnstructuredConverter.FromUnstructured(managedPlc.Object, &plc)
g.Expect(err).ToNot(HaveOccurred())
if len(plc.Status.Details) < 1 {
return ""
}

return plc.Status.Details[1].History[0].Message
}, defaultTimeoutSeconds, 1).Should(ContainSubstring("NonCompliant; template-error;"))
})

It("Verifies correct policy does not become NonCompliant", func() {
hubApplyPolicy(case22CorrectPolicyName, case22CorrectYaml)

By("Checking that " + case22CorrectPolicyName + " does not become NonCompliant")
Consistently(func() interface{} {
plc := utils.GetWithTimeout(
clientHubDynamic, gvrPolicy,
case22CorrectPolicyName, clusterNamespaceOnHub,
true, defaultTimeoutSeconds,
)

return utils.GetComplianceState(plc)
}, defaultTimeoutSeconds, 1).ShouldNot(Equal("NonCompliant"))
})
})
47 changes: 47 additions & 0 deletions test/resources/case22_user_validation_error/case22-test-correct.yaml
@@ -0,0 +1,47 @@
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: case22-correct
  labels:
    policy.open-cluster-management.io/cluster-name: managed
    policy.open-cluster-management.io/cluster-namespace: managed
    policy.open-cluster-management.io/root-policy: case22-correct
spec:
  disabled: false
  policy-templates:
    - objectDefinition:
        apiVersion: policy.open-cluster-management.io/v1
        kind: ConfigurationPolicy
        metadata:
          name: checkfailednodes
        spec:
          object-templates-raw: |
            {{- range $node := (lookup "v1" "Node" "" "").items }}
            - complianceType: musthave
              objectDefinition:
                apiVersion: v1
                kind: Node
                metadata:
                  name: test-node
                status:
                  conditions:
                  - message: kubelet has sufficient memory available
                    reason: KubeletHasSufficientMemory
                    status: "False"
                    type: MemoryPressure
                  - message: kubelet has no disk pressure
                    reason: KubeletHasNoDiskPressure
                    status: "False"
                    type: DiskPressure
                  - message: kubelet has sufficient PID available
                    reason: KubeletHasSufficientPID
                    status: "False"
                    type: PIDPressure
                  - message: kubelet is posting ready status
                    reason: KubeletReady
                    status: "True"
                    type: Ready
            {{- end }}
          remediationAction: inform
          severity: low
  remediationAction: inform
51 changes: 51 additions & 0 deletions test/resources/case22_user_validation_error/case22-test-err.yaml
@@ -0,0 +1,51 @@
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: case22-err
  labels:
    policy.open-cluster-management.io/cluster-name: managed
    policy.open-cluster-management.io/cluster-namespace: managed
    policy.open-cluster-management.io/root-policy: case22-err
  creationTimestamp: 2023-08-07T09:21:13Z
  generation: 4
  managedFields:
  resourceVersion: "168475"
  uid: d40e4a53-919a-4d5e-a23e-de32eb9ae710
spec:
  disabled: false
  policy-templates:
    - objectDefinition:
        spec:
          apiVersion: policy.open-cluster-management.io/v1
          kind: ConfigurationPolicy
          metadata:
            name: checkfailednodes
            severity: low
          spec:
            object-templates-raw: |
              {{- /* loop over nodes*/ -}}
              {{- range $node := (lookup "v1" "Node" "").items }}
              - complianceType: musthave
                objectDefinition:
                  apiVersion: v1
                  kind: Node
                  metadata:
                    name: {{ $node }}
                  status:
                    conditions:
                    - message: kubelet has sufficient memory available
                      reason: KubeletHasSufficientMemory
                      status: "False"
                      type: MemoryPressure
            remediationAction: inform
            severity: high
  remediationAction: inform
status:
  compliant: Compliant
  placement:
    - placement: testix-placement
      placementBinding: testix-placement
  status:
    - clustername: local-cluster
      clusternamespace: local-cluster
      compliant: Compliant
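Why this fixture exercises the decode path: the ConfigurationPolicy sits under a stray spec: wrapper, so the objectDefinition has no top-level apiVersion/kind and UnstructuredJSONScheme.Decode rejects it. A minimal reproduction, a sketch that reduces the fixture to its essential error using only apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// The fixture's objectDefinition, reduced: apiVersion and kind are
	// hidden one level down under the spurious "spec" wrapper.
	raw := []byte(`{"spec":{"apiVersion":"policy.open-cluster-management.io/v1","kind":"ConfigurationPolicy"}}`)

	// The same decode call the controllers make on each policy template.
	_, _, err := unstructured.UnstructuredJSONScheme.Decode(raw, nil, nil)

	fmt.Println(err) // Object 'Kind' is missing in '{...}'
}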
