From ccc46b3bd18da885129e2af81e855a6ac43907d6 Mon Sep 17 00:00:00 2001
From: Jeffrey Luo
Date: Tue, 15 Aug 2023 03:04:12 -0400
Subject: [PATCH] ACM-6596: Initialize controller for OperatorPolicy

ref: https://issues.redhat.com/browse/ACM-6596

Signed-off-by: Jeffrey Luo
(cherry picked from commit 4f3c09a1b7e2314e4216915e86d07b0eb77e2406)
---
 controllers/operatorpolicy_controller.go | 149 +++++++++++++++++++++--
 main.go                                  |  30 +++++
 main_test.go                             |   1 +
 3 files changed, 170 insertions(+), 10 deletions(-)

diff --git a/controllers/operatorpolicy_controller.go b/controllers/operatorpolicy_controller.go
index 3c6526e8..5eda072f 100644
--- a/controllers/operatorpolicy_controller.go
+++ b/controllers/operatorpolicy_controller.go
@@ -5,20 +5,41 @@ package controllers
 
 import (
 	"context"
+	"fmt"
+	"time"
 
-	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
+	policyv1 "open-cluster-management.io/config-policy-controller/api/v1"
 	policyv1beta1 "open-cluster-management.io/config-policy-controller/api/v1beta1"
 )
 
+const (
+	OperatorControllerName string = "operator-policy-controller"
+)
+
+var OpLog = ctrl.Log.WithName(OperatorControllerName)
+
 // OperatorPolicyReconciler reconciles a OperatorPolicy object
 type OperatorPolicyReconciler struct {
 	client.Client
-	Scheme *runtime.Scheme
 }
 
+// SetupWithManager sets up the controller with the Manager.
+func (r *OperatorPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		Named(OperatorControllerName).
+		For(&policyv1beta1.OperatorPolicy{}).
+		Complete(r)
+}
+
+// blank assignment to verify that OperatorPolicyReconciler implements reconcile.Reconciler
+var _ reconcile.Reconciler = &OperatorPolicyReconciler{}
+
 //+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=operatorpolicies,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=operatorpolicies/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=operatorpolicies/finalizers,verbs=update
@@ -34,16 +55,124 @@ type OperatorPolicyReconciler struct {
 // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile
 func (r *OperatorPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	policy := &policyv1beta1.OperatorPolicy{}
-	_ = r.Get(ctx, req.NamespacedName, policy)
-	// TODO(user): your logic here
+	err := r.Get(ctx, req.NamespacedName, policy)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			OpLog.Info("Operator policy could not be found")
 
-	return ctrl.Result{}, nil
+			return reconcile.Result{}, nil
+		}
+	}
+
+	return reconcile.Result{}, nil
 }
 
-// SetupWithManager sets up the controller with the Manager.
-func (r *OperatorPolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	return ctrl.NewControllerManagedBy(mgr).
-		For(&policyv1beta1.OperatorPolicy{}).
-		Complete(r)
+// PeriodicallyExecOperatorPolicies loops through all operatorpolicies in the target namespace and triggers
+// template handling for each one. This function drives all the work the operator policy controller does.
+func (r *OperatorPolicyReconciler) PeriodicallyExecOperatorPolicies(freq uint, elected <-chan struct{},
+) {
+	OpLog.Info("Waiting for leader election before periodically evaluating operator policies")
+	<-elected
+
+	exiting := false
+	for !exiting {
+		start := time.Now()
+
+		var skipLoop bool
+		if !skipLoop {
+			policiesList := policyv1beta1.OperatorPolicyList{}
+
+			err := r.List(context.TODO(), &policiesList)
+			if err != nil {
+				OpLog.Error(err, "Failed to list the OperatorPolicy objects to evaluate")
+			} else {
+				for i := range policiesList.Items {
+					policy := policiesList.Items[i]
+					if !r.shouldEvaluatePolicy(&policy) {
+						continue
+					}
+
+					// handle policy
+					err := r.handleSinglePolicy(&policy)
+					if err != nil {
+						OpLog.Error(err, "Error while evaluating operator policy")
+					}
+				}
+			}
+		}
+
+		elapsed := time.Since(start).Seconds()
+		if float64(freq) > elapsed {
+			remainingSleep := float64(freq) - elapsed
+			sleepTime := time.Duration(remainingSleep) * time.Second
+			OpLog.V(2).Info("Sleeping before reprocessing the operator policies", "seconds", sleepTime)
+			time.Sleep(sleepTime)
+		}
+	}
+}
+
+// handleSinglePolicy encapsulates the logic for processing a single operatorPolicy.
+// Currently, this just means setting the status to Compliant and logging the name
+// of the operatorPolicy.
+//
+// In the future, more reconciliation logic will be added. For reference:
+// https://github.com/JustinKuli/ocm-enhancements/blob/89-operator-policy/enhancements/sig-policy/89-operator-policy-kind/README.md
+func (r *OperatorPolicyReconciler) handleSinglePolicy(
+	policy *policyv1beta1.OperatorPolicy,
+) error {
+	policy.Status.ComplianceState = policyv1.Compliant
+
+	err := r.updatePolicyStatus(policy)
+	if err != nil {
+		OpLog.Info("Error while updating policy status")
+
+		return err
+	}
+
+	OpLog.Info("Logging operator policy", "policy", policy.Name)
+
+	return nil
+}
+
+// updatePolicyStatus updates the status of the operatorPolicy.
+//
+// In the future, a condition should be added as well, and this should generate events.
+func (r *OperatorPolicyReconciler) updatePolicyStatus(
+	policy *policyv1beta1.OperatorPolicy,
+) error {
+	updatedStatus := policy.Status
+
+	err := r.Get(context.TODO(), types.NamespacedName{Namespace: policy.Namespace, Name: policy.Name}, policy)
+	if err != nil {
+		OpLog.Info(fmt.Sprintf("Failed to refresh policy; using previously fetched version: %s", err))
+	} else {
+		policy.Status = updatedStatus
+	}
+
+	err = r.Status().Update(context.TODO(), policy)
+	if err != nil {
+		OpLog.Info(fmt.Sprintf("Failed to update policy status: %s", err))
+
+		return err
+	}
+
+	return nil
+}
+
+// shouldEvaluatePolicy will determine if the policy is ready for evaluation by checking
+// the compliance status. If it is already compliant, then evaluation will be skipped.
+// Otherwise, it will be evaluated.
+//
+// In the future, other mechanisms for determining evaluation should be considered.
+func (r *OperatorPolicyReconciler) shouldEvaluatePolicy(
+	policy *policyv1beta1.OperatorPolicy,
+) bool {
+	if policy.Status.ComplianceState == policyv1.Compliant {
+		OpLog.Info(fmt.Sprintf("%s is already compliant, skipping evaluation", policy.Name))
+
+		return false
+	}
+
+	return true
 }
diff --git a/main.go b/main.go
index 6e856599..11b0798f 100644
--- a/main.go
+++ b/main.go
@@ -43,6 +43,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 
 	policyv1 "open-cluster-management.io/config-policy-controller/api/v1"
+	policyv1beta1 "open-cluster-management.io/config-policy-controller/api/v1beta1"
 	"open-cluster-management.io/config-policy-controller/controllers"
 	"open-cluster-management.io/config-policy-controller/pkg/common"
 	"open-cluster-management.io/config-policy-controller/pkg/triggeruninstall"
@@ -64,6 +65,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 	utilruntime.Must(policyv1.AddToScheme(scheme))
+	utilruntime.Must(policyv1beta1.AddToScheme(scheme))
 	utilruntime.Must(extensionsv1.AddToScheme(scheme))
 	utilruntime.Must(extensionsv1beta1.AddToScheme(scheme))
 }
@@ -82,6 +84,7 @@ type ctrlOpts struct {
 	enableLease bool
 	enableLeaderElection bool
 	enableMetrics bool
+	enableOperatorPolicy bool
 }
 
 func main() {
@@ -335,10 +338,23 @@
 		SelectorReconciler: &nsSelReconciler,
 		EnableMetrics: opts.enableMetrics,
 	}
+
+	OpReconciler := controllers.OperatorPolicyReconciler{
+		Client: mgr.GetClient(),
+	}
+
 	if err = reconciler.SetupWithManager(mgr); err != nil {
 		log.Error(err, "Unable to create controller", "controller", "ConfigurationPolicy")
 		os.Exit(1)
 	}
+
+	if opts.enableOperatorPolicy {
+		if err = OpReconciler.SetupWithManager(mgr); err != nil {
+			log.Error(err, "Unable to create controller", "controller", "OperatorPolicy")
+			os.Exit(1)
+		}
+	}
+
 	//+kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
@@ -362,6 +378,13 @@
 		managerCancel()
 	}()
 
+	if opts.enableOperatorPolicy {
+		go func() {
+			OpReconciler.PeriodicallyExecOperatorPolicies(opts.frequency, mgr.Elected())
+			managerCancel()
+		}()
+	}
+
 	// This lease is not related to leader election. This is to report the status of the controller
 	// to the addon framework. This can be seen in the "status" section of the ManagedClusterAddOn
 	// resource objects.
@@ -590,6 +613,13 @@
 		"Will scale with concurrency, if not explicitly set.",
 	)
 
+	flags.BoolVar(
+		&opts.enableOperatorPolicy,
+		"enable-operator-policy",
+		false,
+		"Enable operator policy controller",
+	)
+
 	_ = flags.Parse(args)
 
 	// Scale QPS and Burst with concurrency, when they aren't explicitly set.
diff --git a/main_test.go b/main_test.go
index e5175632..d5f01dc9 100644
--- a/main_test.go
+++ b/main_test.go
@@ -21,6 +21,7 @@ func TestRunMain(t *testing.T) {
 		"--leader-elect=false",
 		fmt.Sprintf("--target-kubeconfig-path=%s", os.Getenv("TARGET_KUBECONFIG_PATH")),
 		"--log-level=1",
+		"--enable-operator-policy=true",
 	)
 
 	main()
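
For anyone exercising this change out of tree, the sketch below condenses the wiring that the main.go hunks above perform when --enable-operator-policy is set. It is a minimal illustration only: the standalone main package, the manager options, and the 10-second evaluation frequency are assumptions for the sketch, not values taken from this commit.

// Illustrative wiring only; it mirrors what this patch adds to main.go when
// the --enable-operator-policy flag is enabled. The manager options and the
// frequency value of 10 seconds are assumptions, not part of the commit.
package main

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	policyv1beta1 "open-cluster-management.io/config-policy-controller/api/v1beta1"
	"open-cluster-management.io/config-policy-controller/controllers"
)

func main() {
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	// The OperatorPolicy types must be registered before the controller starts,
	// matching the policyv1beta1.AddToScheme call this patch adds to init().
	utilruntime.Must(policyv1beta1.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		os.Exit(1)
	}

	// Register the new controller, as main.go does behind opts.enableOperatorPolicy.
	opReconciler := controllers.OperatorPolicyReconciler{Client: mgr.GetClient()}
	if err := opReconciler.SetupWithManager(mgr); err != nil {
		os.Exit(1)
	}

	// The periodic loop blocks on mgr.Elected() before evaluating policies,
	// so it is safe to start it before the manager itself.
	go opReconciler.PeriodicallyExecOperatorPolicies(10, mgr.Elected())

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}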