Skip to content

Commit

Permalink
DRAFT: a NamespaceSelector controller
Browse files Browse the repository at this point in the history
It uses one watch on Namespaces, and then handles the LabelSelector
calculations locally. This lets it handle Include and Exclude nicely,
and understand *exactly* when the namespace selection has actually
changed (see the last two tests for behavior that is improved in this
implementation).

It's still a little rough. The `NamespaceSelectorReconciler` may not
belong in its current package.
  • Loading branch information
JustinKuli committed Aug 8, 2023
1 parent 6e32ff4 commit 569765a
Show file tree
Hide file tree
Showing 5 changed files with 280 additions and 19 deletions.
19 changes: 16 additions & 3 deletions controllers/configurationpolicy_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ type ConfigurationPolicyReconciler struct {
TargetK8sClient kubernetes.Interface
TargetK8sDynamicClient dynamic.Interface
TargetK8sConfig *rest.Config
SelectorReconciler *common.NamespaceSelectorReconciler // it's a pointer because it has a mutex
// Whether custom metrics collection is enabled
EnableMetrics bool
discoveryInfo
Expand Down Expand Up @@ -149,6 +150,8 @@ func (r *ConfigurationPolicyReconciler) Reconcile(ctx context.Context, request c
prometheus.Labels{"policy": fmt.Sprintf("%s/%s", request.Namespace, request.Name)})
_ = policyUserErrorsCounter.DeletePartialMatch(prometheus.Labels{"template": request.Name})
_ = policySystemErrorsCounter.DeletePartialMatch(prometheus.Labels{"template": request.Name})

r.SelectorReconciler.Delete(request.Name)
}

return reconcile.Result{}, nil
Expand Down Expand Up @@ -236,7 +239,7 @@ func (r *ConfigurationPolicyReconciler) PeriodicallyExecConfigPolicies(

for i := range policiesList.Items {
policy := policiesList.Items[i]
if !shouldEvaluatePolicy(&policy, cleanupImmediately) {
if !r.shouldEvaluatePolicy(&policy, cleanupImmediately) {
continue
}

Expand Down Expand Up @@ -346,7 +349,9 @@ func (r *ConfigurationPolicyReconciler) refreshDiscoveryInfo() error {
// met, then that will also trigger an evaluation. If cleanupImmediately is true, then only policies
// with finalizers will be ready for evaluation regardless of the last evaluation.
// cleanupImmediately should be set true when the controller is getting uninstalled.
func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediately bool) bool {
func (r *ConfigurationPolicyReconciler) shouldEvaluatePolicy(
policy *policyv1.ConfigurationPolicy, cleanupImmediately bool,
) bool {
log := log.WithValues("policy", policy.GetName())

// If it's time to clean up such as when the config-policy-controller is being uninstalled, only evaluate policies
Expand Down Expand Up @@ -392,6 +397,12 @@ func shouldEvaluatePolicy(policy *policyv1.ConfigurationPolicy, cleanupImmediate
return true
}

if r.SelectorReconciler.HasUpdate(policy.Name) {
log.V(2).Info("There was an update for this policy's namespaces. Will evaluate it now.")

return true
}

if errors.Is(err, policyv1.ErrIsNever) {
log.Info("Skipping the policy evaluation due to the spec.evaluationInterval value being set to never")

Expand Down Expand Up @@ -473,11 +484,13 @@ func (r *ConfigurationPolicyReconciler) getObjectTemplateDetails(
selector := plc.Spec.NamespaceSelector
// If MatchLabels/MatchExpressions/Include were not provided, return no namespaces
if selector.MatchLabels == nil && selector.MatchExpressions == nil && len(selector.Include) == 0 {
r.SelectorReconciler.Delete(plc.Name)

log.Info("namespaceSelector is empty. Skipping namespace retrieval.")
} else {
// If an error occurred in the NamespaceSelector, update the policy status and abort
var err error
selectedNamespaces, err = common.GetSelectedNamespaces(r.TargetK8sClient, selector)
selectedNamespaces, err = r.SelectorReconciler.Get(plc.Name, plc.Spec.NamespaceSelector)
if err != nil {
errMsg := "Error filtering namespaces with provided namespaceSelector"
log.Error(
Expand Down
4 changes: 3 additions & 1 deletion controllers/configurationpolicy_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -954,6 +954,8 @@ func TestShouldEvaluatePolicy(t *testing.T) {
},
}

r := &ConfigurationPolicyReconciler{}

for _, test := range tests {
test := test

Expand All @@ -970,7 +972,7 @@ func TestShouldEvaluatePolicy(t *testing.T) {
policy.ObjectMeta.DeletionTimestamp = test.deletionTimestamp
policy.ObjectMeta.Finalizers = test.finalizers

if actual := shouldEvaluatePolicy(policy, test.cleanupImmediately); actual != test.expected {
if actual := r.shouldEvaluatePolicy(policy, test.cleanupImmediately); actual != test.expected {
t.Fatalf("expected %v but got %v", test.expected, actual)
}
},
Expand Down
67 changes: 62 additions & 5 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"os"
"runtime"
"strings"
"sync"
"time"

"github.com/go-logr/zapr"
Expand Down Expand Up @@ -252,12 +253,14 @@ func main() {
var targetK8sClient kubernetes.Interface
var targetK8sDynamicClient dynamic.Interface
var targetK8sConfig *rest.Config
var nsSelMgr manager.Manager

if opts.targetKubeConfig == "" {
targetK8sConfig = cfg
targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig)
targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig)
} else {
nsSelMgr = mgr
} else { // "Hosted mode"
var err error

targetK8sConfig, err = clientcmd.BuildConfigFromFlags("", opts.targetKubeConfig)
Expand All @@ -266,9 +269,18 @@ func main() {
os.Exit(1)
}

targetK8sConfig.Burst = int(opts.clientBurst)
targetK8sConfig.QPS = opts.clientQPS

targetK8sClient = kubernetes.NewForConfigOrDie(targetK8sConfig)
targetK8sDynamicClient = dynamic.NewForConfigOrDie(targetK8sConfig)

nsSelMgr, err = manager.New(targetK8sConfig, manager.Options{})
if err != nil {
log.Error(err, "Unable to create manager from target kube config")
os.Exit(1)
}

log.Info(
"Overrode the target Kubernetes cluster for policy evaluation and enforcement",
"path", opts.targetKubeConfig,
Expand All @@ -277,6 +289,16 @@ func main() {

instanceName, _ := os.Hostname() // on an error, instanceName will be empty, which is ok

// A separate controller is needed because of hosted mode concerns.

nsSelReconciler := common.NamespaceSelectorReconciler{
Client: nsSelMgr.GetClient(),
}
if err = nsSelReconciler.SetupWithManager(nsSelMgr); err != nil {
log.Error(err, "Unable to create controller", "controller", "NamespaceSelector")
os.Exit(1)
}

reconciler := controllers.ConfigurationPolicyReconciler{
Client: mgr.GetClient(),
DecryptionConcurrency: opts.decryptionConcurrency,
Expand All @@ -287,6 +309,7 @@ func main() {
TargetK8sClient: targetK8sClient,
TargetK8sDynamicClient: targetK8sDynamicClient,
TargetK8sConfig: targetK8sConfig,
SelectorReconciler: &nsSelReconciler,
EnableMetrics: opts.enableMetrics,
}
if err = reconciler.SetupWithManager(mgr); err != nil {
Expand All @@ -309,7 +332,7 @@ func main() {
managerCtx, managerCancel := context.WithCancel(context.Background())

// PeriodicallyExecConfigPolicies is the go-routine that periodically checks the policies
log.V(1).Info("Perodically processing Configuration Policies", "frequency", opts.frequency)
log.V(1).Info("Periodically processing Configuration Policies", "frequency", opts.frequency)

go func() {
reconciler.PeriodicallyExecConfigPolicies(terminatingCtx, opts.frequency, mgr.Elected())
Expand Down Expand Up @@ -350,10 +373,44 @@ func main() {
log.Info("Addon status reporting is not enabled")
}

log.Info("Starting manager")
log.Info("Starting managers")

var wg sync.WaitGroup
var errorExit bool

wg.Add(1)

go func() {
if err := mgr.Start(managerCtx); err != nil {
log.Error(err, "Problem running manager")

managerCancel()

errorExit = true
}

wg.Done()
}()

if opts.targetKubeConfig != "" { // "hosted mode"
wg.Add(1)

go func() {
if err := nsSelMgr.Start(managerCtx); err != nil {
log.Error(err, "Problem running manager")

managerCancel()

errorExit = true
}

wg.Done()
}()
}

wg.Wait()

if err := mgr.Start(managerCtx); err != nil {
log.Error(err, "Problem running manager")
if errorExit {
os.Exit(1)
}
}
Expand Down
Loading

0 comments on commit 569765a

Please sign in to comment.