diff --git a/config/manager/deployment.yaml b/config/manager/deployment.yaml
index 710a1cc1..e1c604d9 100644
--- a/config/manager/deployment.yaml
+++ b/config/manager/deployment.yaml
@@ -28,20 +28,27 @@ spec:
           ports:
             - containerPort: 8080
               name: http-prom
+            - containerPort: 9440
+              name: healthz
+              protocol: TCP
           env:
             - name: RUNTIME_NAMESPACE
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.namespace
-          livenessProbe:
-            httpGet:
-              port: http-prom
-              path: /metrics
           args:
             - --watch-all-namespaces
             - --log-level=info
            - --log-json
            - --enable-leader-election
+          readinessProbe:
+            httpGet:
+              path: /readyz
+              port: healthz
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: healthz
           resources:
             limits:
               cpu: 1000m
diff --git a/controllers/kustomization_controller.go b/controllers/kustomization_controller.go
index 003c721b..91617f5f 100644
--- a/controllers/kustomization_controller.go
+++ b/controllers/kustomization_controller.go
@@ -42,6 +42,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/kustomize/api/filesys"
 	"sigs.k8s.io/kustomize/api/krusty"
 	kustypes "sigs.k8s.io/kustomize/api/types"
@@ -83,47 +84,18 @@ func (r *KustomizationReconciler) Reconcile(req ctrl.Request) (ctrl.Result, erro
 	log := r.Log.WithValues("controller", strings.ToLower(kustomizev1.KustomizationKind), "request", req.NamespacedName)
 
-	// Examine if the object is under deletion
-	if kustomization.ObjectMeta.DeletionTimestamp.IsZero() {
-		// The object is not being deleted, so if it does not have our finalizer,
-		// then lets add the finalizer and update the object. This is equivalent
-		// registering our finalizer.
-		if !containsString(kustomization.ObjectMeta.Finalizers, kustomizev1.KustomizationFinalizer) {
-			kustomization.ObjectMeta.Finalizers = append(kustomization.ObjectMeta.Finalizers, kustomizev1.KustomizationFinalizer)
-			if err := r.Update(ctx, &kustomization); err != nil {
-				log.Error(err, "unable to register finalizer")
-				return ctrl.Result{}, err
-			}
+	// Add our finalizer if it does not exist
+	if !controllerutil.ContainsFinalizer(&kustomization, kustomizev1.KustomizationFinalizer) {
+		controllerutil.AddFinalizer(&kustomization, kustomizev1.KustomizationFinalizer)
+		if err := r.Update(ctx, &kustomization); err != nil {
+			log.Error(err, "unable to register finalizer")
+			return ctrl.Result{}, err
 		}
-	} else {
-		// The object is being deleted
-		if containsString(kustomization.ObjectMeta.Finalizers, kustomizev1.KustomizationFinalizer) {
-			// Our finalizer is still present, so lets handle garbage collection
-			if kustomization.Spec.Prune && !kustomization.Spec.Suspend {
-				// create any necessary kube-clients
-				client, _, err := r.newKustomizationClient(kustomization)
-				if err != nil {
-					err = fmt.Errorf("Failed to build kube client for Kustomization: %w", err)
-					log.Error(err, "Unable to prune for finalizer")
-					return ctrl.Result{}, err
-				}
-				if err := r.prune(client, kustomization, kustomization.Status.Snapshot, true); err != nil {
-					r.event(kustomization, kustomization.Status.LastAppliedRevision, events.EventSeverityError, "pruning for deleted resource failed", nil)
-					// Return the error so we retry the failed garbage collection
-					return ctrl.Result{}, err
-				}
-			}
-			// Record deleted status
-			r.recordReadiness(kustomization, true)
+	}
 
-			// Remove our finalizer from the list and update it
-			kustomization.ObjectMeta.Finalizers = removeString(kustomization.ObjectMeta.Finalizers, kustomizev1.KustomizationFinalizer)
-			if err := r.Update(ctx, &kustomization); err != nil {
-				return ctrl.Result{}, err
-			}
-			// Stop reconciliation as the object is being deleted
-			return ctrl.Result{}, nil
-		}
+	// Examine if the object is under deletion
+	if !kustomization.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, log, kustomization)
 	}
 
 	if kustomization.Spec.Suspend {
@@ -517,6 +489,35 @@ func (r *KustomizationReconciler) build(kustomization kustomizev1.Kustomization,
 	return kustomizev1.NewSnapshot(resources, checksum)
 }
 
+func (r *KustomizationReconciler) reconcileDelete(ctx context.Context, log logr.Logger, kustomization kustomizev1.Kustomization) (ctrl.Result, error) {
+	if kustomization.Spec.Prune && !kustomization.Spec.Suspend {
+		// create any necessary kube-clients
+		client, _, err := r.newKustomizationClient(kustomization)
+		if err != nil {
+			err = fmt.Errorf("failed to build kube client for Kustomization: %w", err)
+			log.Error(err, "Unable to prune for finalizer")
+			return ctrl.Result{}, err
+		}
+		if err := r.prune(client, kustomization, kustomization.Status.Snapshot, true); err != nil {
+			r.event(kustomization, kustomization.Status.LastAppliedRevision, events.EventSeverityError, "pruning for deleted resource failed", nil)
+			// Return the error so we retry the failed garbage collection
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Record deleted status
+	r.recordReadiness(kustomization, true)
+
+	// Remove our finalizer from the list and update it
+	controllerutil.RemoveFinalizer(&kustomization, kustomizev1.KustomizationFinalizer)
+	if err := r.Update(ctx, &kustomization); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Stop reconciliation as the object is being deleted
+	return ctrl.Result{}, nil
+}
+
 func (r *KustomizationReconciler) validate(kustomization kustomizev1.Kustomization, dirPath string) error {
 	if kustomization.Spec.Validation == "" {
 		return nil
@@ -904,13 +905,3 @@ func containsString(slice []string, s string) bool {
 	}
 	return false
 }
-
-func removeString(slice []string, s string) (result []string) {
-	for _, item := range slice {
-		if item == s {
-			continue
-		}
-		result = append(result, item)
-	}
-	return
-}
diff --git a/main.go b/main.go
index 0b3dd7ed..251f00aa 100644
--- a/main.go
+++ b/main.go
@@ -26,6 +26,7 @@ import (
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
 
 	kustomizev1 "github.com/fluxcd/kustomize-controller/api/v1beta1"
@@ -54,6 +55,7 @@ func main() {
 	var (
 		metricsAddr          string
 		eventsAddr           string
+		healthAddr           string
 		enableLeaderElection bool
 		concurrent           int
 		requeueDependency    time.Duration
@@ -64,6 +66,7 @@ func main() {
 	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
 	flag.StringVar(&eventsAddr, "events-addr", "", "The address of the events receiver.")
+	flag.StringVar(&healthAddr, "health-addr", ":9440", "The address the health endpoint binds to.")
 	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
 		"Enable leader election for controller manager. "+
 			"Enabling this will ensure there is only one active controller manager.")
@@ -96,19 +99,22 @@ func main() {
 	}
 
 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-		Scheme:             scheme,
-		MetricsBindAddress: metricsAddr,
-		Port:               9443,
-		LeaderElection:     enableLeaderElection,
-		LeaderElectionID:   "7593cc5d.fluxcd.io",
-		Namespace:          watchNamespace,
-		Logger:             ctrl.Log,
+		Scheme:                 scheme,
+		MetricsBindAddress:     metricsAddr,
+		HealthProbeBindAddress: healthAddr,
+		Port:                   9443,
+		LeaderElection:         enableLeaderElection,
+		LeaderElectionID:       "7593cc5d.fluxcd.io",
+		Namespace:              watchNamespace,
+		Logger:                 ctrl.Log,
 	})
 	if err != nil {
 		setupLog.Error(err, "unable to start manager")
 		os.Exit(1)
 	}
 
+	setupChecks(mgr)
+
 	if err = (&controllers.GitRepositoryWatcher{
 		Client: mgr.GetClient(),
 		Log:    ctrl.Log.WithName("controllers").WithName(sourcev1.GitRepositoryKind),
@@ -148,3 +154,15 @@ func main() {
 		os.Exit(1)
 	}
 }
+
+func setupChecks(mgr ctrl.Manager) {
+	if err := mgr.AddReadyzCheck("ping", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to create ready check")
+		os.Exit(1)
+	}
+
+	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
+		setupLog.Error(err, "unable to create health check")
+		os.Exit(1)
+	}
+}