From bd591530aa0d49bbe3a41a59d428c57576d621a2 Mon Sep 17 00:00:00 2001
From: Chris Bandy
Date: Thu, 12 Dec 2024 13:35:19 -0600
Subject: [PATCH] Separate postgrescluster.Reconciler client concerns

We want to be mindful of our interactions with the Kubernetes API, and
these interfaces will help keep functions focused. These interfaces are
also narrower than client.Reader and client.Writer and may help us keep
RBAC markers accurate.

A new constructor populates these fields with a single client.Client.
The client.WithFieldOwner constructor allows us to drop our Owner field
and patch method.

This allows `make check` to cover 9% more of the "postgrescluster" package.
---
 cmd/postgres-operator/main.go                 |  19 +-
 internal/controller/postgrescluster/apply.go  |   8 +-
 .../controller/postgrescluster/apply_test.go  |  33 ++--
 .../postgrescluster/cluster_test.go           |  50 +++--
 .../controller/postgrescluster/controller.go  |  69 +++----
 .../postgrescluster/controller_ref_manager.go |  13 +-
 .../controller_ref_manager_test.go            |  13 +-
 .../postgrescluster/controller_test.go        |  27 +--
 internal/controller/postgrescluster/delete.go |   4 +-
 .../controller/postgrescluster/instance.go    |  20 +-
 .../postgrescluster/instance_rollout_test.go  |  13 +-
 .../postgrescluster/instance_test.go          |  44 ++---
 .../controller/postgrescluster/patroni.go     |   8 +-
 .../postgrescluster/patroni_test.go           |  19 +-
 .../controller/postgrescluster/pgadmin.go     |  14 +-
 .../postgrescluster/pgadmin_test.go           |  30 ++-
 .../controller/postgrescluster/pgbackrest.go  |  58 +++---
 .../postgrescluster/pgbackrest_test.go        | 109 +++++------
 .../controller/postgrescluster/pgbouncer.go   |  10 +-
 .../postgrescluster/pgbouncer_test.go         |  28 ++-
 .../controller/postgrescluster/pgmonitor.go   |   6 +-
 .../postgrescluster/pgmonitor_test.go         |  10 +-
 internal/controller/postgrescluster/pki.go    |   4 +-
 .../controller/postgrescluster/pki_test.go    |   4 +-
 .../pod_disruption_budget_test.go             |   5 +-
 .../controller/postgrescluster/postgres.go    |   6 +-
 .../postgrescluster/postgres_test.go          |  17 +-
 .../controller/postgrescluster/snapshots.go   |  12 +-
 .../postgrescluster/snapshots_test.go         | 179 +++++++++---------
 .../controller/postgrescluster/volumes.go     |  14 +-
 .../postgrescluster/volumes_test.go           |  12 +-
 31 files changed, 417 insertions(+), 441 deletions(-)

diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go
index dd321d5541..50ac74943d 100644
--- a/cmd/postgres-operator/main.go
+++ b/cmd/postgres-operator/main.go
@@ -34,7 +34,6 @@ import (
     "github.com/crunchydata/postgres-operator/internal/initialize"
     "github.com/crunchydata/postgres-operator/internal/kubernetes"
     "github.com/crunchydata/postgres-operator/internal/logging"
-    "github.com/crunchydata/postgres-operator/internal/naming"
     "github.com/crunchydata/postgres-operator/internal/registration"
     "github.com/crunchydata/postgres-operator/internal/tracing"
     "github.com/crunchydata/postgres-operator/internal/upgradecheck"
@@ -256,8 +255,8 @@ func main() {
     }

     // add all PostgreSQL Operator controllers to the runtime manager
-    addControllersToManager(manager, log, registrar)
     must(pgupgrade.ManagedReconciler(manager, registrar))
+    must(postgrescluster.ManagedReconciler(manager, registrar))
     must(standalone_pgadmin.ManagedReconciler(manager))
     must(crunchybridgecluster.ManagedReconciler(manager, func() bridge.ClientInterface {
         return bridgeClient()
@@ -306,19 +305,3 @@ func main() {
         log.Info("shutdown complete")
     }
 }
-
-// addControllersToManager adds all PostgreSQL Operator controllers to the provided controller
-// runtime manager.
-func addControllersToManager(mgr runtime.Manager, log logging.Logger, reg registration.Registration) { - pgReconciler := &postgrescluster.Reconciler{ - Client: mgr.GetClient(), - Owner: naming.ControllerPostgresCluster, - Recorder: mgr.GetEventRecorderFor(naming.ControllerPostgresCluster), - Registration: reg, - } - - if err := pgReconciler.SetupWithManager(mgr); err != nil { - log.Error(err, "unable to create PostgresCluster controller") - os.Exit(1) - } -} diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index ce3d2fb9e5..88659cf396 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -16,8 +16,8 @@ import ( ) // apply sends an apply patch to object's endpoint in the Kubernetes API and -// updates object with any returned content. The fieldManager is set to -// r.Owner and the force parameter is true. +// updates object with any returned content. The fieldManager is set by +// r.Writer and the force parameter is true. // - https://docs.k8s.io/reference/using-api/server-side-apply/#managers // - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts func (r *Reconciler) apply(ctx context.Context, object client.Object) error { @@ -32,7 +32,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // Send the apply-patch with force=true. if err == nil { - err = r.patch(ctx, object, apply, client.ForceOwnership) + err = r.Writer.Patch(ctx, object, apply, client.ForceOwnership) } // Some fields cannot be server-side applied correctly. When their outcome @@ -44,7 +44,7 @@ func (r *Reconciler) apply(ctx context.Context, object client.Object) error { // Send the json-patch when necessary. if err == nil && !patch.IsEmpty() { - err = r.patch(ctx, object, patch) + err = r.Writer.Patch(ctx, object, patch) } return err } diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index 85dbca995d..1346db28b1 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -44,7 +44,8 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, err) t.Run("ObjectMeta", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} constructor := func() *corev1.ConfigMap { var cm corev1.ConfigMap cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) @@ -55,7 +56,7 @@ func TestServerSideApply(t *testing.T) { // Create the object. before := constructor() - assert.NilError(t, cc.Patch(ctx, before, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, before, client.Apply)) assert.Assert(t, before.GetResourceVersion() != "") // Allow the Kubernetes API clock to advance. @@ -63,7 +64,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply changes the ResourceVersion inadvertently. after := constructor() - assert.NilError(t, cc.Patch(ctx, after, client.Apply, reconciler.Owner)) + assert.NilError(t, cc.Patch(ctx, after, client.Apply)) assert.Assert(t, after.GetResourceVersion() != "") switch { @@ -87,7 +88,8 @@ func TestServerSideApply(t *testing.T) { }) t.Run("ControllerReference", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} // Setup two possible controllers. 
controller1 := new(corev1.ConfigMap) @@ -115,7 +117,7 @@ func TestServerSideApply(t *testing.T) { assert.NilError(t, controllerutil.SetControllerReference(controller2, applied, cc.Scheme())) - err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership, reconciler.Owner) + err1 := cc.Patch(ctx, applied, client.Apply, client.ForceOwnership) // Patch not accepted; the ownerReferences field is invalid. assert.Assert(t, apierrors.IsInvalid(err1), "got %#v", err1) @@ -154,20 +156,21 @@ func TestServerSideApply(t *testing.T) { return &sts } - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} upstream := constructor("status-upstream") // The structs defined in "k8s.io/api/apps/v1" marshal empty status fields. switch { case serverVersion.LessThan(version.MustParseGeneric("1.22")): assert.ErrorContains(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner), + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership), "field not declared in schema", "expected https://issue.k8s.io/109210") default: assert.NilError(t, - cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, upstream, client.Apply, client.ForceOwnership)) } // Our apply method generates the correct apply-patch. @@ -187,7 +190,8 @@ func TestServerSideApply(t *testing.T) { } t.Run("wrong-keys", func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} intent := constructor("some-selector") intent.Spec.Selector = map[string]string{"k1": "v1"} @@ -195,7 +199,7 @@ func TestServerSideApply(t *testing.T) { // Create the Service. before := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) // Something external mucks it up. assert.NilError(t, @@ -206,7 +210,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply cannot correct it in old versions of Kubernetes. after := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) switch { case serverVersion.LessThan(version.MustParseGeneric("1.22")): @@ -248,7 +252,8 @@ func TestServerSideApply(t *testing.T) { {"empty", make(map[string]string)}, } { t.Run(tt.name, func(t *testing.T) { - reconciler := Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + cc := client.WithFieldOwner(cc, t.Name()) + reconciler := Reconciler{Writer: cc} intent := constructor(tt.name + "-selector") intent.Spec.Selector = tt.selector @@ -256,7 +261,7 @@ func TestServerSideApply(t *testing.T) { // Create the Service. before := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, before, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, before, client.Apply, client.ForceOwnership)) // Something external mucks it up. assert.NilError(t, @@ -267,7 +272,7 @@ func TestServerSideApply(t *testing.T) { // client.Apply cannot correct it. 
after := intent.DeepCopy() assert.NilError(t, - cc.Patch(ctx, after, client.Apply, client.ForceOwnership, reconciler.Owner)) + cc.Patch(ctx, after, client.Apply, client.ForceOwnership)) assert.Assert(t, len(after.Spec.Selector) != len(intent.Spec.Selector), "got %v", after.Spec.Selector) diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index 5fa92d32cf..aab9e75fc2 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -82,19 +82,20 @@ func TestCustomLabels(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -168,7 +169,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -216,7 +217,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -263,7 +264,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -298,7 +299,7 @@ func TestCustomLabels(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -320,19 +321,20 @@ func TestCustomAnnotations(t *testing.T) { require.ParallelCapacity(t, 2) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -407,7 +409,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -455,7 +457,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -502,7 +504,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -537,7 +539,7 @@ func TestCustomAnnotations(t *testing.T) { for _, gvk := range gvks { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -554,10 +556,7 @@ func TestCustomAnnotations(t *testing.T) { } func TestGenerateClusterPrimaryService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns2" @@ -658,7 +657,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -676,10 +675,7 @@ func TestReconcileClusterPrimaryService(t *testing.T) { } func TestGenerateClusterReplicaServiceIntent(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "ns1" diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index 98093e8ce2..0f835695be 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -45,12 +45,25 @@ const controllerName = naming.ControllerPostgresCluster // Reconciler holds resources for the PostgresCluster reconciler type Reconciler struct { - Client client.Client - Owner client.FieldOwner PodExec func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error + + Reader interface { + Get(context.Context, client.ObjectKey, client.Object, ...client.GetOption) error + List(context.Context, client.ObjectList, ...client.ListOption) error + } + Writer interface { + Delete(context.Context, client.Object, ...client.DeleteOption) error + DeleteAllOf(context.Context, 
client.Object, ...client.DeleteAllOfOption) error + Patch(context.Context, client.Object, client.Patch, ...client.PatchOption) error + Update(context.Context, client.Object, ...client.UpdateOption) error + } + StatusWriter interface { + Patch(context.Context, client.Object, client.Patch, ...client.SubResourcePatchOption) error + } + Recorder record.EventRecorder Registration registration.Registration } @@ -69,7 +82,7 @@ func (r *Reconciler) Reconcile( // get the postgrescluster from the cache cluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, request.NamespacedName, cluster); err != nil { + if err := r.Reader.Get(ctx, request.NamespacedName, cluster); err != nil { // NotFound cannot be fixed by requeuing so ignore it. During background // deletion, we receive delete events from cluster's dependents after // cluster is deleted. @@ -175,8 +188,7 @@ func (r *Reconciler) Reconcile( if !equality.Semantic.DeepEqual(before.Status, cluster.Status) { // NOTE(cbandy): Kubernetes prior to v1.16.10 and v1.17.6 does not track // managed fields on the status subresource: https://issue.k8s.io/88901 - if err := r.Client.Status().Patch( - ctx, cluster, client.MergeFrom(before), r.Owner); err != nil { + if err := r.StatusWriter.Patch(ctx, cluster, client.MergeFrom(before)); err != nil { log.Error(err, "patching cluster status") return err } @@ -400,24 +412,12 @@ func (r *Reconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Writer.Delete(ctx, object, exactly) } return nil } -// patch sends patch to object's endpoint in the Kubernetes API and updates -// object with any returned content. The fieldManager is set to r.Owner, but -// can be overridden in options. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -func (r *Reconciler) patch( - ctx context.Context, object client.Object, - patch client.Patch, options ...client.PatchOption, -) error { - options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) -} - // The owner reference created by controllerutil.SetControllerReference blocks // deletion. 
The OwnerReferencesPermissionEnforcement plugin requires that the // creator of such a reference have either "delete" permission on the owner or @@ -431,7 +431,7 @@ func (r *Reconciler) patch( func (r *Reconciler) setControllerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, runtime.Scheme) } // setOwnerReference sets an OwnerReference on the object without setting the @@ -439,7 +439,7 @@ func (r *Reconciler) setControllerReference( func (r *Reconciler) setOwnerReference( owner *v1beta1.PostgresCluster, controlled client.Object, ) error { - return controllerutil.SetOwnerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetOwnerReference(owner, controlled, runtime.Scheme) } // +kubebuilder:rbac:groups="",resources="configmaps",verbs={get,list,watch} @@ -456,17 +456,22 @@ func (r *Reconciler) setOwnerReference( // +kubebuilder:rbac:groups="batch",resources="cronjobs",verbs={get,list,watch} // +kubebuilder:rbac:groups="policy",resources="poddisruptionbudgets",verbs={get,list,watch} -// SetupWithManager adds the PostgresCluster controller to the provided runtime manager -func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { - if r.PodExec == nil { - var err error - r.PodExec, err = runtime.NewPodExecutor(mgr.GetConfig()) - if err != nil { - return err - } +// ManagedReconciler creates a [Reconciler] and adds it to m. +func ManagedReconciler(m manager.Manager, r registration.Registration) error { + exec, err := runtime.NewPodExecutor(m.GetConfig()) + kubernetes := client.WithFieldOwner(m.GetClient(), naming.ControllerPostgresCluster) + recorder := m.GetEventRecorderFor(naming.ControllerPostgresCluster) + + reconciler := &Reconciler{ + PodExec: exec, + Reader: kubernetes, + Recorder: recorder, + Registration: r, + StatusWriter: kubernetes.Status(), + Writer: kubernetes, } - return builder.ControllerManagedBy(mgr). + return errors.Join(err, builder.ControllerManagedBy(m). For(&v1beta1.PostgresCluster{}). Owns(&corev1.ConfigMap{}). Owns(&corev1.Endpoints{}). @@ -481,8 +486,8 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { Owns(&rbacv1.RoleBinding{}). Owns(&batchv1.CronJob{}). Owns(&policyv1.PodDisruptionBudget{}). - Watches(&corev1.Pod{}, r.watchPods()). + Watches(&corev1.Pod{}, reconciler.watchPods()). Watches(&appsv1.StatefulSet{}, - r.controllerRefHandlerFuncs()). // watch all StatefulSets - Complete(r) + reconciler.controllerRefHandlerFuncs()). 
// watch all StatefulSets + Complete(reconciler)) } diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 6caa58b85d..fc814259bf 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -12,7 +12,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -28,8 +27,7 @@ import ( func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, obj client.Object) error { - if err := controllerutil.SetControllerReference(postgresCluster, obj, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, obj); err != nil { return err } @@ -39,10 +37,7 @@ func (r *Reconciler) adoptObject(ctx context.Context, postgresCluster *v1beta1.P return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, - patchBytes), &client.PatchOptions{ - FieldManager: controllerName, - }) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patchBytes)) } // claimObject is responsible for adopting or releasing Objects based on their current @@ -129,7 +124,7 @@ func (r *Reconciler) getPostgresClusterForObject(ctx context.Context, } postgresCluster := &v1beta1.PostgresCluster{} - if err := r.Client.Get(ctx, types.NamespacedName{ + if err := r.Reader.Get(ctx, types.NamespacedName{ Name: clusterName, Namespace: obj.GetNamespace(), }, postgresCluster); err != nil { @@ -175,7 +170,7 @@ func (r *Reconciler) releaseObject(ctx context.Context, return err } - return r.Client.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) + return r.Writer.Patch(ctx, obj, client.RawPatch(types.StrategicMergePatchType, patch)) } // controllerRefHandlerFuncs returns the handler funcs that should be utilized to watch diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index fa8450c5d9..ea778d2ef6 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -22,7 +22,10 @@ func TestManageControllerRefs(t *testing.T) { require.ParallelCapacity(t, 1) ctx := context.Background() - r := &Reconciler{Client: tClient} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } clusterName := "hippo" cluster := testCluster() @@ -56,7 +59,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "adopt" obj.Labels = map[string]string{naming.LabelCluster: clusterName} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -97,7 +100,7 @@ func TestManageControllerRefs(t *testing.T) { BlockOwnerDeletion: &isTrue, }) - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } @@ -120,7 +123,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-labels-refs" obj.Labels = map[string]string{"ignore-label": "ignore-value"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != 
nil { t.Error(err) } @@ -143,7 +146,7 @@ func TestManageControllerRefs(t *testing.T) { obj.Name = "ignore-no-postgrescluster" obj.Labels = map[string]string{naming.LabelCluster: "nonexistent"} - if err := r.Client.Create(ctx, obj); err != nil { + if err := tClient.Create(ctx, obj); err != nil { t.Error(err) } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index 36759cd784..a6f237b81a 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -39,7 +39,7 @@ func TestDeleteControlled(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := Reconciler{Client: cc} + reconciler := Reconciler{Writer: cc} cluster := testCluster() cluster.Namespace = ns.Name @@ -118,6 +118,7 @@ spec: var _ = Describe("PostgresCluster Reconciler", func() { var test struct { Namespace *corev1.Namespace + Owner string Reconciler Reconciler Recorder *record.FakeRecorder } @@ -129,13 +130,17 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Namespace.Name = "postgres-operator-test-" + rand.String(6) Expect(suite.Client.Create(ctx, test.Namespace)).To(Succeed()) + test.Owner = "asdf" test.Recorder = record.NewFakeRecorder(100) test.Recorder.IncludeObject = true - test.Reconciler.Client = suite.Client - test.Reconciler.Owner = "asdf" + client := client.WithFieldOwner(suite.Client, test.Owner) + + test.Reconciler.Reader = client test.Reconciler.Recorder = test.Recorder test.Reconciler.Registration = nil + test.Reconciler.StatusWriter = client.Status() + test.Reconciler.Writer = client }) AfterEach(func() { @@ -284,7 +289,7 @@ spec: )) Expect(ccm.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -308,7 +313,7 @@ spec: )) Expect(cps.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -347,7 +352,7 @@ spec: // - https://pr.k8s.io/100970 Expect(existing.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -365,7 +370,7 @@ spec: default: Expect(existing.ManagedFields).To(ContainElements( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -378,7 +383,7 @@ spec: })), }), MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "FieldsV1": PointTo(MatchAllFields(Fields{ "Raw": WithTransform(func(in []byte) (out map[string]any) { Expect(yaml.Unmarshal(in, &out)).To(Succeed()) @@ -409,7 +414,7 @@ spec: )) Expect(ds.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -501,7 +506,7 @@ spec: )) 
Expect(icm.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) @@ -522,7 +527,7 @@ spec: )) Expect(instance.ManagedFields).To(ContainElement( MatchFields(IgnoreExtras, Fields{ - "Manager": Equal(string(test.Reconciler.Owner)), + "Manager": Equal(test.Owner), "Operation": Equal(metav1.ManagedFieldsOperationApply), }), )) diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index a1a4d322dd..74a786dd38 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -58,7 +58,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = append(intent.Finalizers, naming.Finalizer) - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller can do what they like or requeue upon error. @@ -96,7 +96,7 @@ func (r *Reconciler) handleDelete( // Make another copy so that Patch doesn't write back to cluster. intent := before.DeepCopy() intent.Finalizers = finalizers.Delete(naming.Finalizer).List() - err := errors.WithStack(r.patch(ctx, intent, + err := errors.WithStack(r.Writer.Patch(ctx, intent, client.MergeFromWithOptions(before, client.MergeFromWithOptimisticLock{}))) // The caller should wait for further events or requeue upon error. diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 68838f5c40..89a1cc25f8 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -299,14 +299,14 @@ func (r *Reconciler) observeInstances( selector, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) } if err == nil { err = errors.WithStack( - r.Client.List(ctx, runners, + r.Reader.List(ctx, runners, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -386,7 +386,7 @@ func (r *Reconciler) deleteInstances( instances, err := naming.AsSelector(naming.ClusterInstances(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, pods, + r.Reader.List(ctx, pods, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: instances}, )) @@ -424,7 +424,7 @@ func (r *Reconciler) deleteInstances( // apps/v1.Deployment, apps/v1.ReplicaSet, and apps/v1.StatefulSet all // have a "spec.replicas" field with the same meaning. patch := client.RawPatch(client.Merge.Type(), []byte(`{"spec":{"replicas":0}}`)) - err := errors.WithStack(r.patch(ctx, instance, patch)) + err := errors.WithStack(r.Writer.Patch(ctx, instance, patch)) // When the pod controller is missing, requeue rather than return an // error. 
The garbage collector will stop the pod, and it is not our @@ -500,7 +500,7 @@ func (r *Reconciler) deleteInstance( uList.SetGroupVersionKind(gvk) err = errors.WithStack( - r.Client.List(ctx, uList, + r.Reader.List(ctx, uList, client.InNamespace(cluster.GetNamespace()), client.MatchingLabelsSelector{Selector: selector}, )) @@ -617,7 +617,7 @@ func (r *Reconciler) cleanupPodDisruptionBudgets( pdbList := &policyv1.PodDisruptionBudgetList{} if err == nil { - err = r.Client.List(ctx, pdbList, + err = r.Reader.List(ctx, pdbList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{ Selector: selector, }) @@ -814,7 +814,7 @@ func (r *Reconciler) rolloutInstance( // NOTE(cbandy): This could return an apierrors.IsConflict() which should be // retried by another reconcile (not ignored). return errors.WithStack( - r.Client.Delete(ctx, pod, client.Preconditions{ + r.Writer.Delete(ctx, pod, client.Preconditions{ UID: &pod.UID, ResourceVersion: &pod.ResourceVersion, })) @@ -1153,7 +1153,7 @@ func (r *Reconciler) reconcileInstance( // Create new err variable to avoid abandoning the rest of the reconcile loop if there // is an error getting the monitoring user secret err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) + r.Reader.Get(ctx, client.ObjectKeyFromObject(monitoringUserSecret), monitoringUserSecret)) if err == nil { pgPassword = string(monitoringUserSecret.Data["password"]) } @@ -1420,7 +1420,7 @@ func (r *Reconciler) reconcileInstanceCertificates( ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) instanceCerts := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} instanceCerts.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) @@ -1508,7 +1508,7 @@ func (r *Reconciler) reconcileInstanceSetPodDisruptionBudget( scaled, err = intstr.GetScaledValueFromIntOrPercent(minAvailable, int(*spec.Replicas), true) } if err == nil && scaled <= 0 { - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pdb), pdb)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pdb)) } diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index 7bd63ce9d1..2b8f0db5f8 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ b/internal/controller/postgrescluster/instance_rollout_test.go @@ -57,9 +57,12 @@ func TestReconcilerRolloutInstance(t *testing.T) { } observed := &observedInstances{forCluster: instances} - key := client.ObjectKey{Namespace: "ns1", Name: "one-pod-bruh"} - reconciler := &Reconciler{} - reconciler.Client = fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + cc := fake.NewClientBuilder().WithObjects(instances[0].Pods[0]).Build() + key := client.ObjectKeyFromObject(instances[0].Pods[0]) + reconciler := &Reconciler{ + Reader: cc, + Writer: cc, + } execCalls := 0 reconciler.PodExec = func( @@ -82,13 +85,13 @@ func TestReconcilerRolloutInstance(t *testing.T) { return nil } - assert.NilError(t, reconciler.Client.Get(ctx, key, &corev1.Pod{}), + assert.NilError(t, cc.Get(ctx, key, &corev1.Pod{}), "bug in test: expected pod to 
exist") assert.NilError(t, reconciler.rolloutInstance(ctx, cluster, observed, instances[0])) assert.Equal(t, execCalls, 1, "expected PodExec to be called") - err := reconciler.Client.Get(ctx, key, &corev1.Pod{}) + err := cc.Get(ctx, key, &corev1.Pod{}) assert.Assert(t, apierrors.IsNotFound(err), "expected pod to be deleted, got: %#v", err) }) diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index 0b24e38d72..786865c44c 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1189,20 +1189,21 @@ func TestDeleteInstance(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), - Recorder: new(record.FakeRecorder), + Reader: cc, + Recorder: new(record.FakeRecorder), + StatusWriter: client.WithFieldOwner(cc, t.Name()).Status(), + Writer: client.WithFieldOwner(cc, t.Name()), } // Define, Create, and Reconcile a cluster to get an instance running in kube cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, reconciler.Client.Create(ctx, cluster)) + assert.NilError(t, cc.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( - reconciler.Client.Patch(ctx, cluster, client.RawPatch( + cc.Patch(ctx, cluster, client.RawPatch( client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))))) }) @@ -1215,7 +1216,7 @@ func TestDeleteInstance(t *testing.T) { assert.Assert(t, result.Requeue == false) stsList := &appsv1.StatefulSetList{} - assert.NilError(t, reconciler.Client.List(ctx, stsList, + assert.NilError(t, cc.List(ctx, stsList, client.InNamespace(cluster.Namespace), client.MatchingLabels{ naming.LabelCluster: cluster.Name, @@ -1248,7 +1249,7 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, reconciler.Client.List(ctx, uList, + assert.NilError(t, cc.List(ctx, uList, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) @@ -1792,8 +1793,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } foundPDB := func( @@ -1801,7 +1802,7 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec *v1beta1.PostgresInstanceSetSpec, ) bool { got := &policyv1.PodDisruptionBudget{} - err := r.Client.Get(ctx, + err := cc.Get(ctx, naming.AsObjectKey(naming.InstanceSet(cluster, spec)), got) return !apierrors.IsNotFound(err) @@ -1833,8 +1834,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1860,8 +1861,8 @@ func TestReconcileInstanceSetPodDisruptionBudget(t *testing.T) { spec := 
&cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromString("50%")) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) assert.NilError(t, r.reconcileInstanceSetPodDisruptionBudget(ctx, cluster, spec)) assert.Assert(t, foundPDB(cluster, spec)) @@ -1910,8 +1911,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), } ns := setupNamespace(t, cc) @@ -1940,14 +1941,14 @@ func TestCleanupDisruptionBudgets(t *testing.T) { createPDB := func( pdb *policyv1.PodDisruptionBudget, ) error { - return r.Client.Create(ctx, pdb) + return cc.Create(ctx, pdb) } foundPDB := func( pdb *policyv1.PodDisruptionBudget, ) bool { return !apierrors.IsNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(pdb), + cc.Get(ctx, client.ObjectKeyFromObject(pdb), &policyv1.PodDisruptionBudget{})) } @@ -1962,8 +1963,8 @@ func TestCleanupDisruptionBudgets(t *testing.T) { spec := &cluster.Spec.InstanceSets[0] spec.MinAvailable = initialize.Pointer(intstr.FromInt32(1)) - assert.NilError(t, r.Client.Create(ctx, cluster)) - t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) + assert.NilError(t, cc.Create(ctx, cluster)) + t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) }) expectedPDB := generatePDB(t, cluster, spec, initialize.Pointer(intstr.FromInt32(1))) @@ -2007,8 +2008,7 @@ func TestReconcileInstanceConfigMap(t *testing.T) { require.ParallelCapacity(t, 1) r := &Reconciler{ - Client: cc, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(cc, t.Name()), } t.Run("LocalVolumeOtelDisabled", func(t *testing.T) { diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index af3a3b8cca..7368fe295f 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -37,7 +37,7 @@ func (r *Reconciler) deletePatroniArtifacts( selector, err := naming.AsSelector(naming.ClusterPatronis(cluster)) if err == nil { err = errors.WithStack( - r.Client.DeleteAllOf(ctx, &corev1.Endpoints{}, + r.Writer.DeleteAllOf(ctx, &corev1.Endpoints{}, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) @@ -324,7 +324,7 @@ func (r *Reconciler) reconcilePatroniStatus( dcs := &corev1.Endpoints{ObjectMeta: naming.PatroniDistributedConfiguration(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(dcs), dcs))) if err == nil { if dcs.Annotations["initialize"] != "" { @@ -362,14 +362,14 @@ func (r *Reconciler) reconcileReplicationSecret( Name: cluster.Spec.CustomReplicationClientTLSSecret.Name, Namespace: cluster.Namespace, }} - err := errors.WithStack(r.Client.Get(ctx, + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(custom), custom)) return custom, err } existing := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(cluster)} err := errors.WithStack(client.IgnoreNotFound( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))) + r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))) leaf := &pki.LeafCertificate{} commonName := 
postgres.ReplicationUser diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index 728b75aee3..99df846112 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -32,11 +32,7 @@ import ( ) func TestGeneratePatroniLeaderLeaseService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -232,7 +228,7 @@ func TestReconcilePatroniLeaderLease(t *testing.T) { require.ParallelCapacity(t, 1) ns := setupNamespace(t, cc) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{Writer: client.WithFieldOwner(cc, t.Name())} cluster := testCluster() cluster.Namespace = ns.Name @@ -322,7 +318,10 @@ func TestPatroniReplicationSecret(t *testing.T) { require.ParallelCapacity(t, 0) ctx := context.Background() - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{ + Reader: tClient, + Writer: client.WithFieldOwner(tClient, t.Name()), + } // test postgrescluster values var ( @@ -351,7 +350,7 @@ func TestPatroniReplicationSecret(t *testing.T) { patroniReplicationSecret := &corev1.Secret{ObjectMeta: naming.ReplicationClientCertSecret(postgresCluster)} patroniReplicationSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) - err = r.Client.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) + err = tClient.Get(ctx, client.ObjectKeyFromObject(patroniReplicationSecret), patroniReplicationSecret) assert.NilError(t, err) t.Run("ca.crt", func(t *testing.T) { @@ -426,7 +425,7 @@ func TestReconcilePatroniStatus(t *testing.T) { require.ParallelCapacity(t, 0) ns := setupNamespace(t, tClient) - r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + r := &Reconciler{Reader: tClient} systemIdentifier := "6952526174828511264" createResources := func(index, readyReplicas int, @@ -526,13 +525,9 @@ func TestReconcilePatroniStatus(t *testing.T) { } func TestReconcilePatroniSwitchover(t *testing.T) { - _, client := setupKubernetes(t) - require.ParallelCapacity(t, 0) - var called, failover, callError, callFails bool var timelineCallNoLeader, timelineCall bool r := Reconciler{ - Client: client, PodExec: func(ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string) error { called = true diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index dbaaf359ee..fe5d4ce21d 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ b/internal/controller/postgrescluster/pgadmin.go @@ -96,7 +96,7 @@ func (r *Reconciler) reconcilePGAdminConfigMap( // pgAdmin is disabled; delete the ConfigMap if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(configmap) - err := errors.WithStack(r.Client.Get(ctx, key, configmap)) + err := errors.WithStack(r.Reader.Get(ctx, key, configmap)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap)) } @@ -212,7 +212,7 @@ func (r *Reconciler) reconcilePGAdminService( // pgAdmin is disabled; delete the Service if it exists. Check the client // cache first using Get. 
key := client.ObjectKeyFromObject(service) - err := errors.WithStack(r.Client.Get(ctx, key, service)) + err := errors.WithStack(r.Reader.Get(ctx, key, service)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, service)) } @@ -240,7 +240,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // pgAdmin is disabled; delete the Deployment if it exists. Check the // client cache first using Get. key := client.ObjectKeyFromObject(sts) - err := errors.WithStack(r.Client.Get(ctx, key, sts)) + err := errors.WithStack(r.Reader.Get(ctx, key, sts)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, sts)) } @@ -333,7 +333,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { if !apierrors.IsNotFound(err) { return err } @@ -346,7 +346,7 @@ func (r *Reconciler) reconcilePGAdminStatefulSet( exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + return errors.WithStack(client.IgnoreNotFound(r.Writer.Delete(ctx, existing, exactly, propagate))) } } @@ -391,7 +391,7 @@ func (r *Reconciler) reconcilePGAdminDataVolume( // pgAdmin is disabled; delete the PVC if it exists. Check the client // cache first using Get. key := client.ObjectKeyFromObject(pvc) - err := errors.WithStack(r.Client.Get(ctx, key, pvc)) + err := errors.WithStack(r.Reader.Get(ctx, key, pvc)) if err == nil { err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc)) } @@ -439,7 +439,7 @@ func (r *Reconciler) reconcilePGAdminUsers( pod := &corev1.Pod{ObjectMeta: naming.ClusterPGAdmin(cluster)} pod.Name += "-0" - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(pod), pod)) if err != nil { return client.IgnoreNotFound(err) } diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 1d0a305b2a..a7ea705721 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -29,10 +29,7 @@ import ( ) func TestGeneratePGAdminConfigMap(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - - reconciler := &Reconciler{Client: cc} + reconciler := &Reconciler{} cluster := &v1beta1.PostgresCluster{} cluster.Namespace = "some-ns" @@ -118,11 +115,7 @@ ownerReferences: } func TestGeneratePGAdminService(t *testing.T) { - _, cc := setupKubernetes(t) - require.ParallelCapacity(t, 0) - reconciler := &Reconciler{ - Client: cc, Recorder: new(record.FakeRecorder), } @@ -354,7 +347,10 @@ func TestReconcilePGAdminService(t *testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name @@ -456,7 +452,10 @@ func TestReconcilePGAdminStatefulSet(t 
*testing.T) { _, cc := setupKubernetes(t) require.ParallelCapacity(t, 1) - reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())} + reconciler := &Reconciler{ + Reader: cc, + Writer: client.WithFieldOwner(cc, t.Name()), + } ns := setupNamespace(t, cc) cluster := pgAdminTestCluster(*ns) @@ -670,8 +669,7 @@ func TestReconcilePGAdminDataVolume(t *testing.T) { require.ParallelCapacity(t, 1) reconciler := &Reconciler{ - Client: tClient, - Owner: client.FieldOwner(t.Name()), + Writer: client.WithFieldOwner(tClient, t.Name()), } ns := setupNamespace(t, tClient) @@ -721,7 +719,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { t.Run("NoPods", func(t *testing.T) { r := new(Reconciler) - r.Client = fake.NewClientBuilder().Build() + r.Reader = fake.NewClientBuilder().Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -737,7 +735,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { pod.Status.ContainerStatuses = nil r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -757,7 +755,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() assert.NilError(t, r.reconcilePGAdminUsers(ctx, cluster, nil, nil)) }) @@ -773,7 +771,7 @@ func TestReconcilePGAdminUsers(t *testing.T) { new(corev1.ContainerStateRunning) r := new(Reconciler) - r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + r.Reader = fake.NewClientBuilder().WithObjects(pod).Build() calls := 0 r.PodExec = func( diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 6c6a182008..5f471b1408 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -137,7 +137,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. 
 	existing := &appsv1.StatefulSet{}
-	if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil {
+	if err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(repo), existing)); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil, err
 		}
@@ -150,7 +150,7 @@ func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v
 			exactly := client.Preconditions{UID: &uid, ResourceVersion: &version}
 			propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan)
-			return repo, errors.WithStack(r.Client.Delete(ctx, existing, exactly, propagate))
+			return repo, errors.WithStack(r.Writer.Delete(ctx, existing, exactly, propagate))
 		}
 	}
@@ -248,7 +248,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context,
 	for _, gvk := range gvks {
 		uList := &unstructured.UnstructuredList{}
 		uList.SetGroupVersionKind(gvk)
-		if err := r.Client.List(ctx, uList,
+		if err := r.Reader.List(ctx, uList,
 			client.InNamespace(postgresCluster.GetNamespace()),
 			client.MatchingLabelsSelector{Selector: selector}); err != nil {
 			return nil, errors.WithStack(err)
@@ -398,7 +398,7 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context,
 		// If nothing has specified that the resource should not be deleted, then delete
 		if delete {
-			if err := r.Client.Delete(ctx, &ownedResources[i],
+			if err := r.Writer.Delete(ctx, &ownedResources[i],
 				client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil {
 				return []unstructured.Unstructured{}, errors.WithStack(err)
 			}
@@ -894,7 +894,7 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl
 				Namespace: postgresCluster.GetNamespace(),
 			},
 		}
-		err := errors.WithStack(r.Client.Get(ctx,
+		err := errors.WithStack(r.Reader.Get(ctx,
 			client.ObjectKeyFromObject(logVolume), logVolume))
 		if err != nil {
 			// PVC not retrieved, create warning event
@@ -935,7 +935,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context,
 	// lookup the various patroni endpoints
 	leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{}
 	currentEndpoints := []corev1.Endpoints{}
-	if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)),
+	if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)),
 		&leaderEP); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil, nil, errors.WithStack(err)
@@ -943,7 +943,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context,
 	} else {
 		currentEndpoints = append(currentEndpoints, leaderEP)
 	}
-	if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)),
+	if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)),
 		&dcsEP); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil, nil, errors.WithStack(err)
@@ -951,7 +951,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context,
 	} else {
 		currentEndpoints = append(currentEndpoints, dcsEP)
 	}
-	if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)),
+	if err := r.Reader.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)),
 		&failoverEP); err != nil {
 		if !apierrors.IsNotFound(err) {
 			return nil, nil, errors.WithStack(err)
@@ -961,7 +961,7 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context,
 	}
 	restoreJobs := &batchv1.JobList{}
-	if err := r.Client.List(ctx, restoreJobs, &client.ListOptions{
+	if err := r.Reader.List(ctx, restoreJobs, &client.ListOptions{
 		Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()),
 	}); err != nil {
@@ -1009,26 +1009,26 @@ func (r *Reconciler) observeRestoreEnv(ctx context.Context,
 	// by the restore job. Clean them up if they still exist.
 	selector := naming.PGBackRestRestoreConfigSelector(cluster.GetName())
 	restoreConfigMaps := &corev1.ConfigMapList{}
-	if err := r.Client.List(ctx, restoreConfigMaps, &client.ListOptions{
+	if err := r.Reader.List(ctx, restoreConfigMaps, &client.ListOptions{
 		Namespace: cluster.Namespace, LabelSelector: selector,
 	}); err != nil {
 		return nil, nil, errors.WithStack(err)
 	}
 	for i := range restoreConfigMaps.Items {
-		if err := r.Client.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil {
+		if err := r.Writer.Delete(ctx, &restoreConfigMaps.Items[i]); err != nil {
 			return nil, nil, errors.WithStack(err)
 		}
 	}
 	restoreSecrets := &corev1.SecretList{}
-	if err := r.Client.List(ctx, restoreSecrets, &client.ListOptions{
+	if err := r.Reader.List(ctx, restoreSecrets, &client.ListOptions{
 		Namespace: cluster.Namespace, LabelSelector: selector,
 	}); err != nil {
 		return nil, nil, errors.WithStack(err)
 	}
 	for i := range restoreSecrets.Items {
-		if err := r.Client.Delete(ctx, &restoreSecrets.Items[i]); err != nil {
+		if err := r.Writer.Delete(ctx, &restoreSecrets.Items[i]); err != nil {
 			return nil, nil, errors.WithStack(err)
 		}
 	}
@@ -1120,7 +1120,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context,
 	// remove any existing restore Jobs
 	if restoreJob != nil {
 		setPreparingClusterCondition("removing restore job")
-		if err := r.Client.Delete(ctx, restoreJob,
+		if err := r.Writer.Delete(ctx, restoreJob,
 			client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil {
 			return errors.WithStack(err)
 		}
@@ -1130,7 +1130,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context,
 	if clusterRunning {
 		setPreparingClusterCondition("removing runners")
 		for _, runner := range runners {
-			err := r.Client.Delete(ctx, runner,
+			err := r.Writer.Delete(ctx, runner,
 				client.PropagationPolicy(metav1.DeletePropagationForeground))
 			if client.IgnoreNotFound(err) != nil {
 				return errors.WithStack(err)
@@ -1161,7 +1161,7 @@ func (r *Reconciler) prepareForRestore(ctx context.Context,
 		setPreparingClusterCondition("removing DCS")
 		// delete any Endpoints
 		for i := range currentEndpoints {
-			if err := r.Client.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil {
+			if err := r.Writer.Delete(ctx, &currentEndpoints[i]); client.IgnoreNotFound(err) != nil {
 				return errors.WithStack(err)
 			}
 		}
@@ -1687,7 +1687,7 @@ func (r *Reconciler) reconcilePostgresClusterDataSource(ctx context.Context,
 				"PostgreSQL data for the cluster: %w", err)
 		}
 	} else {
-		if err := r.Client.Get(ctx,
+		if err := r.Reader.Get(ctx,
 			client.ObjectKey{Name: sourceClusterName, Namespace: sourceClusterNamespace},
 			sourceCluster); err != nil {
 			if apierrors.IsNotFound(err) {
@@ -1889,7 +1889,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context,
 	sourceConfig := &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(sourceCluster)}
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig))
+			r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceConfig), sourceConfig))
 	}
 	// Retrieve the pgBackRest Secret of the source cluster if it has one. When
@@ -1897,7 +1897,7 @@ func (r *Reconciler) copyRestoreConfiguration(ctx context.Context,
 	sourceSecret := &corev1.Secret{ObjectMeta: naming.PGBackRestSecret(sourceCluster)}
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret))
+			r.Reader.Get(ctx, client.ObjectKeyFromObject(sourceSecret), sourceSecret))
 		if apierrors.IsNotFound(err) {
 			sourceSecret, err = nil, nil
@@ -1985,7 +1985,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster,
 		// Get the existing Secret for the copy, if it exists. It **must**
 		// exist if not configured as optional.
 		if secretProjection.Optional != nil && *secretProjection.Optional {
-			if err := errors.WithStack(r.Client.Get(ctx, secretName,
+			if err := errors.WithStack(r.Reader.Get(ctx, secretName,
 				secretCopy)); apierrors.IsNotFound(err) {
 				continue
 			} else {
 			}
 		} else {
 			if err := errors.WithStack(
-				r.Client.Get(ctx, secretName, secretCopy)); err != nil {
+				r.Reader.Get(ctx, secretName, secretCopy)); err != nil {
 				return err
 			}
 		}
@@ -2039,7 +2039,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster,
 		// Get the existing ConfigMap for the copy, if it exists. It **must**
 		// exist if not configured as optional.
 		if configMapProjection.Optional != nil && *configMapProjection.Optional {
-			if err := errors.WithStack(r.Client.Get(ctx, configMapName,
+			if err := errors.WithStack(r.Reader.Get(ctx, configMapName,
 				configMapCopy)); apierrors.IsNotFound(err) {
 				continue
 			} else {
 			}
 		} else {
 			if err := errors.WithStack(
-				r.Client.Get(ctx, configMapName, configMapCopy)); err != nil {
+				r.Reader.Get(ctx, configMapName, configMapCopy)); err != nil {
 				return err
 			}
 		}
@@ -2102,7 +2102,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context,
 			Namespace: postgresCluster.GetNamespace(),
 		},
 	}
-	err := errors.WithStack(r.Client.Get(ctx,
+	err := errors.WithStack(r.Reader.Get(ctx,
 		client.ObjectKeyFromObject(logVolume), logVolume))
 	if err != nil {
 		// PVC not retrieved, create warning event
@@ -2153,7 +2153,7 @@ func (r *Reconciler) reconcilePGBackRestSecret(ctx context.Context,
 	existing := &corev1.Secret{}
 	err := errors.WithStack(client.IgnoreNotFound(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(intent), existing)))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(intent), existing)))
 	if err == nil {
 		err = r.setControllerReference(cluster, intent)
@@ -2429,7 +2429,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context,
 	// per a new value for the annotation (unless the user manually deletes the Job).
 	if completed || failed {
 		if manualAnnotation != "" && backupID != manualAnnotation {
-			return errors.WithStack(r.Client.Delete(ctx, currentBackupJob,
+			return errors.WithStack(r.Writer.Delete(ctx, currentBackupJob,
 				client.PropagationPolicy(metav1.DeletePropagationBackground)))
 		}
 	}
@@ -2702,7 +2702,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context,
 		if failed || replicaCreateRepoChanged ||
 			(job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) ||
 			(job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) {
-			if err := r.Client.Delete(ctx, job,
+			if err := r.Writer.Delete(ctx, job,
 				client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil {
 				return errors.WithStack(err)
 			}
@@ -2829,7 +2829,7 @@ func (r *Reconciler) writeRepoVolumeSizeRequestStatus(ctx context.Context,
 	pods := &corev1.PodList{}
 	if err := errors.WithStack(
-		r.Client.List(ctx, pods,
+		r.Reader.List(ctx, pods,
 			client.InNamespace(cluster.Namespace),
 			client.MatchingLabelsSelector{
 				Selector: naming.PGBackRestDedicatedLabels(cluster.Name).AsSelector()},
@@ -3342,7 +3342,7 @@ func (r *Reconciler) ObserveBackupUniverse(ctx context.Context,
 		},
 	}
 	err = errors.WithStack(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 	repoHostStatefulSetNotFound = apierrors.IsNotFound(err)
 	// If we have an error that is not related to a missing repo-host StatefulSet,
diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go
index a77e7e990e..7eab360b1c 100644
--- a/internal/controller/postgrescluster/pgbackrest_test.go
+++ b/internal/controller/postgrescluster/pgbackrest_test.go
@@ -178,9 +178,9 @@ func TestReconcilePGBackRest(t *testing.T) {
 	r := &Reconciler{}
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: mgr.GetClient(),
+			Reader: mgr.GetClient(),
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(mgr.GetClient(), controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -678,7 +678,7 @@ func TestReconcilePGBackRestRBAC(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 0)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())}
 	clusterName := "hippocluster"
 	clusterUID := "hippouid"
@@ -737,7 +737,7 @@ func TestReconcileRepoHostRBAC(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 0)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{Reader: tClient, Writer: client.WithFieldOwner(tClient, t.Name())}
 	clusterName := "hippocluster"
 	clusterUID := "hippouid"
@@ -804,9 +804,7 @@ func TestReconcileStanzaCreate(t *testing.T) {
 	r := &Reconciler{}
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: mgr.GetClient(),
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -925,7 +923,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{Writer: client.WithFieldOwner(tClient, t.Name())}
 	clusterName := "hippocluster"
 	clusterUID := "hippouid"
@@ -1086,9 +1084,8 @@ func TestReconcileManualBackup(t *testing.T) {
 	r := &Reconciler{}
 	_, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: mgr.GetClient(),
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(mgr.GetClient(), controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -1524,7 +1521,10 @@ func TestGetPGBackRestResources(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 	clusterName := "hippocluster"
 	clusterUID := "hippouid"
@@ -1830,9 +1830,9 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) {
 	r := &Reconciler{}
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: tClient,
+			Reader: tClient,
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(tClient, controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -2209,9 +2209,9 @@ func TestReconcileCloudBasedDataSource(t *testing.T) {
 	r := &Reconciler{}
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: tClient,
+			Reader: tClient,
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(tClient, controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -2385,7 +2385,10 @@ func TestCopyConfigurationResources(t *testing.T) {
 	ctx := context.Background()
 	require.ParallelCapacity(t, 2)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 	ns1 := setupNamespace(t, tClient)
 	ns2 := setupNamespace(t, tClient)
@@ -2634,8 +2637,7 @@ func TestGenerateBackupJobIntent(t *testing.T) {
 	ns := setupNamespace(t, cc)
 	r := &Reconciler{
-		Client: cc,
-		Owner: controllerName,
+		Reader: cc,
 	}
 	ctx := context.Background()
@@ -3027,7 +3029,7 @@ volumes:
 			},
 			Spec: corev1.PersistentVolumeClaimSpec(testVolumeClaimSpec()),
 		}
-		err := r.Client.Create(ctx, pvc)
+		err := cc.Create(ctx, pvc)
 		assert.NilError(t, err)
 		spec := r.generateBackupJobSpecIntent(ctx,
@@ -3140,11 +3142,8 @@ volumes:
 }
 func TestGenerateRepoHostIntent(t *testing.T) {
-	_, cc := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
 	ctx := context.Background()
-	r := Reconciler{Client: cc}
+	r := Reconciler{}
 	t.Run("empty", func(t *testing.T) {
 		_, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, "", &RepoResources{},
@@ -3230,12 +3229,7 @@ func TestGenerateRepoHostIntent(t *testing.T) {
 }
 func TestGenerateRestoreJobIntent(t *testing.T) {
-	_, cc := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
-	r := Reconciler{
-		Client: cc,
-	}
+	r := Reconciler{}
 	t.Run("empty", func(t *testing.T) {
 		err := r.generateRestoreJobIntent(&v1beta1.PostgresCluster{}, "", "",
@@ -3457,7 +3451,7 @@ func TestObserveRestoreEnv(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{Reader: tClient}
 	namespace := setupNamespace(t, tClient).Name
 	generateJob := func(clusterName string, completed, failed *bool) *batchv1.Job {
@@ -3557,18 +3551,18 @@ func TestObserveRestoreEnv(t *testing.T) {
 				fakeLeaderEP := &corev1.Endpoints{}
 				fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster)
 				fakeLeaderEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP))
+				assert.NilError(t, tClient.Create(ctx, fakeLeaderEP))
 				fakeDCSEP := &corev1.Endpoints{}
 				fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster)
 				fakeDCSEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeDCSEP))
+				assert.NilError(t, tClient.Create(ctx, fakeDCSEP))
 				fakeFailoverEP := &corev1.Endpoints{}
 				fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster)
 				fakeFailoverEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP))
+				assert.NilError(t, tClient.Create(ctx, fakeFailoverEP))
 				job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false))
-				assert.NilError(t, r.Client.Create(ctx, job))
+				assert.NilError(t, tClient.Create(ctx, job))
 			},
 			result: testResult{
 				foundRestoreJob: true,
@@ -3581,15 +3575,15 @@ func TestObserveRestoreEnv(t *testing.T) {
 				fakeLeaderEP := &corev1.Endpoints{}
 				fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster)
 				fakeLeaderEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP))
+				assert.NilError(t, tClient.Create(ctx, fakeLeaderEP))
 				fakeDCSEP := &corev1.Endpoints{}
 				fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster)
 				fakeDCSEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeDCSEP))
+				assert.NilError(t, tClient.Create(ctx, fakeDCSEP))
 				fakeFailoverEP := &corev1.Endpoints{}
 				fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster)
 				fakeFailoverEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP))
+				assert.NilError(t, tClient.Create(ctx, fakeFailoverEP))
 			},
 			result: testResult{
 				foundRestoreJob: false,
@@ -3600,7 +3594,7 @@ func TestObserveRestoreEnv(t *testing.T) {
 			desc: "restore job only exists",
 			createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) {
 				job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false))
-				assert.NilError(t, r.Client.Create(ctx, job))
+				assert.NilError(t, tClient.Create(ctx, job))
 			},
 			result: testResult{
 				foundRestoreJob: true,
@@ -3614,8 +3608,8 @@ func TestObserveRestoreEnv(t *testing.T) {
 					t.Skip("requires mocking of Job conditions")
 				}
 				job := generateJob(cluster.Name, initialize.Bool(true), nil)
-				assert.NilError(t, r.Client.Create(ctx, job.DeepCopy()))
-				assert.NilError(t, r.Client.Status().Update(ctx, job))
+				assert.NilError(t, tClient.Create(ctx, job.DeepCopy()))
+				assert.NilError(t, tClient.Status().Update(ctx, job))
 			},
 			result: testResult{
 				foundRestoreJob: true,
@@ -3634,8 +3628,8 @@ func TestObserveRestoreEnv(t *testing.T) {
 					t.Skip("requires mocking of Job conditions")
 				}
 				job := generateJob(cluster.Name, nil, initialize.Bool(true))
-				assert.NilError(t, r.Client.Create(ctx, job.DeepCopy()))
-				assert.NilError(t, r.Client.Status().Update(ctx, job))
+				assert.NilError(t, tClient.Create(ctx, job.DeepCopy()))
+				assert.NilError(t, tClient.Status().Update(ctx, job))
 			},
 			result: testResult{
 				foundRestoreJob: true,
@@ -3685,7 +3679,9 @@ func TestPrepareForRestore(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 	namespace := setupNamespace(t, tClient).Name
 	generateJob := func(clusterName string) *batchv1.Job {
@@ -3739,7 +3735,7 @@ func TestPrepareForRestore(t *testing.T) {
 			createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) {
 				job := generateJob(cluster.Name)
-				assert.NilError(t, r.Client.Create(ctx, job))
+				assert.NilError(t, tClient.Create(ctx, job))
 				return job, nil
 			},
 			result: testResult{
@@ -3759,15 +3755,15 @@ func TestPrepareForRestore(t *testing.T) {
 				fakeLeaderEP := corev1.Endpoints{}
 				fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster)
 				fakeLeaderEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP))
+				assert.NilError(t, tClient.Create(ctx, &fakeLeaderEP))
 				fakeDCSEP := corev1.Endpoints{}
 				fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster)
 				fakeDCSEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP))
+				assert.NilError(t, tClient.Create(ctx, &fakeDCSEP))
 				fakeFailoverEP := corev1.Endpoints{}
 				fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster)
 				fakeFailoverEP.Namespace = namespace
-				assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP))
+				assert.NilError(t, tClient.Create(ctx, &fakeFailoverEP))
 				return nil, []corev1.Endpoints{fakeLeaderEP, fakeDCSEP, fakeFailoverEP}
 			},
 			result: testResult{
@@ -3874,19 +3870,19 @@ func TestPrepareForRestore(t *testing.T) {
 			leaderEP, dcsEP, failoverEP := corev1.Endpoints{}, corev1.Endpoints{}, corev1.Endpoints{}
 			currentEndpoints := []corev1.Endpoints{}
-			if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)),
+			if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniLeaderEndpoints(cluster)),
 				&leaderEP); err != nil {
 				assert.NilError(t, client.IgnoreNotFound(err))
 			} else {
 				currentEndpoints = append(currentEndpoints, leaderEP)
 			}
-			if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)),
+			if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniDistributedConfiguration(cluster)),
 				&dcsEP); err != nil {
 				assert.NilError(t, client.IgnoreNotFound(err))
 			} else {
 				currentEndpoints = append(currentEndpoints, dcsEP)
 			}
-			if err := r.Client.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)),
+			if err := tClient.Get(ctx, naming.AsObjectKey(naming.PatroniTrigger(cluster)),
 				&failoverEP); err != nil {
 				assert.NilError(t, client.IgnoreNotFound(err))
 			} else {
@@ -3894,7 +3890,7 @@ func TestPrepareForRestore(t *testing.T) {
 			}
 			restoreJobs := &batchv1.JobList{}
-			assert.NilError(t, r.Client.List(ctx, restoreJobs, &client.ListOptions{
+			assert.NilError(t, tClient.List(ctx, restoreJobs, &client.ListOptions{
 				Namespace: cluster.Namespace, LabelSelector: naming.PGBackRestRestoreJobSelector(cluster.GetName()),
 			}))
@@ -3930,9 +3926,9 @@ func TestReconcileScheduledBackups(t *testing.T) {
 	r := &Reconciler{}
 	_, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: mgr.GetClient(),
+			Reader: mgr.GetClient(),
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(mgr.GetClient(), controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -4193,7 +4189,7 @@ func TestSetScheduledJobStatus(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 0)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{Reader: tClient}
 	clusterName := "hippocluster"
 	clusterUID := "hippouid"
@@ -4266,9 +4262,9 @@ func TestBackupsEnabled(t *testing.T) {
 	r := &Reconciler{}
 	ctx, cancel := setupManager(t, cfg, func(mgr manager.Manager) {
 		r = &Reconciler{
-			Client: mgr.GetClient(),
+			Reader: mgr.GetClient(),
 			Recorder: mgr.GetEventRecorderFor(controllerName),
-			Owner: controllerName,
+			Writer: client.WithFieldOwner(mgr.GetClient(), controllerName),
 		}
 	})
 	t.Cleanup(func() { teardownManager(cancel, t) })
@@ -4424,8 +4420,7 @@ func TestGetRepoHostVolumeRequests(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	reconciler := &Reconciler{
-		Client: tClient,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: tClient,
 	}
 	testCases := []struct {
diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go
index 46b66cdbf5..8eddb7e429 100644
--- a/internal/controller/postgrescluster/pgbouncer.go
+++ b/internal/controller/postgrescluster/pgbouncer.go
@@ -74,7 +74,7 @@ func (r *Reconciler) reconcilePGBouncerConfigMap(
 		// PgBouncer is disabled; delete the ConfigMap if it exists. Check the
 		// client cache first using Get.
 		key := client.ObjectKeyFromObject(configmap)
-		err := errors.WithStack(r.Client.Get(ctx, key, configmap))
+		err := errors.WithStack(r.Reader.Get(ctx, key, configmap))
 		if err == nil {
 			err = errors.WithStack(r.deleteControlled(ctx, cluster, configmap))
 		}
@@ -207,7 +207,7 @@ func (r *Reconciler) reconcilePGBouncerSecret(
 ) (*corev1.Secret, error) {
 	existing := &corev1.Secret{ObjectMeta: naming.ClusterPGBouncer(cluster)}
 	err := errors.WithStack(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 	if client.IgnoreNotFound(err) != nil {
 		return nil, err
 	}
@@ -351,7 +351,7 @@ func (r *Reconciler) reconcilePGBouncerService(
 		// PgBouncer is disabled; delete the Service if it exists. Check the client
 		// cache first using Get.
 		key := client.ObjectKeyFromObject(service)
-		err := errors.WithStack(r.Client.Get(ctx, key, service))
+		err := errors.WithStack(r.Reader.Get(ctx, key, service))
 		if err == nil {
 			err = errors.WithStack(r.deleteControlled(ctx, cluster, service))
 		}
@@ -534,7 +534,7 @@ func (r *Reconciler) reconcilePGBouncerDeployment(
 		// PgBouncer is disabled; delete the Deployment if it exists. Check the
 		// client cache first using Get.
 		key := client.ObjectKeyFromObject(deploy)
-		err := errors.WithStack(r.Client.Get(ctx, key, deploy))
+		err := errors.WithStack(r.Reader.Get(ctx, key, deploy))
 		if err == nil {
 			err = errors.WithStack(r.deleteControlled(ctx, cluster, deploy))
 		}
@@ -559,7 +559,7 @@ func (r *Reconciler) reconcilePGBouncerPodDisruptionBudget(
 ) error {
 	deleteExistingPDB := func(cluster *v1beta1.PostgresCluster) error {
 		existing := &policyv1.PodDisruptionBudget{ObjectMeta: naming.ClusterPGBouncer(cluster)}
-		err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+		err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 		if err == nil {
 			err = errors.WithStack(r.deleteControlled(ctx, cluster, existing))
 		}
diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go
index e6df4fbab8..c6b3e65e4d 100644
--- a/internal/controller/postgrescluster/pgbouncer_test.go
+++ b/internal/controller/postgrescluster/pgbouncer_test.go
@@ -27,11 +27,7 @@ import (
 )
 func TestGeneratePGBouncerService(t *testing.T) {
-	_, cc := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
 	reconciler := &Reconciler{
-		Client: cc,
 		Recorder: new(record.FakeRecorder),
 	}
@@ -263,7 +259,7 @@ func TestReconcilePGBouncerService(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())}
+	reconciler := &Reconciler{
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
+	}
 	cluster := testCluster()
 	cluster.Namespace = setupNamespace(t, cc).Name
@@ -365,11 +364,8 @@
 }
 func TestGeneratePGBouncerDeployment(t *testing.T) {
-	_, cc := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
 	ctx := context.Background()
-	reconciler := &Reconciler{Client: cc}
+	reconciler := &Reconciler{}
 	cluster := &v1beta1.PostgresCluster{}
 	cluster.Namespace = "ns3"
@@ -547,15 +543,15 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	foundPDB := func(
 		cluster *v1beta1.PostgresCluster,
 	) bool {
 		got := &policyv1.PodDisruptionBudget{}
-		err := r.Client.Get(ctx,
+		err := cc.Get(ctx,
 			naming.AsObjectKey(naming.ClusterPGBouncer(cluster)), got)
 		return !apierrors.IsNotFound(err)
@@ -594,8 +590,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) {
 		cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1)
 		cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromInt32(1))
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster))
 		assert.Assert(t, foundPDB(cluster))
@@ -621,8 +617,8 @@ func TestReconcilePGBouncerDisruptionBudget(t *testing.T) {
 		cluster.Spec.Proxy.PGBouncer.Replicas = initialize.Int32(1)
 		cluster.Spec.Proxy.PGBouncer.MinAvailable = initialize.Pointer(intstr.FromString("50%"))
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		assert.NilError(t, r.reconcilePGBouncerPodDisruptionBudget(ctx, cluster))
 		assert.Assert(t, foundPDB(cluster))
diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go
index 9a6043f868..e30bf3f56f 100644
--- a/internal/controller/postgrescluster/pgmonitor.go
+++ b/internal/controller/postgrescluster/pgmonitor.go
@@ -153,7 +153,7 @@ func (r *Reconciler) reconcileMonitoringSecret(
 	existing := &corev1.Secret{ObjectMeta: naming.MonitoringUserSecret(cluster)}
 	err := errors.WithStack(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 	if client.IgnoreNotFound(err) != nil {
 		return nil, err
 	}
@@ -380,7 +380,7 @@ func (r *Reconciler) reconcileExporterWebConfig(ctx context.Context,
 	}
 	existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterWebConfigMap(cluster)}
-	err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+	err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 	if client.IgnoreNotFound(err) != nil {
 		return nil, err
 	}
@@ -439,7 +439,7 @@ func (r *Reconciler) reconcileExporterQueriesConfig(ctx context.Context,
 	cluster *v1beta1.PostgresCluster) (*corev1.ConfigMap, error) {
 	existing := &corev1.ConfigMap{ObjectMeta: naming.ExporterQueriesConfigMap(cluster)}
-	err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing))
+	err := errors.WithStack(r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing))
 	if client.IgnoreNotFound(err) != nil {
 		return nil, err
 	}
diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go
index e4ccaf0d9f..e91b176ec0 100644
--- a/internal/controller/postgrescluster/pgmonitor_test.go
+++ b/internal/controller/postgrescluster/pgmonitor_test.go
@@ -702,7 +702,10 @@ func TestReconcileMonitoringSecret(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	require.ParallelCapacity(t, 0)
-	reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())}
+	reconciler := &Reconciler{
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
+	}
 	cluster := testCluster()
 	cluster.Default()
@@ -776,7 +779,10 @@ func TestReconcileExporterQueriesConfig(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	require.ParallelCapacity(t, 0)
-	reconciler := &Reconciler{Client: cc, Owner: client.FieldOwner(t.Name())}
+	reconciler := &Reconciler{
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
+	}
 	cluster := testCluster()
 	cluster.Default()
diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go
index d52d6a75da..0e686d4f72 100644
--- a/internal/controller/postgrescluster/pki.go
+++ b/internal/controller/postgrescluster/pki.go
@@ -42,7 +42,7 @@ func (r *Reconciler) reconcileRootCertificate(
 	existing := &corev1.Secret{}
 	existing.Namespace, existing.Name = cluster.Namespace, naming.RootCertSecret
 	err := errors.WithStack(client.IgnoreNotFound(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)))
 	root := &pki.RootCertificateAuthority{}
@@ -120,7 +120,7 @@ func (r *Reconciler) reconcileClusterCertificate(
 	existing := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)}
 	err := errors.WithStack(client.IgnoreNotFound(
-		r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)))
+		r.Reader.Get(ctx, client.ObjectKeyFromObject(existing), existing)))
 	leaf := &pki.LeafCertificate{}
 	dnsNames := append(naming.ServiceDNSNames(ctx, primaryService), naming.ServiceDNSNames(ctx, replicaService)...)
diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go
index ed74b1220b..b61e983258 100644
--- a/internal/controller/postgrescluster/pki_test.go
+++ b/internal/controller/postgrescluster/pki_test.go
@@ -42,8 +42,8 @@ func TestReconcileCerts(t *testing.T) {
 	namespace := setupNamespace(t, tClient).Name
 	r := &Reconciler{
-		Client: tClient,
-		Owner: controllerName,
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, controllerName),
 	}
 	// set up cluster1
diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go
index 6463068d4c..e8cbffc19a 100644
--- a/internal/controller/postgrescluster/pod_disruption_budget_test.go
+++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go
@@ -13,14 +13,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
-	"github.com/crunchydata/postgres-operator/internal/testing/require"
 	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
 )
 func TestGeneratePodDisruptionBudget(t *testing.T) {
-	_, cc := setupKubernetes(t)
-	r := &Reconciler{Client: cc}
-	require.ParallelCapacity(t, 0)
+	r := &Reconciler{}
 	var (
 		minAvailable *intstr.IntOrString
diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go
index 5b11082637..93280ac6a0 100644
--- a/internal/controller/postgrescluster/postgres.go
+++ b/internal/controller/postgrescluster/postgres.go
@@ -537,7 +537,7 @@ func (r *Reconciler) reconcilePostgresUserSecrets(
 	selector, err := naming.AsSelector(naming.ClusterPostgresUsers(cluster.Name))
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.List(ctx, secrets,
+			r.Reader.List(ctx, secrets,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selector},
 			))
@@ -893,7 +893,7 @@ func (r *Reconciler) reconcilePostgresWALVolume(
 		// No WAL volume is specified; delete the PVC safely if it exists. Check
 		// the client cache first using Get.
 		key := client.ObjectKeyFromObject(pvc)
-		err := errors.WithStack(r.Client.Get(ctx, key, pvc))
+		err := errors.WithStack(r.Reader.Get(ctx, key, pvc))
 		if err != nil {
 			return nil, client.IgnoreNotFound(err)
 		}
@@ -992,7 +992,7 @@ func (r *Reconciler) reconcileDatabaseInitSQL(ctx context.Context,
 			Namespace: cluster.Namespace,
 		},
 	}
-	err := r.Client.Get(ctx, client.ObjectKeyFromObject(cm), cm)
+	err := r.Reader.Get(ctx, client.ObjectKeyFromObject(cm), cm)
 	if err != nil {
 		return "", err
 	}
diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go
index e9b6432886..f3693369d2 100644
--- a/internal/controller/postgrescluster/postgres_test.go
+++ b/internal/controller/postgrescluster/postgres_test.go
@@ -279,10 +279,7 @@ func TestGeneratePostgresParameters(t *testing.T) {
 }
 func TestGeneratePostgresUserSecret(t *testing.T) {
-	_, tClient := setupKubernetes(t)
-	require.ParallelCapacity(t, 0)
-
-	reconciler := &Reconciler{Client: tClient}
+	reconciler := &Reconciler{}
 	cluster := &v1beta1.PostgresCluster{}
 	cluster.Namespace = "ns1"
@@ -480,8 +477,8 @@ func TestReconcilePostgresVolumes(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	reconciler := &Reconciler{
-		Client: tClient,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
 	}
 	t.Run("DataVolumeNoSourceCluster", func(t *testing.T) {
@@ -584,7 +581,7 @@ volumeMode: Filesystem
 		assert.NilError(t, err)
 		// Get snapshot and update Status.ReadyToUse and CreationTime
-		err = reconciler.Client.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot)
+		err = tClient.Get(ctx, client.ObjectKeyFromObject(snapshot), snapshot)
 		assert.NilError(t, err)
 		currentTime := metav1.Now()
@@ -592,7 +589,7 @@ volumeMode: Filesystem
 		snapshot.Status = &volumesnapshotv1.VolumeSnapshotStatus{
 			ReadyToUse: initialize.Bool(true),
 			CreationTime: &currentTime,
 		}
-		err = reconciler.Client.Status().Update(ctx, snapshot)
+		err = tClient.Status().Update(ctx, snapshot)
 		assert.NilError(t, err)
 		// Reconcile volume
@@ -857,7 +854,7 @@ func TestReconcileDatabaseInitSQL(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 	r := &Reconciler{
-		Client: client,
+		Reader: client,
 		// Overwrite the PodExec function with a check to ensure the exec
 		// call would have been made
@@ -981,7 +978,7 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) {
 	require.ParallelCapacity(t, 0)
 	r := &Reconciler{
-		Client: client,
+		Reader: client,
 		// Overwrite the PodExec function with a check to ensure the exec
 		// call would have been made
diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go
index a16bd650fd..74e506f45a 100644
--- a/internal/controller/postgrescluster/snapshots.go
+++ b/internal/controller/postgrescluster/snapshots.go
@@ -196,7 +196,7 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume(
 	// Check the client cache first using Get.
 	if cluster.Spec.Backups.Snapshots == nil {
 		key := client.ObjectKeyFromObject(pvc)
-		err := errors.WithStack(r.Client.Get(ctx, key, pvc))
+		err := errors.WithStack(r.Reader.Get(ctx, key, pvc))
 		if err == nil {
 			err = errors.WithStack(r.deleteControlled(ctx, cluster, pvc))
 		}
@@ -263,13 +263,13 @@ func (r *Reconciler) reconcileDedicatedSnapshotVolume(
 		patch := client.RawPatch(client.Merge.Type(), []byte(annotations))
 		err = r.handlePersistentVolumeClaimError(cluster,
-			errors.WithStack(r.patch(ctx, pvc, patch)))
+			errors.WithStack(r.Writer.Patch(ctx, pvc, patch)))
 		if err != nil {
 			return pvc, err
 		}
-		err = r.Client.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground))
+		err = r.Writer.Delete(ctx, restoreJob, client.PropagationPolicy(metav1.DeletePropagationBackground))
 		return pvc, errors.WithStack(err)
 	}
@@ -459,7 +459,7 @@ func (r *Reconciler) getDedicatedSnapshotVolumeRestoreJob(ctx context.Context,
 	selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(postgrescluster.Name))
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.List(ctx, jobs,
+			r.Reader.List(ctx, jobs,
 				client.InNamespace(postgrescluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectJobs},
 			))
@@ -489,7 +489,7 @@ func (r *Reconciler) getLatestCompleteBackupJob(ctx context.Context,
 	selectJobs, err := naming.AsSelector(naming.ClusterBackupJobs(postgrescluster.Name))
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.List(ctx, jobs,
+			r.Reader.List(ctx, jobs,
 				client.InNamespace(postgrescluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectJobs},
 			))
@@ -555,7 +555,7 @@ func (r *Reconciler) getSnapshotsForCluster(ctx context.Context, cluster *v1beta
 	}
 	snapshots := &volumesnapshotv1.VolumeSnapshotList{}
 	err = errors.WithStack(
-		r.Client.List(ctx, snapshots,
+		r.Reader.List(ctx, snapshots,
 			client.InNamespace(cluster.Namespace),
 			client.MatchingLabelsSelector{Selector: selectSnapshots},
 		))
diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go
index af5d4d1247..83efcad704 100644
--- a/internal/controller/postgrescluster/snapshots_test.go
+++ b/internal/controller/postgrescluster/snapshots_test.go
@@ -39,9 +39,9 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 	recorder := events.NewRecorder(t, runtime.Scheme)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
 		Recorder: recorder,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -60,8 +60,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		cluster := testCluster()
 		cluster.Namespace = ns.Name
 		cluster.UID = "the-uid-123"
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create a snapshot
 		pvc := &corev1.PersistentVolumeClaim{
@@ -72,14 +72,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		volumeSnapshotClassName := "my-snapshotclass"
 		snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName)
 		assert.NilError(t, err)
-		assert.NilError(t, r.Client.Create(ctx, snapshot))
+		assert.NilError(t, cc.Create(ctx, snapshot))
 		// Get all snapshots for this cluster and assert 1 exists
 		selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name))
 		assert.NilError(t, err)
 		snapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, snapshots,
+			cc.List(ctx, snapshots,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectSnapshots},
 			))
@@ -91,7 +91,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		// Get all snapshots for this cluster and assert 0 exist
 		snapshots = &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, snapshots,
+			cc.List(ctx, snapshots,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectSnapshots},
 			))
@@ -147,8 +147,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 			},
 			DeletionPolicy: "Delete",
 		}
-		assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) })
+		assert.NilError(t, cc.Create(ctx, volumeSnapshotClass))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) })
 		// Create a cluster with snapshots enabled
 		cluster := testCluster()
@@ -156,8 +156,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: volumeSnapshotClassName,
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create pvc for reconcile
 		pvc := &corev1.PersistentVolumeClaim{
@@ -174,7 +174,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		assert.NilError(t, err)
 		snapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, snapshots,
+			cc.List(ctx, snapshots,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectSnapshots},
 			))
@@ -193,8 +193,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 			},
 			DeletionPolicy: "Delete",
 		}
-		assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) })
+		assert.NilError(t, cc.Create(ctx, volumeSnapshotClass))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) })
 		// Create a cluster with snapshots enabled
 		cluster := testCluster()
@@ -203,8 +203,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: volumeSnapshotClassName,
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create pvc with annotation
 		pvcName := initialize.String("dedicated-snapshot-volume")
@@ -240,14 +240,14 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 			},
 		}
 		assert.NilError(t, r.setControllerReference(cluster, snapshot1))
-		assert.NilError(t, r.Client.Create(ctx, snapshot1))
+		assert.NilError(t, cc.Create(ctx, snapshot1))
 		// Update snapshot status
 		truePtr := initialize.Bool(true)
 		snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{
 			ReadyToUse: truePtr,
 		}
-		assert.NilError(t, r.Client.Status().Update(ctx, snapshot1))
+		assert.NilError(t, cc.Status().Update(ctx, snapshot1))
 		// Create second snapshot with different annotation value
 		snapshot2 := &volumesnapshotv1.VolumeSnapshot{
@@ -272,13 +272,13 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 			},
 		}
 		assert.NilError(t, r.setControllerReference(cluster, snapshot2))
-		assert.NilError(t, r.Client.Create(ctx, snapshot2))
+		assert.NilError(t, cc.Create(ctx, snapshot2))
 		// Update second snapshot's status
 		snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{
 			ReadyToUse: truePtr,
 		}
-		assert.NilError(t, r.Client.Status().Update(ctx, snapshot2))
+		assert.NilError(t, cc.Status().Update(ctx, snapshot2))
 		// Reconcile
 		assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc))
@@ -288,7 +288,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		assert.NilError(t, err)
 		snapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, snapshots,
+			cc.List(ctx, snapshots,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectSnapshots},
 			))
@@ -308,8 +308,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 			},
 			DeletionPolicy: "Delete",
 		}
-		assert.NilError(t, r.Client.Create(ctx, volumeSnapshotClass))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, volumeSnapshotClass)) })
+		assert.NilError(t, cc.Create(ctx, volumeSnapshotClass))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, volumeSnapshotClass)) })
 		// Create a cluster with snapshots enabled
 		cluster := testCluster()
@@ -318,8 +318,8 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: volumeSnapshotClassName,
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create pvc with annotation
 		pvcName := initialize.String("dedicated-snapshot-volume")
@@ -340,7 +340,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) {
 		assert.NilError(t, err)
 		snapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, snapshots,
+			cc.List(ctx, snapshots,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectSnapshots},
 			))
@@ -356,9 +356,9 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 	recorder := events.NewRecorder(t, runtime.Scheme)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
 		Recorder: recorder,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	// Enable snapshots feature gate
@@ -374,8 +374,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		cluster := testCluster()
 		cluster.Namespace = ns.Name
 		cluster.UID = "the-uid-123"
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create a dedicated snapshot volume
 		pvc := &corev1.PersistentVolumeClaim{
@@ -396,14 +396,14 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		spec := testVolumeClaimSpec()
 		pvc.Spec = spec.AsPersistentVolumeClaimSpec()
 		assert.NilError(t, r.setControllerReference(cluster, pvc))
-		assert.NilError(t, r.Client.Create(ctx, pvc))
+		assert.NilError(t, cc.Create(ctx, pvc))
 		// Assert that the pvc was created
 		selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name))
 		assert.NilError(t, err)
 		pvcs := &corev1.PersistentVolumeClaimList{}
 		assert.NilError(t,
-			r.Client.List(ctx, pvcs,
+			cc.List(ctx, pvcs,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectPvcs},
 			))
@@ -419,7 +419,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		// Assert that the pvc has been deleted or marked for deletion
 		key, fetched := client.ObjectKeyFromObject(pvc), &corev1.PersistentVolumeClaim{}
-		if err := r.Client.Get(ctx, key, fetched); err == nil {
+		if err := cc.Get(ctx, key, fetched); err == nil {
 			assert.Assert(t, fetched.DeletionTimestamp != nil, "expected deleted")
 		} else {
 			assert.Assert(t, apierrors.IsNotFound(err), "expected NotFound, got %v", err)
@@ -435,8 +435,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: "my-snapshotclass",
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create volumes for reconcile
 		clusterVolumes := []*corev1.PersistentVolumeClaim{}
@@ -451,7 +451,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		assert.NilError(t, err)
 		pvcs := &corev1.PersistentVolumeClaimList{}
 		assert.NilError(t,
-			r.Client.List(ctx, pvcs,
+			cc.List(ctx, pvcs,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectPvcs},
 			))
@@ -470,18 +470,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: "my-snapshotclass",
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create successful backup job
 		backupJob := testBackupJob(cluster)
 		assert.NilError(t, r.setControllerReference(cluster, backupJob))
-		assert.NilError(t, r.Client.Create(ctx, backupJob))
+		assert.NilError(t, cc.Create(ctx, backupJob))
 		currentTime := metav1.Now()
 		startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1))
 		backupJob.Status = succeededJobStatus(startTime, currentTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, backupJob))
+		assert.NilError(t, cc.Status().Update(ctx, backupJob))
 		// Create instance set and volumes for reconcile
 		sts := &appsv1.StatefulSet{}
@@ -498,7 +498,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name))
 		assert.NilError(t, err)
 		assert.NilError(t,
-			r.Client.List(ctx, restoreJobs,
+			cc.List(ctx, restoreJobs,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectJobs},
 			))
@@ -518,8 +518,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: "my-snapshotclass",
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create times for jobs
 		currentTime := metav1.Now()
@@ -530,10 +530,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		// Create successful backup job
 		backupJob := testBackupJob(cluster)
 		assert.NilError(t, r.setControllerReference(cluster, backupJob))
-		assert.NilError(t, r.Client.Create(ctx, backupJob))
+		assert.NilError(t, cc.Create(ctx, backupJob))
 		backupJob.Status = succeededJobStatus(earlierStartTime, earlierTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, backupJob))
+		assert.NilError(t, cc.Status().Update(ctx, backupJob))
 		// Create successful restore job
 		restoreJob := testRestoreJob(cluster)
@@ -541,10 +541,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 			naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339),
 		}
 		assert.NilError(t, r.setControllerReference(cluster, restoreJob))
-		assert.NilError(t, r.Client.Create(ctx, restoreJob))
+		assert.NilError(t, cc.Create(ctx, restoreJob))
 		restoreJob.Status = succeededJobStatus(currentStartTime, currentTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, restoreJob))
+		assert.NilError(t, cc.Status().Update(ctx, restoreJob))
 		// Create instance set and volumes for reconcile
 		sts := &appsv1.StatefulSet{}
@@ -561,7 +561,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name))
 		assert.NilError(t, err)
 		assert.NilError(t,
-			r.Client.List(ctx, restoreJobs,
+			cc.List(ctx, restoreJobs,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selectJobs},
 			))
@@ -583,8 +583,8 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{
 			VolumeSnapshotClassName: "my-snapshotclass",
 		}
-		assert.NilError(t, r.Client.Create(ctx, cluster))
-		t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) })
+		assert.NilError(t, cc.Create(ctx, cluster))
+		t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, cluster)) })
 		// Create times for jobs
 		currentTime := metav1.Now()
@@ -594,10 +594,10 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 		// Create successful backup job
 		backupJob := testBackupJob(cluster)
 		assert.NilError(t, r.setControllerReference(cluster, backupJob))
-		assert.NilError(t, r.Client.Create(ctx, backupJob))
+		assert.NilError(t, cc.Create(ctx, backupJob))
 		backupJob.Status = succeededJobStatus(startTime, earlierTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, backupJob))
+		assert.NilError(t, cc.Status().Update(ctx, backupJob))
 		// Create failed restore job
 		restoreJob := testRestoreJob(cluster)
@@ -605,13 +605,13 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) {
 			naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339),
 		}
 		assert.NilError(t, r.setControllerReference(cluster, restoreJob))
-		assert.NilError(t, r.Client.Create(ctx, restoreJob))
+		assert.NilError(t, cc.Create(ctx, restoreJob))
 		restoreJob.Status = batchv1.JobStatus{
 			Succeeded: 0,
 			Failed: 1,
 		}
-		assert.NilError(t, r.Client.Status().Update(ctx, restoreJob))
+		assert.NilError(t, cc.Status().Update(ctx, restoreJob))
 		// Setup instances and volumes for reconcile
 		sts := &appsv1.StatefulSet{}
@@ -639,8 +639,7 @@ func TestCreateDedicatedSnapshotVolume(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -667,8 +666,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -695,7 +693,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) {
 	selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name))
 	assert.NilError(t, err)
 	assert.NilError(t,
-		r.Client.List(ctx, jobs,
+		cc.List(ctx, jobs,
 			client.InNamespace(cluster.Namespace),
 			client.MatchingLabelsSelector{Selector: selectJobs},
 		))
@@ -709,8 +707,7 @@ func TestGenerateSnapshotOfDedicatedSnapshotVolume(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
 	}
 	ns := setupNamespace(t, cc)
@@ -740,8 +737,7 @@ func TestGenerateVolumeSnapshot(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
 	}
 	ns := setupNamespace(t, cc)
@@ -769,8 +765,8 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -787,7 +783,7 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) {
 		job1 := testRestoreJob(cluster)
 		job1.Namespace = ns.Name
-		err := r.Client.Create(ctx, job1)
+		err := cc.Create(ctx, job1)
 		assert.NilError(t, err)
 		dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster)
@@ -803,14 +799,14 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) {
 			naming.PGBackRestBackupJobCompletion: "backup-timestamp",
 		}
-		err := r.Client.Create(ctx, job2)
+		err := cc.Create(ctx, job2)
 		assert.NilError(t, err)
 		job3 := testRestoreJob(cluster)
 		job3.Name = "restore-job-3"
 		job3.Namespace = ns.Name
-		assert.NilError(t, r.Client.Create(ctx, job3))
+		assert.NilError(t, cc.Create(ctx, job3))
 		dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster)
 		assert.NilError(t, err)
@@ -824,8 +820,8 @@ func TestGetLatestCompleteBackupJob(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -842,7 +838,7 @@ func TestGetLatestCompleteBackupJob(t *testing.T) {
 		job1 := testBackupJob(cluster)
 		job1.Namespace = ns.Name
-		err := r.Client.Create(ctx, job1)
+		err := cc.Create(ctx, job1)
 		assert.NilError(t, err)
 		latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster)
@@ -867,13 +863,13 @@ func TestGetLatestCompleteBackupJob(t *testing.T) {
 		job2.Namespace = ns.Name
 		job2.Name = "backup-job-2"
-		assert.NilError(t, r.Client.Create(ctx, job2))
+		assert.NilError(t, cc.Create(ctx, job2))
 		// Get job1 and update Status.
-		assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1))
 		job1.Status = succeededJobStatus(currentStartTime, currentTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, job1))
+		assert.NilError(t, cc.Status().Update(ctx, job1))
 		latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster)
 		assert.NilError(t, err)
@@ -903,16 +899,16 @@ func TestGetLatestCompleteBackupJob(t *testing.T) {
 		assert.NilError(t, r.apply(ctx, job2))
 		// Get job1 and update Status.
-		assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job1), job1))
 		job1.Status = succeededJobStatus(currentStartTime, currentTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, job1))
+		assert.NilError(t, cc.Status().Update(ctx, job1))
 		// Get job2 and update Status.
-		assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2))
+		assert.NilError(t, cc.Get(ctx, client.ObjectKeyFromObject(job2), job2))
 		job2.Status = succeededJobStatus(earlierStartTime, earlierTime)
-		assert.NilError(t, r.Client.Status().Update(ctx, job2))
+		assert.NilError(t, cc.Status().Update(ctx, job2))
 		latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster)
 		assert.NilError(t, err)
@@ -1024,8 +1020,8 @@ func TestGetSnapshotsForCluster(t *testing.T) {
 	require.ParallelCapacity(t, 1)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Reader: cc,
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
@@ -1054,7 +1050,7 @@ func TestGetSnapshotsForCluster(t *testing.T) {
 		}
 		snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name")
 		snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name")
-		assert.NilError(t, r.Client.Create(ctx, snapshot))
+		assert.NilError(t, cc.Create(ctx, snapshot))
 		snapshots, err := r.getSnapshotsForCluster(ctx, cluster)
 		assert.NilError(t, err)
@@ -1095,7 +1091,7 @@ func TestGetSnapshotsForCluster(t *testing.T) {
 		}
 		snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name")
 		snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name")
-		assert.NilError(t, r.Client.Create(ctx, snapshot2))
+		assert.NilError(t, cc.Create(ctx, snapshot2))
 		snapshots, err := r.getSnapshotsForCluster(ctx, cluster)
 		assert.NilError(t, err)
@@ -1242,25 +1238,24 @@ func TestDeleteSnapshots(t *testing.T) {
 	_, cc := setupKubernetes(t)
 	r := &Reconciler{
-		Client: cc,
-		Owner: client.FieldOwner(t.Name()),
+		Writer: client.WithFieldOwner(cc, t.Name()),
 	}
 	ns := setupNamespace(t, cc)
 	cluster := testCluster()
 	cluster.Namespace = ns.Name
 	cluster.UID = "the-uid-123"
-	assert.NilError(t, r.Client.Create(ctx, cluster))
+	assert.NilError(t, cc.Create(ctx, cluster))
 	rhinoCluster := testCluster()
 	rhinoCluster.Name = "rhino"
 	rhinoCluster.Namespace = ns.Name
 	rhinoCluster.UID = "the-uid-456"
-	assert.NilError(t, r.Client.Create(ctx, rhinoCluster))
+	assert.NilError(t, cc.Create(ctx, rhinoCluster))
 	t.Cleanup(func() {
-		assert.Check(t, r.Client.Delete(ctx, cluster))
-		assert.Check(t, r.Client.Delete(ctx, rhinoCluster))
+		assert.Check(t, cc.Delete(ctx, cluster))
+		assert.Check(t, cc.Delete(ctx, rhinoCluster))
 	})
 	t.Run("NoSnapshots", func(t *testing.T) {
@@ -1287,7 +1282,7 @@ func TestDeleteSnapshots(t *testing.T) {
 			},
 		}
 		assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1))
-		assert.NilError(t, r.Client.Create(ctx, snapshot1))
+		assert.NilError(t, cc.Create(ctx, snapshot1))
 		snapshots := []*volumesnapshotv1.VolumeSnapshot{
 			snapshot1,
@@ -1295,7 +1290,7 @@ func TestDeleteSnapshots(t *testing.T) {
 		}
 		assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots))
 		existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, existingSnapshots,
+			cc.List(ctx, existingSnapshots,
 				client.InNamespace(ns.Namespace),
 			))
 		assert.Equal(t, len(existingSnapshots.Items), 1)
@@ -1337,7 +1332,7 @@ func TestDeleteSnapshots(t *testing.T) {
 			},
 		}
 		assert.NilError(t, r.setControllerReference(cluster, snapshot2))
-		assert.NilError(t, r.Client.Create(ctx, snapshot2))
+		assert.NilError(t, cc.Create(ctx, snapshot2))
 		snapshots := []*volumesnapshotv1.VolumeSnapshot{
 			snapshot1, snapshot2,
@@ -1345,7 +1340,7 @@ func TestDeleteSnapshots(t *testing.T) {
 		}
 		assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshots))
 		existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{}
 		assert.NilError(t,
-			r.Client.List(ctx, existingSnapshots,
+			cc.List(ctx, existingSnapshots,
 				client.InNamespace(ns.Namespace),
 			))
 		assert.Equal(t, len(existingSnapshots.Items), 1)
diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go
index a26fa05e78..93c8ded149 100644
--- a/internal/controller/postgrescluster/volumes.go
+++ b/internal/controller/postgrescluster/volumes.go
@@ -18,7 +18,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"github.com/crunchydata/postgres-operator/internal/config"
 	"github.com/crunchydata/postgres-operator/internal/initialize"
@@ -41,7 +40,7 @@ func (r *Reconciler) observePersistentVolumeClaims(
 	selector, err := naming.AsSelector(naming.Cluster(cluster.Name))
 	if err == nil {
 		err = errors.WithStack(
-			r.Client.List(ctx, volumes,
+			r.Reader.List(ctx, volumes,
 				client.InNamespace(cluster.Namespace),
 				client.MatchingLabelsSelector{Selector: selector},
 			))
@@ -392,7 +391,7 @@ func (r *Reconciler) reconcileDirMoveJobs(ctx context.Context,
 		cluster.Spec.DataSource.Volumes != nil {
 		var list batchv1.JobList
-		if err := r.Client.List(ctx, &list, &client.ListOptions{
+		if err := r.Reader.List(ctx, &list, &client.ListOptions{
 			Namespace: cluster.Namespace,
 			LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(),
 		}); err != nil {
@@ -547,8 +546,7 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context,
 	// set gvk and ownership refs
 	moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job"))
-	if err := controllerutil.SetControllerReference(cluster, moveDirJob,
-		r.Client.Scheme()); err != nil {
+	if err := r.setControllerReference(cluster, moveDirJob); err != nil {
 		return true, errors.WithStack(err)
 	}
@@ -666,8 +664,7 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context,
 	// set gvk and ownership refs
 	moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job"))
-	if err := controllerutil.SetControllerReference(cluster, moveDirJob,
-		r.Client.Scheme()); err != nil {
+	if err := r.setControllerReference(cluster, moveDirJob); err != nil {
 		return true, errors.WithStack(err)
 	}
@@ -788,8 +785,7 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context,
 	// set gvk and ownership refs
 	moveDirJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job"))
-	if err := controllerutil.SetControllerReference(cluster, moveDirJob,
-		r.Client.Scheme()); err != nil {
+	if err := r.setControllerReference(cluster, moveDirJob); err != nil {
 		return true, errors.WithStack(err)
 	}
diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go
index 85087d079b..e9ebaebd6f 100644
--- a/internal/controller/postgrescluster/volumes_test.go
+++ b/internal/controller/postgrescluster/volumes_test.go
@@ -375,7 +375,10 @@ func TestReconcileConfigureExistingPVCs(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 	ns := setupNamespace(t, tClient)
 	cluster := &v1beta1.PostgresCluster{
@@ -635,7 +638,10 @@ func TestReconcileMoveDirectories(t *testing.T) {
 	_, tClient := setupKubernetes(t)
 	require.ParallelCapacity(t, 1)
-	r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())}
+	r := &Reconciler{
+		Reader: tClient,
+		Writer: client.WithFieldOwner(tClient, t.Name()),
+	}
 	ns := setupNamespace(t, tClient)
 	cluster := &v1beta1.PostgresCluster{
@@ -728,7 +734,7 @@ func TestReconcileMoveDirectories(t *testing.T) {
 	assert.Assert(t, returnEarly)
 	moveJobs := &batchv1.JobList{}
-	err = r.Client.List(ctx, moveJobs, &client.ListOptions{
+	err = tClient.List(ctx, moveJobs, &client.ListOptions{
 		Namespace: cluster.Namespace,
 		LabelSelector: naming.DirectoryMoveJobLabels(cluster.Name).AsSelector(),
 	})