From fe960adf8d5d89c3b0e57891c1f5441e2479ff55 Mon Sep 17 00:00:00 2001 From: Drew Sessler Date: Wed, 3 Sep 2025 11:29:05 -0700 Subject: [PATCH] Add LoggingConfiguration struct for providing log path to pgbackrest processes. Add ability to send pgbackrest logs to additional volume for instance sidecar, repo host, and backups jobs. Add validation to prevent incorrect log paths or setting pgbackrest log-path via global. Add appropriate unit and integration tests. --- ...ator.crunchydata.com_postgresclusters.yaml | 75 ++++- internal/collector/pgbackrest.go | 10 +- internal/collector/pgbackrest_test.go | 18 +- internal/collector/postgres.go | 8 +- .../controller/postgrescluster/instance.go | 4 +- .../controller/postgrescluster/pgbackrest.go | 57 ++-- .../postgrescluster/pgbackrest_test.go | 132 +++++++- internal/pgbackrest/config.go | 70 ++-- internal/pgbackrest/config_test.go | 184 +++++++++++ internal/postgres/config.go | 4 +- .../testing/validation/pgbackrest_test.go | 310 ++++++++++++++++++ internal/util/pgbackrest.go | 26 ++ internal/util/pgbackrest_test.go | 42 +++ .../v1/pgbackrest_types.go | 18 + .../v1/postgrescluster_types.go | 9 +- .../v1/zz_generated.deepcopy.go | 16 + .../v1beta1/pgbackrest_types.go | 14 +- .../v1beta1/postgrescluster_types.go | 3 + .../v1beta1/shared_types.go | 7 + .../v1beta1/zz_generated.deepcopy.go | 30 ++ 20 files changed, 941 insertions(+), 96 deletions(-) create mode 100644 internal/testing/validation/pgbackrest_test.go create mode 100644 internal/util/pgbackrest.go create mode 100644 internal/util/pgbackrest_test.go create mode 100644 pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index c863abe73c..5a250c9350 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ 
b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -151,7 +151,7 @@ spec: properties: configuration: description: |- - Projected volumes containing custom pgBackRest configuration. These files are mounted + Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html @@ -1424,6 +1424,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in Backup Job Pods. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest backup Job pods. @@ -1583,6 +1591,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. + properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -2551,6 +2567,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. 
Changing this value @@ -4562,6 +4586,21 @@ spec: required: - repos type: object + x-kubernetes-validations: + - message: pgbackrest sidecar log path is restricted to an existing + additional volume + rule: '!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")' + - message: repo host log path is restricted to an existing additional + volume + rule: '!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, + self.repoHost.log.path.startsWith("/volumes/"+x.name))' + - message: backup jobs log path is restricted to an existing additional + volume + rule: '!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, + self.jobs.log.path.startsWith("/volumes/"+x.name))' + - message: pgbackrest log-path must be set via the various log.path + fields in the spec + rule: '!self.?global["log-path"].hasValue()' snapshots: description: VolumeSnapshot configuration properties: @@ -11209,6 +11248,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. properties: additional: description: Additional pre-existing volumes to add to the @@ -18416,6 +18456,12 @@ spec: || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true) + - fieldPath: .backups.pgbackrest.log.path + message: all instances need an additional volume for pgbackrest sidecar + to log in "/volumes" + rule: self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") + || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, + v.startsWith("/volumes/" + volume.name)))).orValue(true) status: description: PostgresClusterStatus defines the observed state of PostgresCluster properties: @@ -18951,7 +18997,7 @@ spec: properties: configuration: description: |- - Projected volumes containing custom pgBackRest configuration. 
These files are mounted + Projected volumes containing custom pgBackRest configuration. These files are mounted under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the PostgreSQL Operator: https://pgbackrest.org/configuration.html @@ -20224,6 +20270,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in Backup Job Pods. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest backup Job pods. @@ -20383,6 +20437,14 @@ spec: x-kubernetes-list-type: map type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in postgres instance pods. + properties: + path: + maxLength: 256 + type: string + type: object manual: description: Defines details for manual pgBackRest backup Jobs @@ -21351,6 +21413,14 @@ spec: x-kubernetes-list-type: atomic type: object type: object + log: + description: Logging configuration for pgbackrest processes + running in the repo host pod. + properties: + path: + maxLength: 256 + type: string + type: object priorityClassName: description: |- Priority class name for the pgBackRest repo host pod. Changing this value @@ -30002,6 +30072,7 @@ spec: type: object type: array volumes: + description: Volumes to be added to the instance set. 
properties: additional: description: Additional pre-existing volumes to add to the diff --git a/internal/collector/pgbackrest.go b/internal/collector/pgbackrest.go index 75cc9a55c1..1d51a22181 100644 --- a/internal/collector/pgbackrest.go +++ b/internal/collector/pgbackrest.go @@ -8,7 +8,6 @@ import ( "context" _ "embed" "encoding/json" - "fmt" "slices" "github.com/crunchydata/postgres-operator/internal/naming" @@ -25,19 +24,12 @@ func NewConfigForPgBackrestRepoHostPod( ctx context.Context, spec *v1beta1.InstrumentationSpec, repos []v1beta1.PGBackRestRepo, + directory string, ) *Config { config := NewConfig(spec) if OpenTelemetryLogsEnabled(ctx, spec) { - var directory string - for _, repo := range repos { - if repo.Volume != nil { - directory = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } - // We should only enter this function if a PVC is assigned for a dedicated repohost // but if we don't have one, exit early. if directory == "" { diff --git a/internal/collector/pgbackrest_test.go b/internal/collector/pgbackrest_test.go index 2b26d40531..653b8b7806 100644 --- a/internal/collector/pgbackrest_test.go +++ b/internal/collector/pgbackrest_test.go @@ -30,8 +30,7 @@ func TestNewConfigForPgBackrestRepoHostPod(t *testing.T) { } var instrumentation *v1beta1.InstrumentationSpec require.UnmarshalInto(t, &instrumentation, `{}`) - - config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos, "/test/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -43,7 +42,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /test/directory/receiver fsync: true processors: batch/1s: @@ -101,8 +100,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /test/directory/*.log + - /test/directory/*.log.1 
multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs @@ -136,8 +135,7 @@ service: Volume: new(v1beta1.RepoPVC), }, } - - config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos) + config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos, "/another/directory") result, err := config.ToYAML() assert.NilError(t, err) @@ -153,7 +151,7 @@ exporters: extensions: file_storage/pgbackrest_logs: create_directory: false - directory: /pgbackrest/repo1/log/receiver + directory: /another/directory/receiver fsync: true processors: batch/1s: @@ -211,8 +209,8 @@ processors: receivers: filelog/pgbackrest_log: include: - - /pgbackrest/repo1/log/*.log - - /pgbackrest/repo1/log/*.log.1 + - /another/directory/*.log + - /another/directory/*.log.1 multiline: line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19} storage: file_storage/pgbackrest_logs diff --git a/internal/collector/postgres.go b/internal/collector/postgres.go index b73ae91a25..ca627a8fda 100644 --- a/internal/collector/postgres.go +++ b/internal/collector/postgres.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -244,8 +245,9 @@ func EnablePostgresLogging( } // pgBackRest pipeline + pgBackRestLogPath := util.GetPGBackRestLogPathForInstance(inCluster) outConfig.Extensions["file_storage/pgbackrest_logs"] = map[string]any{ - "directory": naming.PGBackRestPGDataLogPath + "/receiver", + "directory": pgBackRestLogPath + "/receiver", "create_directory": false, "fsync": true, } @@ -258,8 +260,8 @@ func EnablePostgresLogging( // a log record or two to the old file while rotation is occurring. // The collector knows not to create duplicate logs. 
"include": []string{ - naming.PGBackRestPGDataLogPath + "/*.log", - naming.PGBackRestPGDataLogPath + "/*.log.1", + pgBackRestLogPath + "/*.log", + pgBackRestLogPath + "/*.log.1", }, "storage": "file_storage/pgbackrest_logs", diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index a700aa1f95..b000074e59 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1205,7 +1205,7 @@ func (r *Reconciler) reconcileInstance( // TODO(sidecar): Create these directories sometime other than startup. collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template, []corev1.VolumeMount{postgres.DataVolumeMount()}, pgPassword, - []string{naming.PGBackRestPGDataLogPath}, includeLogrotate, true) + []string{util.GetPGBackRestLogPathForInstance(cluster)}, includeLogrotate, true) } // Add postgres-exporter to the instance Pod spec @@ -1433,7 +1433,7 @@ func (r *Reconciler) reconcileInstanceConfigMap( collector.AddLogrotateConfigs(ctx, cluster.Spec.Instrumentation, instanceConfigMap, []collector.LogrotateConfig{{ - LogFiles: []string{naming.PGBackRestPGDataLogPath + "/*.log"}, + LogFiles: []string{util.GetPGBackRestLogPathForInstance(cluster) + "/*.log"}, }}) } } diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 0acb86513f..d31350fd42 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "io" + "path/filepath" "reflect" "regexp" "sort" @@ -38,6 +39,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/shell" 
"github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -821,7 +823,13 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, } } else { - container.Command = []string{"/bin/pgbackrest", "backup"} + mkdirCommand := "" + cloudLogPath := getCloudLogPath(postgresCluster) + if cloudLogPath != "" { + mkdirCommand += shell.MakeDirectories(cloudLogPath, cloudLogPath) + "; " + } + + container.Command = []string{"sh", "-c", "--", mkdirCommand + `exec "$@"`, "--", "/bin/pgbackrest", "backup"} container.Command = append(container.Command, cmdOpts...) } @@ -885,8 +893,8 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template) // Mount the PVC named in the "pgbackrest-cloud-log-volume" annotation, if any. - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolumeName) + if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume) } } @@ -2075,28 +2083,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { - // If the user has specified a PVC to use as a log volume for cloud backups via the - // PGBackRestCloudLogVolume annotation, check for the PVC. If we find it, set the cloud - // log path. If the user has specified a PVC, but we can't find it, create a warning event. 
- cloudLogPath := "" - if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { - logVolume := &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: logVolumeName, - Namespace: postgresCluster.GetNamespace(), - }, - } - err := errors.WithStack(r.Client.Get(ctx, - client.ObjectKeyFromObject(logVolume), logVolume)) - if err != nil { - // PVC not retrieved, create warning event - r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, - "PGBackRestCloudLogVolumeNotFound", err.Error()) - } else { - // We successfully found the specified PVC, so we will set the log path - cloudLogPath = "/volumes/" + logVolumeName - } - } + cloudLogPath := getCloudLogPath(postgresCluster) backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, cloudLogPath, instanceNames) @@ -3351,3 +3338,23 @@ func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCl } return false } + +// getCloudLogPath is responsible for determining the appropriate log path for pgbackrest +// in cloud backup jobs. If the user has specified a PVC to use as a log volume for cloud +// backups via the PGBackRestCloudLogVolume annotation, set the cloud log path accordingly. +// If the user has not set the PGBackRestCloudLogVolume annotation, but has set a log path +// via the spec, use that. +// TODO: Make sure this is what we want (i.e. annotation to take precedence over spec) +// +// This function assumes that the backups/pgbackrest spec is present in postgresCluster. 
+func getCloudLogPath(postgresCluster *v1beta1.PostgresCluster) string { + cloudLogPath := "" + if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" { + cloudLogPath = "/volumes/" + logVolume + } else if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log != nil && + postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path != "" { + cloudLogPath = filepath.Clean(postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path) + } + return cloudLogPath +} diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index d87223a2ee..b4e590411a 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -2663,6 +2663,11 @@ func TestGenerateBackupJobIntent(t *testing.T) { assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -2965,6 +2970,12 @@ volumes: assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` containers: - command: + - sh + - -c + - -- + - mkdir -p '/volumes/another-pvc' && { chmod 0775 '/volumes/another-pvc' || :; }; + exec "$@" + - -- - /bin/pgbackrest - backup - --stanza=db @@ -3031,7 +3042,11 @@ volumes: cluster := cluster.DeepCopy() cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/stuff/log", + }, Volumes: &v1beta1.PGBackRestVolumesSpec{ Additional: []v1beta1.AdditionalVolume{ { @@ -3048,15 +3063,66 @@ volumes: nil, nil, ) - for _, container := range spec.Template.Spec.Containers { - assert.Assert(t, cmp.MarshalContains(container.VolumeMounts, - ` -- mountPath: /volumes/stuff - name: volumes-stuff`)) - } - - assert.Assert(t, cmp.MarshalContains(spec.Template.Spec.Volumes, - ` + 
assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - sh + - -c + - -- + - mkdir -p '/volumes/stuff/log' && { chmod 0775 '/volumes/stuff/log' || :; }; exec + "$@" + - -- + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/stuff + name: volumes-stuff +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp - name: volumes-stuff persistentVolumeClaim: claimName: additional-pvc`)) @@ -4497,3 +4563,51 @@ func TestGetRepoHostVolumeRequests(t *testing.T) { }) } } + +func TestGetCloudLogPath(t *testing.T) { + t.Run("NoAnnotationNoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{} + assert.Equal(t, getCloudLogPath(postgrescluster), "") + }) + + t.Run("AnnotationSetNoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{} + postgrescluster.Annotations = map[string]string{} + postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc") + }) + + 
t.Run("NoAnnotationSpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Jobs: &v1beta1.BackupJobs{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log/", + }, + }, + }, + }, + }, + } + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/test/log") + }) + + t.Run("BothAnnotationAndSpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log", + }, + }, + }, + }, + } + postgrescluster.Annotations = map[string]string{} + postgrescluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" + assert.Equal(t, getCloudLogPath(postgrescluster), "/volumes/another-pvc") + }) +} diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index 090f119d1c..f1d1fc30f8 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -71,8 +71,8 @@ const ( // CreatePGBackRestConfigMapIntent creates a configmap struct with pgBackRest pgbackrest.conf settings in the data field. // The keys within the data field correspond to the use of that configuration. 
-// pgbackrest_job.conf is used by certain jobs, such as stanza create and backup -// pgbackrest_primary.conf is used by the primary database pod +// pgbackrest-server.conf is used by the pgBackRest TLS server +// pgbackrest_instance.conf is used by the primary database pod // pgbackrest_repo.conf is used by the pgBackRest repository pod // pgbackrest_cloud.conf is used by cloud repo backup jobs func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, @@ -112,6 +112,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet strconv.Itoa(postgresCluster.Spec.PostgresVersion), pgPort, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, + util.GetPGBackRestLogPathForInstance(postgresCluster), ).String() // As the cluster transitions from having a repository host to having none, @@ -122,6 +123,9 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet serverConfig(postgresCluster).String() if RepoHostVolumeDefined(postgresCluster) && repoHostName != "" { + // Get pgbackrest log path for repo host pod + pgBackRestLogPath := generateRepoHostLogPath(postgresCluster) + cm.Data[CMRepoKey] = iniGeneratedWarning + populateRepoHostConfigurationMap( serviceName, serviceNamespace, @@ -130,26 +134,20 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet pgPort, instanceNames, postgresCluster.Spec.Backups.PGBackRest.Repos, postgresCluster.Spec.Backups.PGBackRest.Global, + pgBackRestLogPath, ).String() if collector.OpenTelemetryLogsOrMetricsEnabled(ctx, postgresCluster) { - err = collector.AddToConfigMap(ctx, collector.NewConfigForPgBackrestRepoHostPod( ctx, postgresCluster.Spec.Instrumentation, postgresCluster.Spec.Backups.PGBackRest.Repos, + pgBackRestLogPath, ), cm) // If OTel logging is enabled, add logrotate config for the RepoHost if err == nil && collector.OpenTelemetryLogsEnabled(ctx, postgresCluster) { - 
var pgBackRestLogPath string - for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } collector.AddLogrotateConfigs(ctx, postgresCluster.Spec.Instrumentation, cm, []collector.LogrotateConfig{{ LogFiles: []string{pgBackRestLogPath + "/*.log"}, @@ -180,13 +178,7 @@ func CreatePGBackRestConfigMapIntent(ctx context.Context, postgresCluster *v1bet func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, cluster *v1beta1.PostgresCluster) string { - var pgBackRestLogPath string - for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { - if repo.Volume != nil { - pgBackRestLogPath = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) - break - } - } + pgBackRestLogPath := generateRepoHostLogPath(cluster) container := corev1.Container{ // TODO(log-rotation): The second argument here should be the path @@ -380,7 +372,7 @@ func populatePGInstanceConfigurationMap( serviceName, serviceNamespace, repoHostName, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, pgBackRestLogPath string, ) iniSectionSet { // TODO(cbandy): pass a FQDN in already. @@ -396,7 +388,7 @@ func populatePGInstanceConfigurationMap( // pgBackRest spool-path should always be co-located with the Postgres WAL path. 
global.Set("spool-path", "/pgdata/pgbackrest-spool") // pgBackRest will log to the pgData volume for commands run on the PostgreSQL instance - global.Set("log-path", naming.PGBackRestPGDataLogPath) + global.Set("log-path", pgBackRestLogPath) for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -450,13 +442,12 @@ func populateRepoHostConfigurationMap( serviceName, serviceNamespace, pgdataDir, fetchKeyCommand, postgresVersion string, pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, - globalConfig map[string]string, + globalConfig map[string]string, logPath string, ) iniSectionSet { global := iniMultiSet{} stanza := iniMultiSet{} - var pgBackRestLogPathSet bool for _, repo := range repos { global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) @@ -468,20 +459,14 @@ func populateRepoHostConfigurationMap( global.Set(option, val) } } - - if !pgBackRestLogPathSet && repo.Volume != nil { - // pgBackRest will log to the first configured repo volume when commands - // are run on the pgBackRest repo host. With our previous check in - // RepoHostVolumeDefined(), we've already validated that at least one - // defined repo has a volume. - global.Set("log-path", fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)) - pgBackRestLogPathSet = true - } } - // If no log path was set, don't log because the default path is not writable. - if !pgBackRestLogPathSet { + // If no log path was provided, don't log because the default path is not writable. + // Otherwise, set the log-path. 
+ if logPath == "" { global.Set("log-level-file", "off") + } else { + global.Set("log-path", logPath) } for option, val := range globalConfig { @@ -818,3 +803,24 @@ func serverConfig(cluster *v1beta1.PostgresCluster) iniSectionSet { "global:server": server, } } + +// generateRepoHostLogPath takes a postgrescluster and returns the log path that +// should be used by pgbackrest in the Repo Host Pod based on the repos specified +// and whether the user has specified a log path. +// +// This function assumes that the backups/pgbackrest spec is present in cluster. +func generateRepoHostLogPath(cluster *v1beta1.PostgresCluster) string { + for _, repo := range cluster.Spec.Backups.PGBackRest.Repos { + if repo.Volume != nil { + // If the user has set a log path in the spec, use it. + // Otherwise, default to /pgbackrest/repo#/log + if cluster.Spec.Backups.PGBackRest.RepoHost != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log != nil && + cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path != "" { + return cluster.Spec.Backups.PGBackRest.RepoHost.Log.Path + } + return fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name) + } + } + return "" +} diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index 4617b3a80a..91ce833c03 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -451,6 +451,104 @@ pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") }) + t.Run("LoggingToAdditionalVolume", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "guitar" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test", + }, + Volumes: &v1beta1.PGBackRestVolumesSpec{ + Additional: []v1beta1.AdditionalVolume{ + { + ClaimName: "some-pvc", + Name: "test", + }, + }, + }, + } + + configmap, err := 
CreatePGBackRestConfigMapIntent(context.Background(), cluster, + "repo-hostname", "anumber", "pod-service-name", "test-ns", "", + []string{"some-instance"}) + + assert.NilError(t, err) + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@guitar=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. 
DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /volumes/test +repo1-path = /pgbackrest/repo1 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") + }) + t.Run("CustomMetadata", func(t *testing.T) { cluster := cluster.DeepCopy() cluster.Spec.Metadata = &v1beta1.Metadata{ @@ -799,3 +897,89 @@ log-level-stderr = error log-timestamp = n `) } + +func TestGenerateRepoHostLogPath(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Namespace = "ns1" + cluster.Name = "hippo-dance" + + cluster.Spec.Port = initialize.Int32(2345) + cluster.Spec.PostgresVersion = 12 + + cluster.Spec.Backups = v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + } + + t.Run("NoReposNoRepoHost", func(t *testing.T) { + cluster := cluster.DeepCopy() + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("NoVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "") + }) + + t.Run("OneVolumeRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("TwoVolumeRepos", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + 
Volume: &v1beta1.RepoPVC{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo1/log") + }) + + t.Run("VolumeRepoNotFirst", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{}, + }, + { + Name: "repo2", + Volume: &v1beta1.RepoPVC{}, + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/pgbackrest/repo2/log") + }) + + t.Run("LogPathSpecified", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + cluster.Spec.Backups.PGBackRest.RepoHost = &v1beta1.PGBackRestRepoHost{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/some/directory", + }, + } + assert.Equal(t, generateRepoHostLogPath(cluster), "/some/directory") + }) +} diff --git a/internal/postgres/config.go b/internal/postgres/config.go index 1e9f52a7e7..0300d4d34b 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -423,8 +423,8 @@ func startupCommand( `(`+shell.MakeDirectories(dataMountPath, naming.PatroniPGDataLogPath)+`) ||`, `halt "$(permissions `+shell.QuoteWord(naming.PatroniPGDataLogPath)+` ||:)"`, - `(`+shell.MakeDirectories(dataMountPath, naming.PGBackRestPGDataLogPath)+`) ||`, - `halt "$(permissions `+shell.QuoteWord(naming.PGBackRestPGDataLogPath)+` ||:)"`, + `(`+shell.MakeDirectories(dataMountPath, util.GetPGBackRestLogPathForInstance(cluster))+`) ||`, + `halt "$(permissions `+shell.QuoteWord(util.GetPGBackRestLogPathForInstance(cluster))+` ||:)"`, ) pg_rewind_override := "" diff --git a/internal/testing/validation/pgbackrest_test.go b/internal/testing/validation/pgbackrest_test.go new file mode 100644 index 0000000000..6226967057 --- /dev/null +++ b/internal/testing/validation/pgbackrest_test.go @@ -0,0 +1,310 @@ +// Copyright 2021 - 2025 Crunchy 
Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package validation + +import ( + "context" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1" +) + +func TestV1PGBackRestLogging(t *testing.T) { + ctx := context.Background() + cc := require.Kubernetes(t) + t.Parallel() + + namespace := require.Namespace(t, cc) + + base := v1.NewPostgresCluster() + base.Namespace = namespace.Name + base.Name = "pgbackrest-logging" + // required fields + require.UnmarshalInto(t, &base.Spec, `{ + postgresVersion: 16, + instances: [{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }], + backups: { + pgbackrest: { + repos: [{ + name: repo1, + }] + }, + }, + }`) + + assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), + "expected this base to be valid") + + t.Run("Cannot set log-path via global", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + global: { + log-path: "/anything" + } + }`) + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest log-path must be set via the various log.path fields in the spec") + }) + + t.Run("Cannot set pgbackrest sidecar's log.path without correct subdir", func(t *testing.T) { + tmp := base.DeepCopy() + + t.Run("Wrong subdir", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/something/wrong" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "pgbackrest sidecar log path is restricted to an existing additional volume") + }) 
+ + t.Run("Single instance - missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Multiple instances - one missing additional volume", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Single instance - additional volume present", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + + t.Run("Multiple instances - additional volume present but not matching path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, 
&tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "another", + claimName: "another-pvc-claim" + }] + } + }]`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`) + }) + + t.Run("Multiple instances - additional volumes present and matching log path", func(t *testing.T) { + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + log: { + path: "/volumes/test" + } + }`) + + require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{ + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "pvc-claim" + }] + } + },{ + name: "instance2", + dataVolumeClaimSpec: { + accessModes: [ReadWriteOnce], + resources: { requests: { storage: 1Mi } }, + }, + volumes: { + additional: [{ + name: "test", + claimName: "another-pvc-claim" + }] + } + }]`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid") + }) + }) + + t.Run("Cannot set logging on volumes that don't exist", func(t *testing.T) { + t.Run("Repo Host", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + repoHost: { + log: { + path: "/volumes/wrong" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "repo host log path is restricted to 
an existing additional volume") + }) + + t.Run("Backup Jobs", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + jobs: { + log: { + path: "/volumes/wrong" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) + assert.Assert(t, apierrors.IsInvalid(err)) + assert.ErrorContains(t, err, "backup jobs log path is restricted to an existing additional volume") + }) + }) + + t.Run("Can set logging on volumes that do exist", func(t *testing.T) { + t.Run("Repo Host", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + repoHost: { + log: { + path: "/volumes/logging/logs" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), + "expected this configuration to be valid") + }) + + t.Run("Backup Jobs", func(t *testing.T) { + tmp := base.DeepCopy() + + require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{ + jobs: { + log: { + path: "/volumes/logging/logs" + }, + volumes: { + additional: [ + { + name: logging, + claimName: required-1 + }] + } + } + }`) + + assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), + "expected this configuration to be valid") + }) + }) +} diff --git a/internal/util/pgbackrest.go b/internal/util/pgbackrest.go new file mode 100644 index 0000000000..8452c16b9d --- /dev/null +++ b/internal/util/pgbackrest.go @@ -0,0 +1,26 @@ +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "path/filepath" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// GetPGBackRestLogPathForInstance determines the appropriate log path for pgbackrest +// in instance pods. If the user has set a log path via the spec, use that. Otherwise, use +// the default log path set in the naming package. Ensure trailing slashes are trimmed. +// +// This function assumes that the backups/pgbackrest spec is present in postgresCluster. +func GetPGBackRestLogPathForInstance(postgresCluster *v1beta1.PostgresCluster) string { + logPath := naming.PGBackRestPGDataLogPath + if postgresCluster.Spec.Backups.PGBackRest.Log != nil && + postgresCluster.Spec.Backups.PGBackRest.Log.Path != "" { + logPath = postgresCluster.Spec.Backups.PGBackRest.Log.Path + } + return filepath.Clean(logPath) +} diff --git a/internal/util/pgbackrest_test.go b/internal/util/pgbackrest_test.go new file mode 100644 index 0000000000..e654436afa --- /dev/null +++ b/internal/util/pgbackrest_test.go @@ -0,0 +1,42 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestGetPGBackRestLogPathForInstance(t *testing.T) { + t.Run("NoSpecPath", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + }, + }, + } + assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), naming.PGBackRestPGDataLogPath) + }) + + t.Run("SpecPathSet", func(t *testing.T) { + postgrescluster := &v1beta1.PostgresCluster{ + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{ + Log: &v1beta1.LoggingConfiguration{ + Path: "/volumes/test/log", + }, + }, + }, + }, + } + assert.Equal(t, GetPGBackRestLogPathForInstance(postgrescluster), "/volumes/test/log") + }) +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go new file mode 100644 index 0000000000..77076a5de3 --- /dev/null +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/pgbackrest_types.go @@ -0,0 +1,18 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package v1 + +import ( + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// PGBackRestArchive defines a pgBackRest archive configuration +// +kubebuilder:validation:XValidation:rule=`!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")`,message=`pgbackrest sidecar log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x, self.repoHost.log.path.startsWith("/volumes/"+x.name))`,message=`repo host log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x, self.jobs.log.path.startsWith("/volumes/"+x.name))`,message=`backup jobs log path is restricted to an existing additional volume` +// +kubebuilder:validation:XValidation:rule=`!self.?global["log-path"].hasValue()`,message=`pgbackrest log-path must be set via the various log.path fields in the spec` +type PGBackRestArchive struct { + v1beta1.PGBackRestArchive `json:",inline"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go index 1a642e13c9..d2f38441dc 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go @@ -22,6 +22,10 @@ import ( // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "volumes.temp" to log in "/pgtmp"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgtmp/logs/postgres") || self.instances.all(i, i.?volumes.temp.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all 
instances need "walVolumeClaimSpec" to log in "/pgwal"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgwal/logs/postgres") || self.instances.all(i, i.?walVolumeClaimSpec.hasValue())).orValue(true)` // +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` +// +// # pgBackRest Logging +// +// +kubebuilder:validation:XValidation:fieldPath=`.backups.pgbackrest.log.path`,message=`all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`,rule=`self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)` type PostgresClusterSpec struct { // +optional Metadata *v1beta1.Metadata `json:"metadata,omitempty"` @@ -68,6 +72,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. 
If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -356,7 +361,7 @@ type Backups struct { // pgBackRest archive configuration // +optional - PGBackRest v1beta1.PGBackRestArchive `json:"pgbackrest"` + PGBackRest PGBackRestArchive `json:"pgbackrest"` // VolumeSnapshot configuration // +optional @@ -538,6 +543,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. + // +optional Volumes *v1beta1.PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go index 80043ab766..46d1817070 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1/zz_generated.deepcopy.go @@ -182,6 +182,22 @@ func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { + *out = *in + in.PGBackRestArchive.DeepCopyInto(&out.PGBackRestArchive) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGBackRestArchive. +func (in *PGBackRestArchive) DeepCopy() *PGBackRestArchive { + if in == nil { + return nil + } + out := new(PGBackRestArchive) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PGBouncerPodSpec) DeepCopyInto(out *PGBouncerPodSpec) { *out = *in diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index d9777bdcd5..0f87676a72 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -89,7 +89,7 @@ type PGBackRestArchive struct { // +optional Metadata *Metadata `json:"metadata,omitempty"` - // Projected volumes containing custom pgBackRest configuration. These files are mounted + // Projected volumes containing custom pgBackRest configuration. These files are mounted // under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the // PostgreSQL Operator: // https://pgbackrest.org/configuration.html @@ -113,6 +113,10 @@ type PGBackRestArchive struct { // +optional Jobs *BackupJobs `json:"jobs,omitempty"` + // Logging configuration for pgbackrest processes running in postgres instance pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Defines a pgBackRest repository // +kubebuilder:validation:MinItems=1 // +listType=map @@ -155,6 +159,10 @@ type BackupJobs struct { // +optional Resources corev1.ResourceRequirements `json:"resources,omitzero"` + // Logging configuration for pgbackrest processes running in Backup Job Pods. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest backup Job pods. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ // +optional @@ -215,6 +223,10 @@ type PGBackRestRepoHost struct { // +optional Affinity *corev1.Affinity `json:"affinity,omitempty"` + // Logging configuration for pgbackrest processes running in the repo host pod. + // +optional + Log *LoggingConfiguration `json:"log,omitempty"` + // Priority class name for the pgBackRest repo host pod. 
Changing this value // causes PostgreSQL to restart. // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index 4374fa5e4e..525f772d93 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -59,6 +59,7 @@ type PostgresClusterSpec struct { // namespace as the cluster. // +optional DatabaseInitSQL *DatabaseInitSQL `json:"databaseInitSQL,omitempty"` + // Whether or not the PostgreSQL cluster should use the defined default // scheduling constraints. If the field is unset or false, the default // scheduling constraints will be used in addition to any custom constraints @@ -526,6 +527,8 @@ type PostgresInstanceSetSpec struct { // +optional TablespaceVolumes []TablespaceVolume `json:"tablespaceVolumes,omitempty"` + // Volumes to be added to the instance set. 
+ // +optional Volumes *PostgresVolumesSpec `json:"volumes,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 4f276a8d07..c6351ca868 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -383,3 +383,10 @@ func (in *AdditionalVolume) AsVolume(name string) corev1.Volume { return out } + +// LoggingConfiguration provides logging configuration for various components +type LoggingConfiguration struct { + // +kubebuilder:validation:MaxLength=256 + // +optional + Path string `json:"path,omitempty"` +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index 8843869827..ac271ad548 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -67,6 +67,11 @@ func (in *AutoGrowSpec) DeepCopy() *AutoGrowSpec { func (in *BackupJobs) DeepCopyInto(out *BackupJobs) { *out = *in in.Resources.DeepCopyInto(&out.Resources) + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string) @@ -658,6 +663,21 @@ func (in *InstrumentationSpec) DeepCopy() *InstrumentationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration. 
+func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration { + if in == nil { + return nil + } + out := new(LoggingConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metadata) DeepCopyInto(out *Metadata) { *out = *in @@ -1156,6 +1176,11 @@ func (in *PGBackRestArchive) DeepCopyInto(out *PGBackRestArchive) { *out = new(BackupJobs) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.Repos != nil { in, out := &in.Repos, &out.Repos *out = make([]PGBackRestRepo, len(*in)) @@ -1374,6 +1399,11 @@ func (in *PGBackRestRepoHost) DeepCopyInto(out *PGBackRestRepoHost) { *out = new(corev1.Affinity) (*in).DeepCopyInto(*out) } + if in.Log != nil { + in, out := &in.Log, &out.Log + *out = new(LoggingConfiguration) + **out = **in + } if in.PriorityClassName != nil { in, out := &in.PriorityClassName, &out.PriorityClassName *out = new(string)