@@ -151,7 +151,7 @@ spec:
properties:
configuration:
description: |-
Projected volumes containing custom pgBackRest configuration. These files are mounted
Projected volumes containing custom pgBackRest configuration. These files are mounted
under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the
PostgreSQL Operator:
https://pgbackrest.org/configuration.html
@@ -1424,6 +1424,14 @@ spec:
x-kubernetes-list-type: atomic
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in Backup Job Pods.
properties:
path:
maxLength: 256
type: string
type: object
priorityClassName:
description: |-
Priority class name for the pgBackRest backup Job pods.
@@ -1583,6 +1591,14 @@ spec:
x-kubernetes-list-type: map
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in postgres instance pods.
properties:
path:
maxLength: 256
type: string
type: object
manual:
description: Defines details for manual pgBackRest backup
Jobs
@@ -2551,6 +2567,14 @@ spec:
x-kubernetes-list-type: atomic
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in the repo host pod.
properties:
path:
maxLength: 256
type: string
type: object
priorityClassName:
description: |-
Priority class name for the pgBackRest repo host pod. Changing this value
@@ -4562,6 +4586,21 @@ spec:
required:
- repos
type: object
x-kubernetes-validations:
- message: pgbackrest sidecar log path is restricted to an existing
additional volume
rule: '!self.?log.path.hasValue() || self.log.path.startsWith("/volumes/")'
- message: repo host log path is restricted to an existing additional
volume
rule: '!self.?repoHost.log.path.hasValue() || self.repoHost.volumes.additional.exists(x,
self.repoHost.log.path.startsWith("/volumes/"+x.name))'
- message: backup jobs log path is restricted to an existing additional
volume
rule: '!self.?jobs.log.path.hasValue() || self.jobs.volumes.additional.exists(x,
self.jobs.log.path.startsWith("/volumes/"+x.name))'
- message: pgbackrest log-path must be set via the various log.path
fields in the spec
rule: '!self.?global["log-path"].hasValue()'
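For illustration, a spec fragment that satisfies the repo host and backup Job rules above might look like the following sketch. Only the requirement that each log.path begin with "/volumes/<volume name>" comes from the rules themselves; the claimName field and the volume/PVC names are assumptions.

backups:
  pgbackrest:
    repoHost:
      log:
        path: /volumes/repo-logs/pgbackrest
      volumes:
        additional:
        - name: repo-logs
          claimName: repo-host-log-pvc   # assumed field name and PVC
    jobs:
      log:
        path: /volumes/backup-logs/pgbackrest
      volumes:
        additional:
        - name: backup-logs
          claimName: backup-job-log-pvc   # assumed field name and PVC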
snapshots:
description: VolumeSnapshot configuration
properties:
@@ -11209,6 +11248,7 @@ spec:
type: object
type: array
volumes:
description: Volumes to be added to the instance set.
properties:
additional:
description: Additional pre-existing volumes to add to the
@@ -18416,6 +18456,12 @@ spec:
|| !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue()
&& i.volumes.additional.exists(volume, v.startsWith("/volumes/" +
volume.name)))).orValue(true)
- fieldPath: .backups.pgbackrest.log.path
message: all instances need an additional volume for pgbackrest sidecar
to log in "/volumes"
rule: self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes")
|| self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume,
v.startsWith("/volumes/" + volume.name)))).orValue(true)
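As a companion sketch for the instance-level rule above: when backups.pgbackrest.log.path points under "/volumes", every instance set must mount an additional volume whose name matches the path prefix. The claimName field and the names used here are assumptions.

instances:
- name: instance1
  volumes:
    additional:
    - name: logs
      claimName: instance1-log-pvc   # assumed field name and PVC
backups:
  pgbackrest:
    log:
      path: /volumes/logs/pgbackrest   # must start with /volumes/<additional volume name>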
status:
description: PostgresClusterStatus defines the observed state of PostgresCluster
properties:
@@ -18951,7 +18997,7 @@ spec:
properties:
configuration:
description: |-
Projected volumes containing custom pgBackRest configuration. These files are mounted
Projected volumes containing custom pgBackRest configuration. These files are mounted
under "/etc/pgbackrest/conf.d" alongside any pgBackRest configuration generated by the
PostgreSQL Operator:
https://pgbackrest.org/configuration.html
@@ -20224,6 +20270,14 @@ spec:
x-kubernetes-list-type: atomic
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in Backup Job Pods.
properties:
path:
maxLength: 256
type: string
type: object
priorityClassName:
description: |-
Priority class name for the pgBackRest backup Job pods.
@@ -20383,6 +20437,14 @@ spec:
x-kubernetes-list-type: map
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in postgres instance pods.
properties:
path:
maxLength: 256
type: string
type: object
manual:
description: Defines details for manual pgBackRest backup
Jobs
@@ -21351,6 +21413,14 @@ spec:
x-kubernetes-list-type: atomic
type: object
type: object
log:
description: Logging configuration for pgbackrest processes
running in the repo host pod.
properties:
path:
maxLength: 256
type: string
type: object
priorityClassName:
description: |-
Priority class name for the pgBackRest repo host pod. Changing this value
@@ -30002,6 +30072,7 @@ spec:
type: object
type: array
volumes:
description: Volumes to be added to the instance set.
properties:
additional:
description: Additional pre-existing volumes to add to the
10 changes: 1 addition & 9 deletions internal/collector/pgbackrest.go
@@ -8,7 +8,6 @@ import (
"context"
_ "embed"
"encoding/json"
"fmt"
"slices"

"github.com/crunchydata/postgres-operator/internal/naming"
@@ -25,19 +24,12 @@ func NewConfigForPgBackrestRepoHostPod(
ctx context.Context,
spec *v1beta1.InstrumentationSpec,
repos []v1beta1.PGBackRestRepo,
directory string,
) *Config {
config := NewConfig(spec)

if OpenTelemetryLogsEnabled(ctx, spec) {

var directory string
for _, repo := range repos {
if repo.Volume != nil {
directory = fmt.Sprintf(naming.PGBackRestRepoLogPath, repo.Name)
break
}
}

// We should only enter this function if a PVC is assigned for a dedicated repohost
// but if we don't have one, exit early.
if directory == "" {
18 changes: 8 additions & 10 deletions internal/collector/pgbackrest_test.go
@@ -30,8 +30,7 @@ func TestNewConfigForPgBackrestRepoHostPod(t *testing.T) {
}
var instrumentation *v1beta1.InstrumentationSpec
require.UnmarshalInto(t, &instrumentation, `{}`)

config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos)
config := NewConfigForPgBackrestRepoHostPod(ctx, instrumentation, repos, "/test/directory")

result, err := config.ToYAML()
assert.NilError(t, err)
@@ -43,7 +42,7 @@ exporters:
extensions:
file_storage/pgbackrest_logs:
create_directory: false
directory: /pgbackrest/repo1/log/receiver
directory: /test/directory/receiver
fsync: true
processors:
batch/1s:
@@ -101,8 +100,8 @@ processors:
receivers:
filelog/pgbackrest_log:
include:
- /pgbackrest/repo1/log/*.log
- /pgbackrest/repo1/log/*.log.1
- /test/directory/*.log
- /test/directory/*.log.1
multiline:
line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19}
storage: file_storage/pgbackrest_logs
@@ -136,8 +135,7 @@ service:
Volume: new(v1beta1.RepoPVC),
},
}

config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos)
config := NewConfigForPgBackrestRepoHostPod(ctx, testInstrumentationSpec(), repos, "/another/directory")

result, err := config.ToYAML()
assert.NilError(t, err)
@@ -153,7 +151,7 @@ exporters:
extensions:
file_storage/pgbackrest_logs:
create_directory: false
directory: /pgbackrest/repo1/log/receiver
directory: /another/directory/receiver
fsync: true
processors:
batch/1s:
@@ -211,8 +209,8 @@ processors:
receivers:
filelog/pgbackrest_log:
include:
- /pgbackrest/repo1/log/*.log
- /pgbackrest/repo1/log/*.log.1
- /another/directory/*.log
- /another/directory/*.log.1
multiline:
line_start_pattern: ^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}|^-{19}
storage: file_storage/pgbackrest_logs
8 changes: 5 additions & 3 deletions internal/collector/postgres.go
@@ -16,6 +16,7 @@ import (

"github.com/crunchydata/postgres-operator/internal/naming"
"github.com/crunchydata/postgres-operator/internal/postgres"
"github.com/crunchydata/postgres-operator/internal/util"
"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

@@ -244,8 +245,9 @@ func EnablePostgresLogging(
}

// pgBackRest pipeline
pgBackRestLogPath := util.GetPGBackRestLogPathForInstance(inCluster)
outConfig.Extensions["file_storage/pgbackrest_logs"] = map[string]any{
"directory": naming.PGBackRestPGDataLogPath + "/receiver",
"directory": pgBackRestLogPath + "/receiver",
"create_directory": false,
"fsync": true,
}
@@ -258,8 +260,8 @@
// a log record or two to the old file while rotation is occurring.
// The collector knows not to create duplicate logs.
"include": []string{
naming.PGBackRestPGDataLogPath + "/*.log",
naming.PGBackRestPGDataLogPath + "/*.log.1",
pgBackRestLogPath + "/*.log",
pgBackRestLogPath + "/*.log.1",
},
"storage": "file_storage/pgbackrest_logs",

4 changes: 2 additions & 2 deletions internal/controller/postgrescluster/instance.go
Original file line number Diff line number Diff line change
@@ -1205,7 +1205,7 @@ func (r *Reconciler) reconcileInstance(
// TODO(sidecar): Create these directories sometime other than startup.
collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template,
[]corev1.VolumeMount{postgres.DataVolumeMount()}, pgPassword,
[]string{naming.PGBackRestPGDataLogPath}, includeLogrotate, true)
[]string{util.GetPGBackRestLogPathForInstance(cluster)}, includeLogrotate, true)
}

// Add postgres-exporter to the instance Pod spec
@@ -1433,7 +1433,7 @@ func (r *Reconciler) reconcileInstanceConfigMap(
collector.AddLogrotateConfigs(ctx, cluster.Spec.Instrumentation,
instanceConfigMap,
[]collector.LogrotateConfig{{
LogFiles: []string{naming.PGBackRestPGDataLogPath + "/*.log"},
LogFiles: []string{util.GetPGBackRestLogPathForInstance(cluster) + "/*.log"},
}})
}
}
57 changes: 32 additions & 25 deletions internal/controller/postgrescluster/pgbackrest.go
@@ -8,6 +8,7 @@ import (
"context"
"fmt"
"io"
"path/filepath"
"reflect"
"regexp"
"sort"
@@ -38,6 +39,7 @@
"github.com/crunchydata/postgres-operator/internal/pgbackrest"
"github.com/crunchydata/postgres-operator/internal/pki"
"github.com/crunchydata/postgres-operator/internal/postgres"
"github.com/crunchydata/postgres-operator/internal/shell"
"github.com/crunchydata/postgres-operator/internal/util"
"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)
@@ -821,7 +823,13 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl
{Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()},
}
} else {
container.Command = []string{"/bin/pgbackrest", "backup"}
mkdirCommand := ""
cloudLogPath := getCloudLogPath(postgresCluster)
if cloudLogPath != "" {
mkdirCommand += shell.MakeDirectories(cloudLogPath, cloudLogPath) + "; "
}

container.Command = []string{"sh", "-c", "--", mkdirCommand + `exec "$@"`, "--", "/bin/pgbackrest", "backup"}
container.Command = append(container.Command, cmdOpts...)
}

@@ -885,8 +893,8 @@ func (r *Reconciler) generateBackupJobSpecIntent(ctx context.Context, postgresCl
pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template)

// Mount the PVC named in the "pgbackrest-cloud-log-volume" annotation, if any.
if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" {
util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolumeName)
if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" {
util.AddCloudLogVolumeToPod(&jobSpec.Template.Spec, logVolume)
}
}

@@ -2075,28 +2083,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context,
repoHostName, configHash, serviceName, serviceNamespace string,
instanceNames []string) error {

// If the user has specified a PVC to use as a log volume for cloud backups via the
// PGBackRestCloudLogVolume annotation, check for the PVC. If we find it, set the cloud
// log path. If the user has specified a PVC, but we can't find it, create a warning event.
cloudLogPath := ""
if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" {
logVolume := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: logVolumeName,
Namespace: postgresCluster.GetNamespace(),
},
}
err := errors.WithStack(r.Client.Get(ctx,
client.ObjectKeyFromObject(logVolume), logVolume))
if err != nil {
// PVC not retrieved, create warning event
r.Recorder.Event(postgresCluster, corev1.EventTypeWarning,
"PGBackRestCloudLogVolumeNotFound", err.Error())
} else {
// We successfully found the specified PVC, so we will set the log path
cloudLogPath = "/volumes/" + logVolumeName
}
}
cloudLogPath := getCloudLogPath(postgresCluster)

backrestConfig, err := pgbackrest.CreatePGBackRestConfigMapIntent(ctx, postgresCluster, repoHostName,
configHash, serviceName, serviceNamespace, cloudLogPath, instanceNames)
@@ -3351,3 +3338,23 @@ func authorizeBackupRemovalAnnotationPresent(postgresCluster *v1beta1.PostgresCl
}
return false
}

// getCloudLogPath determines the appropriate pgbackrest log path for cloud backup Jobs.
// If the user has specified a PVC to use as a log volume for cloud backups via the
// PGBackRestCloudLogVolume annotation, the log path is derived from that volume. If the
// annotation is not set but a log path is set via the spec, that path is used.
// TODO: Make sure this is what we want (i.e. annotation to take precedence over spec)
//
// This function assumes that the backups/pgbackrest spec is present in postgresCluster.
func getCloudLogPath(postgresCluster *v1beta1.PostgresCluster) string {
cloudLogPath := ""
if logVolume := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolume != "" {
cloudLogPath = "/volumes/" + logVolume
} else if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil &&
postgresCluster.Spec.Backups.PGBackRest.Jobs.Log != nil &&
postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path != "" {
cloudLogPath = filepath.Clean(postgresCluster.Spec.Backups.PGBackRest.Jobs.Log.Path)
}
return cloudLogPath
}
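As a sketch of the two configuration routes that getCloudLogPath arbitrates, with the annotation taking precedence over the spec path as the code above shows. The annotation key written out below is an assumption for illustration; the real key is whatever naming.PGBackRestCloudLogVolume resolves to.

metadata:
  annotations:
    # assumed key; if present, the cloud log path becomes /volumes/cloud-backup-logs
    postgres-operator.crunchydata.com/pgbackrest-cloud-log-volume: cloud-backup-logs
spec:
  backups:
    pgbackrest:
      jobs:
        log:
          path: /volumes/backup-logs/pgbackrest   # used only when the annotation is absent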