Skip to content

Commit adfd56a

Browse files
committed
Add a validation rule ensuring that, when the pgBackRest sidecar log path is set under /volumes, every instance has an additional volume matching that path. Add test cases.
1 parent dd22a73 commit adfd56a

File tree

3 files changed

+107
-14
lines changed

3 files changed

+107
-14
lines changed

config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18456,6 +18456,12 @@ spec:
1845618456
|| !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue()
1845718457
&& i.volumes.additional.exists(volume, v.startsWith("/volumes/" +
1845818458
volume.name)))).orValue(true)
18459+
- fieldPath: .backups.pgbackrest.log.path
18460+
message: all instances need an additional volume for pgbackrest sidecar
18461+
to log in "/volumes"
18462+
rule: self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes")
18463+
|| self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume,
18464+
v.startsWith("/volumes/" + volume.name)))).orValue(true)
1845918465
status:
1846018466
description: PostgresClusterStatus defines the observed state of PostgresCluster
1846118467
properties:

internal/testing/validation/pgbackrest_test.go

Lines changed: 97 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -63,23 +63,106 @@ func TestV1PGBackRestLogging(t *testing.T) {
6363
t.Run("Cannot set pgbackrest sidecar's log.path without correct subdir", func(t *testing.T) {
6464
tmp := base.DeepCopy()
6565

66-
require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
67-
log: {
68-
path: "/something/wrong"
69-
}
70-
}`)
66+
t.Run("Wrong subdir", func(t *testing.T) {
67+
require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
68+
log: {
69+
path: "/something/wrong"
70+
}
71+
}`)
7172

72-
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)
73-
assert.Assert(t, apierrors.IsInvalid(err))
74-
assert.ErrorContains(t, err, "pgbackrest sidecar log path is restricted to an existing additional volume")
73+
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)
74+
assert.Assert(t, apierrors.IsInvalid(err))
75+
assert.ErrorContains(t, err, "pgbackrest sidecar log path is restricted to an existing additional volume")
76+
})
7577

76-
require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
77-
log: {
78-
path: "/volumes/this/should/pass"
79-
}
80-
}`)
78+
t.Run("Single instance - missing additional volume", func(t *testing.T) {
79+
require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
80+
log: {
81+
path: "/volumes/anything"
82+
}
83+
}`)
8184

82-
assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid")
85+
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)
86+
assert.Assert(t, apierrors.IsInvalid(err))
87+
assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`)
88+
})
89+
90+
t.Run("Multiple instances - one missing additional volume", func(t *testing.T) {
91+
require.UnmarshalInto(t, &tmp.Spec.Backups.PGBackRest, `{
92+
log: {
93+
path: "/volumes/anything"
94+
}
95+
}`)
96+
97+
require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{
98+
dataVolumeClaimSpec: {
99+
accessModes: [ReadWriteOnce],
100+
resources: { requests: { storage: 1Mi } },
101+
},
102+
volumes: {
103+
additional: [{
104+
name: "test",
105+
claimName: "pvc-claim"
106+
}]
107+
}
108+
},{
109+
name: "instance2",
110+
dataVolumeClaimSpec: {
111+
accessModes: [ReadWriteOnce],
112+
resources: { requests: { storage: 1Mi } },
113+
},
114+
}]`)
115+
116+
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll)
117+
assert.Assert(t, apierrors.IsInvalid(err))
118+
assert.ErrorContains(t, err, `all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`)
119+
})
120+
121+
t.Run("Single instance - additional volume present", func(t *testing.T) {
122+
require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{
123+
dataVolumeClaimSpec: {
124+
accessModes: [ReadWriteOnce],
125+
resources: { requests: { storage: 1Mi } },
126+
},
127+
volumes: {
128+
additional: [{
129+
name: "test",
130+
claimName: "pvc-claim"
131+
}]
132+
}
133+
}]`)
134+
135+
assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid")
136+
})
137+
138+
t.Run("Multiple instances - additional volume present", func(t *testing.T) {
139+
require.UnmarshalInto(t, &tmp.Spec.InstanceSets, `[{
140+
dataVolumeClaimSpec: {
141+
accessModes: [ReadWriteOnce],
142+
resources: { requests: { storage: 1Mi } },
143+
},
144+
volumes: {
145+
additional: [{
146+
name: "test",
147+
claimName: "pvc-claim"
148+
}]
149+
}
150+
},{
151+
name: "instance2",
152+
dataVolumeClaimSpec: {
153+
accessModes: [ReadWriteOnce],
154+
resources: { requests: { storage: 1Mi } },
155+
},
156+
volumes: {
157+
additional: [{
158+
name: "another",
159+
claimName: "another-pvc-claim"
160+
}]
161+
}
162+
}]`)
163+
164+
assert.NilError(t, cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll), "expected log.path to be valid")
165+
})
83166
})
84167

85168
t.Run("Cannot set logging on volumes that don't exist", func(t *testing.T) {

pkg/apis/postgres-operator.crunchydata.com/v1/postgrescluster_types.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@ import (
2222
// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "volumes.temp" to log in "/pgtmp"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgtmp/logs/postgres") || self.instances.all(i, i.?volumes.temp.hasValue())).orValue(true)`
2323
// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need "walVolumeClaimSpec" to log in "/pgwal"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/pgwal/logs/postgres") || self.instances.all(i, i.?walVolumeClaimSpec.hasValue())).orValue(true)`
2424
// +kubebuilder:validation:XValidation:fieldPath=`.config.parameters.log_directory`,message=`all instances need an additional volume to log in "/volumes"`,rule=`self.?config.parameters.log_directory.optMap(v, type(v) != string || !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)`
25+
//
26+
// # pgBackRest Logging
27+
//
28+
// +kubebuilder:validation:XValidation:fieldPath=`.backups.pgbackrest.log.path`,message=`all instances need an additional volume for pgbackrest sidecar to log in "/volumes"`,rule=`self.?backups.pgbackrest.log.path.optMap(v, !v.startsWith("/volumes") || self.instances.all(i, i.?volumes.additional.hasValue() && i.volumes.additional.exists(volume, v.startsWith("/volumes/" + volume.name)))).orValue(true)`
2529
type PostgresClusterSpec struct {
2630
// +optional
2731
Metadata *v1beta1.Metadata `json:"metadata,omitempty"`

0 commit comments

Comments
 (0)