Add validation for pgbouncer logfile #4280
Changes from all commits: daa6b46, fe54a36, a9f85de, 746c906, 1ea3e45, 1141e0f, 2564b01
@@ -142,6 +142,10 @@ func TestPod(t *testing.T) {
	t.Parallel()

	features := feature.NewGate()
	assert.NilError(t, features.SetFromMap(map[string]bool{
		feature.OpenTelemetryLogs:    true,
		feature.OpenTelemetryMetrics: true,
	}))

Comment on lines +145 to +148:
👍 to setting it here since nothing happens until

	ctx := feature.NewContext(context.Background(), features)

	cluster := new(v1beta1.PostgresCluster)
@@ -463,6 +467,59 @@ containers:
  - mountPath: /etc/pgbouncer
    name: pgbouncer-config
    readOnly: true
- command:
  - bash
  - -ceu
  - --
  - "monitor() {\n\nmkdir -p '/tmp/receiver' && { chmod 0775 '/tmp/receiver' || :;
    }\nOTEL_PIDFILE=/tmp/otel.pid\n\nstart_otel_collector() {\n\techo \"Starting OTel
    Collector\"\n\t/otelcol-contrib --config /etc/otel-collector/config.yaml &\n\techo
    $! > $OTEL_PIDFILE\n}\nstart_otel_collector\n\nexec {fd}<> <(:||:)\nwhile read
    -r -t 5 -u \"${fd}\" ||:; do\n\tlogrotate -s /tmp/logrotate.status /etc/logrotate.d/logrotate.conf\n\tif
    [[ \"${directory}\" -nt \"/proc/self/fd/${fd}\" ]] && kill -HUP $(head -1 ${OTEL_PIDFILE?});\n\tthen\n\t\techo
    \"OTel configuration changed...\"\n\t\texec {fd}>&- && exec {fd}<> <(:||:)\n\t\tstat
    --format='Loaded configuration dated %y' \"${directory}\"\n\tfi\n\tif [[ ! -e
    /proc/$(head -1 ${OTEL_PIDFILE?}) ]] ; then\n\t\tstart_otel_collector\n\tfi\ndone\n};
    export directory=\"$1\"; export -f monitor; exec -a \"$0\" bash -ceu monitor"
  - collector
  - /etc/otel-collector
  env:
  - name: K8S_POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  - name: K8S_POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: PGPASSWORD
  imagePullPolicy: Always
  name: collector
  ports:
  - containerPort: 9187
    name: otel-metrics
    protocol: TCP
  resources: {}
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
      - ALL
    privileged: false
    readOnlyRootFilesystem: true
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  volumeMounts:
  - mountPath: /etc/pgbouncer
    name: pgbouncer-config
    readOnly: true
  - mountPath: /etc/otel-collector
    name: collector-config
    readOnly: true
  - mountPath: /etc/logrotate.d
    name: logrotate-config
    readOnly: true
volumes:
- name: pgbouncer-config
  projected:
@@ -490,6 +547,20 @@ volumes:
        items:
        - key: ca.crt
          path: ~postgres-operator/backend-ca.crt
- name: logrotate-config
  projected:
    sources:
    - configMap:
        items:
        - key: logrotate.conf
          path: logrotate.conf
- name: collector-config
  projected:
    sources:
    - configMap:
        items:
        - key: collector.yaml
          path: config.yaml
	`))
	})
@@ -498,6 +569,8 @@ volumes:
			"logfile": "/volumes/required/mylog.log",
		}
		logfile = "/volumes/required/mylog.log"
		// Reset the instrumentation from the previous test
		cluster.Spec.Instrumentation = nil

		call()
@@ -0,0 +1,168 @@
// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
//
// SPDX-License-Identifier: Apache-2.0

package validation

import (
	"context"
	"testing"

	"gotest.tools/v3/assert"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/crunchydata/postgres-operator/internal/testing/require"
	v1 "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1"
)

func TestV1PGBouncerLogging(t *testing.T) {
	ctx := context.Background()
	cc := require.Kubernetes(t)
	t.Parallel()

	namespace := require.Namespace(t, cc)

	base := v1.NewPostgresCluster()
	base.Namespace = namespace.Name
	base.Name = "pgbouncer-logging"
	// required fields
	require.UnmarshalInto(t, &base.Spec, `{
		postgresVersion: 16,
		instances: [{
			dataVolumeClaimSpec: {
				accessModes: [ReadWriteOnce],
				resources: { requests: { storage: 1Mi } },
			},
		}],
	}`)

	assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll),
		"expected this base to be valid")
t.Run("Can set logging on tmp with .log", func(t *testing.T) { | ||
tmp := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &tmp.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/tmp/logs/pgbouncer/log.log" | ||
} | ||
} | ||
} | ||
}`) | ||
assert.NilError(t, cc.Create(ctx, base.DeepCopy(), client.DryRunAll), | ||
"expected this option to be valid") | ||
}) | ||
|
||
t.Run("Cannot set logging on tmp without .log", func(t *testing.T) { | ||
tmp := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &tmp.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/tmp/logs/pgbouncer/log.txt" | ||
} | ||
} | ||
} | ||
}`) | ||
|
||
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) | ||
assert.Assert(t, apierrors.IsInvalid(err)) | ||
assert.ErrorContains(t, err, "logfile config must end with '.log'") | ||
}) | ||
|
||
t.Run("Cannot set logging on tmp without correct subdir", func(t *testing.T) { | ||
tmp := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &tmp.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/tmp/logs/log.log" | ||
} | ||
} | ||
} | ||
}`) | ||
|
||
err := cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) | ||
assert.Assert(t, apierrors.IsInvalid(err)) | ||
assert.ErrorContains(t, err, "logfile destination is restricted to '/tmp/logs/pgbouncer/' or an existing additional volume") | ||
|
||
require.UnmarshalInto(t, &tmp.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/tmp/pgbouncer/log.log" | ||
} | ||
} | ||
} | ||
}`) | ||
|
||
err = cc.Create(ctx, tmp.DeepCopy(), client.DryRunAll) | ||
assert.Assert(t, apierrors.IsInvalid(err)) | ||
assert.ErrorContains(t, err, "logfile destination is restricted to '/tmp/logs/pgbouncer/' or an existing additional volume") | ||
}) | ||
|
||
t.Run("Cannot set logging on volumes that don't exist", func(t *testing.T) { | ||
vol := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &vol.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/volumes/logging/log.log" | ||
} | ||
} | ||
} | ||
}`) | ||
|
||
err := cc.Create(ctx, vol.DeepCopy(), client.DryRunAll) | ||
assert.Assert(t, apierrors.IsInvalid(err)) | ||
assert.ErrorContains(t, err, "logfile destination is restricted to '/tmp/logs/pgbouncer/' or an existing additional volume") | ||
}) | ||
|
||
t.Run("Cannot set logging elsewhere", func(t *testing.T) { | ||
vol := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &vol.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/var/log.log" | ||
} | ||
} | ||
} | ||
}`) | ||
|
||
err := cc.Create(ctx, vol.DeepCopy(), client.DryRunAll) | ||
assert.Assert(t, apierrors.IsInvalid(err)) | ||
assert.ErrorContains(t, err, "logfile destination is restricted to '/tmp/logs/pgbouncer/' or an existing additional volume") | ||
}) | ||
|
||
t.Run("Can set logging on volumes that exist", func(t *testing.T) { | ||
vol := base.DeepCopy() | ||
|
||
require.UnmarshalInto(t, &vol.Spec.Proxy, `{ | ||
pgBouncer: { | ||
config: { | ||
global: { | ||
logfile: "/volumes/logging/log.log" | ||
} | ||
}, | ||
volumes: { | ||
additional: [ | ||
{ | ||
name: logging, | ||
claimName: required-1 | ||
}] | ||
} | ||
} | ||
}`) | ||
|
||
assert.NilError(t, cc.Create(ctx, vol.DeepCopy(), client.DryRunAll), | ||
"expected this option to be valid") | ||
}) | ||
} |
@@ -0,0 +1,15 @@
// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
//
// SPDX-License-Identifier: Apache-2.0

package v1

import (
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

// PGBouncerPodSpec defines the desired state of a PgBouncer connection pooler.
// +kubebuilder:validation:XValidation:rule=`self.?config.global.logfile.optMap(f, f.startsWith("/tmp/logs/pgbouncer/") || (self.?volumes.additional.hasValue() && self.volumes.additional.exists(v, f.startsWith("/volumes/" + v.name)))).orValue(true)`,message=`config.global.logfile destination is restricted to '/tmp/logs/pgbouncer/' or an existing additional volume`
type PGBouncerPodSpec struct {
	v1beta1.PGBouncerPodSpec `json:",inline"`
}
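For readers less familiar with CEL, the intent of the rule above can be sketched in plain Go. This is an illustration only: the function and parameter names below are made up for this example and are not operator code, and the real check runs in the API server via the CEL expression. The cases in main mirror the paths exercised by TestV1PGBouncerLogging above.

```go
package main

import (
	"fmt"
	"strings"
)

// logfileDestinationAllowed mirrors the CEL rule's logic. Both parameters are
// hypothetical stand-ins: logfile for config.global.logfile ("" when unset),
// additionalVolumeNames for the names listed under volumes.additional.
func logfileDestinationAllowed(logfile string, additionalVolumeNames []string) bool {
	if logfile == "" {
		return true // orValue(true): no logfile configured, nothing to restrict
	}
	if strings.HasPrefix(logfile, "/tmp/logs/pgbouncer/") {
		return true
	}
	// exists(v, f.startsWith("/volumes/" + v.name)): the logfile must live
	// under one of the declared additional volumes.
	for _, name := range additionalVolumeNames {
		if strings.HasPrefix(logfile, "/volumes/"+name) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(logfileDestinationAllowed("/tmp/logs/pgbouncer/log.log", nil))                  // true
	fmt.Println(logfileDestinationAllowed("/tmp/logs/log.log", nil))                            // false: wrong subdirectory
	fmt.Println(logfileDestinationAllowed("/volumes/logging/log.log", nil))                     // false: volume not declared
	fmt.Println(logfileDestinationAllowed("/volumes/logging/log.log", []string{"logging"}))     // true
}
```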
@@ -26,7 +26,14 @@ type PGBouncerConfiguration struct {

	// Settings that apply to the entire PgBouncer process.
	// More info: https://www.pgbouncer.org/config.html
	// ---
	// # Logging
	// +kubebuilder:validation:XValidation:rule=`!has(self.logfile) || self.logfile.endsWith('.log')`,message=`logfile config must end with '.log'`

Review thread on the rule above:
Is something like this more legible? (suggested change)
Ah, that doesn't work:
But this works:
meh, for just one step, I'll do without the "?"

	// +kubebuilder:validation:MaxProperties=50

Comment on lines +31 to +32:
How do we feel about adding these two validations/exclusions here to v1beta1?
👍🏻 These new rules/criteria seem fine to me.
agreed 👍

	//
	// See also XValidation rule on v1 PostgresProxySpec
	//
	// +optional
	// +mapType=granular
	Global map[string]string `json:"global,omitempty"`

	// PgBouncer database definitions. The key is the database requested by a
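The review thread embedded in the hunk above is about how to phrase these rules, and the PR ends up using two CEL styles: the v1beta1 rule guards the optional map key explicitly with has(), while the v1 rule uses CEL's optional syntax with optMap and orValue(true) so an unset logfile leaves the rule satisfied. For reference, here are the two expressions side by side, quoted from the kubebuilder markers in this diff; the Go constant names are illustrative only.

```go
package main

// Two CEL styles used in this PR. The expressions are copied verbatim from the
// kubebuilder markers above; the constant names do not exist in the operator.
const (
	// v1beta1 PGBouncerConfiguration.Global: guard the possibly-missing key with has().
	logfileSuffixRule = `!has(self.logfile) || self.logfile.endsWith('.log')`

	// v1 PGBouncerPodSpec: optional chaining with optMap, where orValue(true)
	// makes the rule pass when no logfile is configured.
	logfileDestinationRule = `self.?config.global.logfile.optMap(f, f.startsWith("/tmp/logs/pgbouncer/") || (self.?volumes.additional.hasValue() && self.volumes.additional.exists(v, f.startsWith("/volumes/" + v.name)))).orValue(true)`
)

func main() {}
```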
Missed this in an earlier PR re: OTEL handling user-set logfiles.

Good catch.