diff --git a/audit/entry_filter.go b/audit/entry_filter.go
new file mode 100644
index 000000000000..7a7a253b10a7
--- /dev/null
+++ b/audit/entry_filter.go
@@ -0,0 +1,91 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-bexpr"
+ "github.com/hashicorp/vault/helper/namespace"
+ "github.com/hashicorp/vault/internal/observability/event"
+)
+
+var _ eventlogger.Node = (*EntryFilter)(nil)
+
+// NewEntryFilter should be used to create an EntryFilter node.
+// The filter supplied should be in bexpr format and reference fields from logical.LogInputBexpr.
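+// As a purely illustrative example, a filter might look like:
+// 'mount_type == kv and operation == create'.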
+func NewEntryFilter(filter string) (*EntryFilter, error) {
+ const op = "audit.NewEntryFilter"
+
+ filter = strings.TrimSpace(filter)
+ if filter == "" {
+ return nil, fmt.Errorf("%s: cannot create new audit filter with empty filter expression: %w", op, event.ErrInvalidParameter)
+ }
+
+ eval, err := bexpr.CreateEvaluator(filter)
+ if err != nil {
+ return nil, fmt.Errorf("%s: cannot create new audit filter: %w", op, err)
+ }
+
+ return &EntryFilter{evaluator: eval}, nil
+}
+
+// Reopen is a no-op for the filter node.
+func (*EntryFilter) Reopen() error {
+ return nil
+}
+
+// Type describes the type of this node (filter).
+func (*EntryFilter) Type() eventlogger.NodeType {
+ return eventlogger.NodeTypeFilter
+}
+
+// Process will attempt to parse the incoming event data and decide whether the
+// event should be filtered out or remain in the pipeline and be passed to the next node.
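+// A nil event returned with a nil error signals that the event was filtered
+// out and that the pipeline for this entry is complete.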
+func (f *EntryFilter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+ const op = "audit.(EntryFilter).Process"
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ if e == nil {
+ return nil, fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)
+ }
+
+ a, ok := e.Payload.(*AuditEvent)
+ if !ok {
+ return nil, fmt.Errorf("%s: cannot parse event payload: %w", op, event.ErrInvalidParameter)
+ }
+
+ // If we don't have data to process, then we're done.
+ if a.Data == nil {
+ return nil, nil
+ }
+
+ ns, err := namespace.FromContext(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("%s: cannot obtain namespace: %w", op, err)
+ }
+
+ datum := a.Data.BexprDatum(ns.Path)
+
+ result, err := f.evaluator.Evaluate(datum)
+ if err != nil {
+ return nil, fmt.Errorf("%s: unable to evaluate filter: %w", op, err)
+ }
+
+ if result {
+ // Allow this event to carry on through the pipeline.
+ return e, nil
+ }
+
+ // End processing of this pipeline.
+ return nil, nil
+}
diff --git a/audit/entry_filter_test.go b/audit/entry_filter_test.go
new file mode 100644
index 000000000000..a5efea1dc69a
--- /dev/null
+++ b/audit/entry_filter_test.go
@@ -0,0 +1,249 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package audit
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/vault/helper/namespace"
+ "github.com/hashicorp/vault/internal/observability/event"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/stretchr/testify/require"
+)
+
+// TestEntryFilter_NewEntryFilter tests that we can create EntryFilter types correctly.
+func TestEntryFilter_NewEntryFilter(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ Filter string
+ IsErrorExpected bool
+ ExpectedErrorMessage string
+ }{
+ "empty-filter": {
+ Filter: "",
+ IsErrorExpected: true,
+ ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter with empty filter expression: invalid parameter",
+ },
+ "spacey-filter": {
+ Filter: " ",
+ IsErrorExpected: true,
+ ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter with empty filter expression: invalid parameter",
+ },
+ "bad-filter": {
+ Filter: "____",
+ IsErrorExpected: true,
+ ExpectedErrorMessage: "audit.NewEntryFilter: cannot create new audit filter",
+ },
+ "good-filter": {
+ Filter: "foo == bar",
+ IsErrorExpected: false,
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ f, err := NewEntryFilter(tc.Filter)
+ switch {
+ case tc.IsErrorExpected:
+ require.ErrorContains(t, err, tc.ExpectedErrorMessage)
+ require.Nil(t, f)
+ default:
+ require.NoError(t, err)
+ require.NotNil(t, f)
+ }
+ })
+ }
+}
+
+// TestEntryFilter_Reopen ensures we can reopen the filter node.
+func TestEntryFilter_Reopen(t *testing.T) {
+ t.Parallel()
+
+ f := &EntryFilter{}
+ require.NoError(t, f.Reopen())
+}
+
+// TestEntryFilter_Type ensures we always return the right type for this node.
+func TestEntryFilter_Type(t *testing.T) {
+ t.Parallel()
+
+ f := &EntryFilter{}
+ require.Equal(t, eventlogger.NodeTypeFilter, f.Type())
+}
+
+// TestEntryFilter_Process_ContextDone ensures that we stop processing the event
+// if the context was cancelled.
+func TestEntryFilter_Process_ContextDone(t *testing.T) {
+ t.Parallel()
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ // Explicitly cancel the context
+ cancel()
+
+ l, err := NewEntryFilter("foo == bar")
+ require.NoError(t, err)
+
+ // Fake audit event
+ a, err := NewEvent(RequestType)
+ require.NoError(t, err)
+
+ // Fake event logger event
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: a,
+ }
+
+ e2, err := l.Process(ctx, e)
+
+ require.Error(t, err)
+ require.ErrorContains(t, err, "context canceled")
+
+ // Ensure that the pipeline won't continue.
+ require.Nil(t, e2)
+}
+
+// TestEntryFilter_Process_NilEvent ensures we receive the right error when the
+// event we are trying to process is nil.
+func TestEntryFilter_Process_NilEvent(t *testing.T) {
+ t.Parallel()
+
+ l, err := NewEntryFilter("foo == bar")
+ require.NoError(t, err)
+ e, err := l.Process(context.Background(), nil)
+ require.Error(t, err)
+ require.EqualError(t, err, "audit.(EntryFilter).Process: event is nil: invalid parameter")
+
+ // Ensure that the pipeline won't continue.
+ require.Nil(t, e)
+}
+
+// TestEntryFilter_Process_BadPayload ensures we receive the correct error when
+// attempting to process an event with a payload that cannot be parsed back to
+// an audit event.
+func TestEntryFilter_Process_BadPayload(t *testing.T) {
+ t.Parallel()
+
+ l, err := NewEntryFilter("foo == bar")
+ require.NoError(t, err)
+
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: nil,
+ }
+
+ e2, err := l.Process(context.Background(), e)
+ require.Error(t, err)
+ require.EqualError(t, err, "audit.(EntryFilter).Process: cannot parse event payload: invalid parameter")
+
+ // Ensure that the pipeline won't continue.
+ require.Nil(t, e2)
+}
+
+// TestEntryFilter_Process_NoAuditDataInPayload ensures we stop processing a pipeline
+// when the data in the audit event is nil.
+func TestEntryFilter_Process_NoAuditDataInPayload(t *testing.T) {
+ t.Parallel()
+
+ l, err := NewEntryFilter("foo == bar")
+ require.NoError(t, err)
+
+ a, err := NewEvent(RequestType)
+ require.NoError(t, err)
+
+ // Ensure audit data is nil
+ a.Data = nil
+
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: a,
+ }
+
+ e2, err := l.Process(context.Background(), e)
+
+ // Make sure we get the 'nil, nil' response to stop processing this pipeline.
+ require.NoError(t, err)
+ require.Nil(t, e2)
+}
+
+// TestEntryFilter_Process_FilterSuccess tests that when a filter matches, we
+// receive no error and a non-nil event, so the event continues in the pipeline.
+func TestEntryFilter_Process_FilterSuccess(t *testing.T) {
+ t.Parallel()
+
+ l, err := NewEntryFilter("mount_type == juan")
+ require.NoError(t, err)
+
+ a, err := NewEvent(RequestType)
+ require.NoError(t, err)
+
+ a.Data = &logical.LogInput{
+ Request: &logical.Request{
+ Operation: logical.CreateOperation,
+ MountType: "juan",
+ },
+ }
+
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: a,
+ }
+
+ ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
+
+ e2, err := l.Process(ctx, e)
+
+ require.NoError(t, err)
+ require.NotNil(t, e2)
+}
+
+// TestEntryFilter_Process_FilterFail tests that when a filter fails to match, we
+// receive no error and a nil event, so that the pipeline completes.
+func TestEntryFilter_Process_FilterFail(t *testing.T) {
+ t.Parallel()
+
+ l, err := NewEntryFilter("mount_type == john and operation == create and namespace == root")
+ require.NoError(t, err)
+
+ a, err := NewEvent(RequestType)
+ require.NoError(t, err)
+
+ a.Data = &logical.LogInput{
+ Request: &logical.Request{
+ Operation: logical.CreateOperation,
+ MountType: "juan",
+ },
+ }
+
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: a,
+ }
+
+ ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
+
+ e2, err := l.Process(ctx, e)
+
+ require.NoError(t, err)
+ require.Nil(t, e2)
+}
diff --git a/audit/entry_formatter.go b/audit/entry_formatter.go
index af404e1d10de..6937949db368 100644
--- a/audit/entry_formatter.go
+++ b/audit/entry_formatter.go
@@ -11,16 +11,13 @@ import (
"strings"
"time"
- "github.com/jefferai/jsonx"
-
- "github.com/hashicorp/vault/helper/namespace"
- "github.com/hashicorp/vault/sdk/logical"
-
"github.com/go-jose/go-jose/v3/jwt"
+ "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
-
- "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/jefferai/jsonx"
)
var (
@@ -29,7 +26,7 @@ var (
)
// NewEntryFormatter should be used to create an EntryFormatter.
-// Accepted options: WithPrefix.
+// Accepted options: WithHeaderFormatter, WithPrefix.
func NewEntryFormatter(config FormatterConfig, salter Salter, opt ...Option) (*EntryFormatter, error) {
const op = "audit.NewEntryFormatter"
@@ -80,7 +77,7 @@ func (f *EntryFormatter) Process(ctx context.Context, e *eventlogger.Event) (*ev
return nil, fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)
}
- a, ok := e.Payload.(*auditEvent)
+ a, ok := e.Payload.(*AuditEvent)
if !ok {
return nil, fmt.Errorf("%s: cannot parse event payload: %w", op, event.ErrInvalidParameter)
}
diff --git a/audit/event.go b/audit/event.go
index d7b60b3df58d..6ea3f184914d 100644
--- a/audit/event.go
+++ b/audit/event.go
@@ -12,7 +12,7 @@ import (
// NewEvent should be used to create an audit event. The subtype field is needed
// for audit events. It will generate an ID if no ID is supplied. Supported
// options: WithID, WithNow.
-func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
+func NewEvent(s subtype, opt ...Option) (*AuditEvent, error) {
const op = "audit.newEvent"
// Get the default options
@@ -30,7 +30,7 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
}
}
- audit := &auditEvent{
+ audit := &AuditEvent{
ID: opts.withID,
Timestamp: opts.withNow,
Version: version,
@@ -44,8 +44,8 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) {
}
// validate attempts to ensure the audit event in its present state is valid.
-func (a *auditEvent) validate() error {
- const op = "audit.(auditEvent).validate"
+func (a *AuditEvent) validate() error {
+ const op = "audit.(AuditEvent).validate"
if a == nil {
return fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter)
diff --git a/audit/event_test.go b/audit/event_test.go
index d6249fd1b8f2..7a520e3483d8 100644
--- a/audit/event_test.go
+++ b/audit/event_test.go
@@ -29,14 +29,14 @@ func TestAuditEvent_new(t *testing.T) {
Subtype: subtype(""),
Format: format(""),
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
+ ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
},
"empty-Option": {
Options: []Option{},
Subtype: subtype(""),
Format: format(""),
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
+ ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter",
},
"bad-id": {
Options: []Option{WithID("")},
@@ -108,22 +108,22 @@ func TestAuditEvent_new(t *testing.T) {
// TestAuditEvent_Validate exercises the validation for an audit event.
func TestAuditEvent_Validate(t *testing.T) {
tests := map[string]struct {
- Value *auditEvent
+ Value *AuditEvent
IsErrorExpected bool
ExpectedErrorMessage string
}{
"nil": {
Value: nil,
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: event is nil: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: event is nil: invalid parameter",
},
"default": {
- Value: &auditEvent{},
+ Value: &AuditEvent{},
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter",
},
"id-empty": {
- Value: &auditEvent{
+ Value: &AuditEvent{
ID: "",
Version: version,
Subtype: RequestType,
@@ -131,10 +131,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter",
},
"version-fiddled": {
- Value: &auditEvent{
+ Value: &AuditEvent{
ID: "audit_123",
Version: "magic-v2",
Subtype: RequestType,
@@ -142,10 +142,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: event version unsupported: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: event version unsupported: invalid parameter",
},
"subtype-fiddled": {
- Value: &auditEvent{
+ Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: subtype("moon"),
@@ -153,10 +153,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter",
},
"default-time": {
- Value: &auditEvent{
+ Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: ResponseType,
@@ -164,10 +164,10 @@ func TestAuditEvent_Validate(t *testing.T) {
Data: nil,
},
IsErrorExpected: true,
- ExpectedErrorMessage: "audit.(auditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter",
+ ExpectedErrorMessage: "audit.(AuditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter",
},
"valid": {
- Value: &auditEvent{
+ Value: &AuditEvent{
ID: "audit_123",
Version: version,
Subtype: ResponseType,
diff --git a/audit/nodes.go b/audit/nodes.go
index 01602d4b1389..624777e72515 100644
--- a/audit/nodes.go
+++ b/audit/nodes.go
@@ -15,10 +15,12 @@ import (
)
// ProcessManual will attempt to create an (audit) event with the specified data
-// and manually iterate over the supplied nodes calling Process on each.
+// and manually iterate over the supplied nodes calling Process on each until the
+// event is nil (which indicates the pipeline has completed).
// Order of IDs in the NodeID slice determines the order they are processed.
// (Audit) Event will be of RequestType (as opposed to ResponseType).
-// The last node must be a sink node (eventlogger.NodeTypeSink).
+// The last node must be a filter node (eventlogger.NodeTypeFilter) or
+// sink node (eventlogger.NodeTypeSink).
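+// For example, with node IDs ordered as [filterID, formatterID, sinkID], an
+// event that matches the filter ends processing at the sink, while a filtered
+// (non-matching) event ends processing at the filter node.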
func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogger.NodeID, nodes map[eventlogger.NodeID]eventlogger.Node) error {
switch {
case data == nil:
@@ -52,9 +54,15 @@ func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogge
// Process nodes in order, updating the event with the result.
// This means we *should* do:
- // 1. formatter (temporary)
- // 2. sink
+ // 1. filter (optional if configured)
+ // 2. formatter (temporary)
+ // 3. sink
for _, id := range ids {
+ // If the event is nil, we've completed processing the pipeline; this should
+ // be because either a filter node or a sink node ended it (validated below).
+ if e == nil {
+ break
+ }
node, ok := nodes[id]
if !ok {
return fmt.Errorf("node not found: %v", id)
@@ -74,12 +82,14 @@ func ProcessManual(ctx context.Context, data *logical.LogInput, ids []eventlogge
return err
}
- // Track the last node we have processed, as we should end with a sink.
+ // Track the last node we have processed, as we should end with a filter or sink.
lastSeen = node.Type()
}
- if lastSeen != eventlogger.NodeTypeSink {
- return errors.New("last node must be a sink")
+ switch lastSeen {
+ case eventlogger.NodeTypeSink, eventlogger.NodeTypeFilter:
+ default:
+ return errors.New("last node must be a filter or sink")
}
return nil
diff --git a/audit/nodes_test.go b/audit/nodes_test.go
index a50034c1d418..3aa4ef533210 100644
--- a/audit/nodes_test.go
+++ b/audit/nodes_test.go
@@ -185,12 +185,13 @@ func TestProcessManual_LastNodeNotSink(t *testing.T) {
err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
require.Error(t, err)
- require.EqualError(t, err, "last node must be a sink")
+ require.EqualError(t, err, "last node must be a filter or sink")
}
-// TestProcessManual ensures that the manual processing of a test message works
-// as expected with proper inputs.
-func TestProcessManual(t *testing.T) {
+// TestProcessManual_EndWithSink ensures that the manual processing of a test
+// message works as expected with proper inputs, meaning that processing ends
+// with the sink node.
+func TestProcessManual_EndWithSink(t *testing.T) {
t.Parallel()
var ids []eventlogger.NodeID
@@ -215,6 +216,39 @@ func TestProcessManual(t *testing.T) {
require.NoError(t, err)
}
+// TestProcessManual_EndWithFilter ensures that the manual processing of a test
+// message works as expected with proper inputs, meaning that processing ends
+// with the filter node (the event is filtered out before reaching the sink).
+func TestProcessManual_EndWithFilter(t *testing.T) {
+ t.Parallel()
+
+ var ids []eventlogger.NodeID
+ nodes := make(map[eventlogger.NodeID]eventlogger.Node)
+
+ // Filter node
+ filterId, filterNode := newFilterNode(t)
+ ids = append(ids, filterId)
+ nodes[filterId] = filterNode
+
+ // Formatter node
+ formatterId, formatterNode := newFormatterNode(t)
+ ids = append(ids, formatterId)
+ nodes[formatterId] = formatterNode
+
+ // Sink node
+ sinkId, sinkNode := newSinkNode(t)
+ ids = append(ids, sinkId)
+ nodes[sinkId] = sinkNode
+
+ // Data
+ requestId, err := uuid.GenerateUUID()
+ require.NoError(t, err)
+ data := newData(requestId)
+
+ err = ProcessManual(namespace.RootContext(context.Background()), data, ids, nodes)
+ require.NoError(t, err)
+}
+
// newSinkNode creates a new UUID and NoopSink (sink node).
func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
t.Helper()
@@ -226,6 +260,25 @@ func newSinkNode(t *testing.T) (eventlogger.NodeID, *event.NoopSink) {
return sinkId, sinkNode
}
+// TestFilter is a trivial implementation of eventlogger.Node used as a placeholder
+// for Filter nodes in tests.
+type TestFilter struct{}
+
+// Process trivially filters the event, preventing it from being processed by subsequent nodes.
+func (f *TestFilter) Process(_ context.Context, _ *eventlogger.Event) (*eventlogger.Event, error) {
+ return nil, nil
+}
+
+// Reopen does nothing.
+func (f *TestFilter) Reopen() error {
+ return nil
+}
+
+// Type returns the eventlogger.NodeTypeFilter type.
+func (f *TestFilter) Type() eventlogger.NodeType {
+ return eventlogger.NodeTypeFilter
+}
+
// TestFormatter is a trivial implementation of the eventlogger.Node interface
// used as a place-holder for Formatter nodes in tests.
type TestFormatter struct{}
@@ -248,6 +301,15 @@ func (f *TestFormatter) Type() eventlogger.NodeType {
return eventlogger.NodeTypeFormatter
}
+// newFilterNode creates a new TestFilter (filter node).
+func newFilterNode(t *testing.T) (eventlogger.NodeID, *TestFilter) {
+ nodeId, err := event.GenerateNodeID()
+ require.NoError(t, err)
+ node := &TestFilter{}
+
+ return nodeId, node
+}
+
// newFormatterNode creates a new TestFormatter (formatter node).
func newFormatterNode(t *testing.T) (eventlogger.NodeID, *TestFormatter) {
nodeId, err := event.GenerateNodeID()
diff --git a/audit/sink_wrapper.go b/audit/sink_wrapper.go
index 9edf5c17833f..f61c908a687c 100644
--- a/audit/sink_wrapper.go
+++ b/audit/sink_wrapper.go
@@ -11,8 +11,10 @@ import (
"github.com/hashicorp/eventlogger"
)
+var _ eventlogger.Node = (*SinkWrapper)(nil)
+
// SinkWrapper is a wrapper for any kind of Sink Node that processes events
-// containing an auditEvent payload.
+// containing an AuditEvent payload.
type SinkWrapper struct {
Name string
Sink eventlogger.Node
@@ -23,7 +25,7 @@ type SinkWrapper struct {
// once this method returns.
func (s *SinkWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
defer func() {
- auditEvent, ok := e.Payload.(*auditEvent)
+ auditEvent, ok := e.Payload.(*AuditEvent)
if ok {
metrics.MeasureSince([]string{"audit", s.Name, auditEvent.Subtype.MetricTag()}, e.CreatedAt)
}
diff --git a/audit/types.go b/audit/types.go
index 0b0f3982a2d9..3434ff84d840 100644
--- a/audit/types.go
+++ b/audit/types.go
@@ -8,9 +8,9 @@ import (
"io"
"time"
- "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-bexpr"
+ "github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/salt"
-
"github.com/hashicorp/vault/sdk/logical"
)
@@ -35,8 +35,8 @@ type subtype string
// format defines types of format audit events support.
type format string
-// auditEvent is the audit event.
-type auditEvent struct {
+// AuditEvent is the audit event.
+type AuditEvent struct {
ID string `json:"id"`
Version string `json:"version"`
Subtype subtype `json:"subtype"` // the subtype of the audit event.
@@ -144,6 +144,13 @@ type FormatterConfig struct {
RequiredFormat format
}
+// EntryFilter should be used to filter which audit requests and responses
+// make it to a sink.
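+// When the filter expression evaluates to true for an entry, the entry
+// continues through the pipeline to the sink; otherwise it is dropped.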
+type EntryFilter struct {
+ // the evaluator for the bexpr expression that should be applied by the node.
+ evaluator *bexpr.Evaluator
+}
+
// RequestEntry is the structure of a request audit log entry.
type RequestEntry struct {
Time string `json:"time,omitempty"`
@@ -268,6 +275,10 @@ type Backend interface {
// Salter interface must be implemented by anything implementing Backend.
Salter
+ // The PipelineReader interface allows backends to surface information about their
+ // nodes for node and pipeline registration.
+ event.PipelineReader
+
// LogRequest is used to synchronously log a request. This is done after the
// request is authorized but before the request is executed. The arguments
// MUST not be modified in any way. They should be deep copied if this is
@@ -291,12 +302,6 @@ type Backend interface {
// Invalidate is called for path invalidation
Invalidate(context.Context)
-
- // RegisterNodesAndPipeline provides an eventlogger.Broker pointer so that
- // the Backend can call its RegisterNode and RegisterPipeline methods with
- // the nodes and the pipeline that were created in the corresponding
- // Factory function.
- RegisterNodesAndPipeline(*eventlogger.Broker, string) error
}
// BackendConfig contains configuration parameters used in the factory func to
diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go
index fc6a44a58719..2681ee244e99 100644
--- a/builtin/audit/file/backend.go
+++ b/builtin/audit/file/backend.go
@@ -27,75 +27,71 @@ const (
discard = "discard"
)
-func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
- if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
- }
- if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
- }
-
- path, ok := conf.Config["file_path"]
- if !ok {
- path, ok = conf.Config["path"]
- if !ok {
- return nil, fmt.Errorf("file_path is required")
- }
- }
+var _ audit.Backend = (*Backend)(nil)
- // normalize path if configured for stdout
- if strings.EqualFold(path, stdout) {
- path = stdout
- }
- if strings.EqualFold(path, discard) {
- path = discard
- }
+// Backend is the audit backend for the file-based audit store.
+//
+// NOTE: This audit backend is currently very simple: it appends to a file.
+// It doesn't do anything more at the moment to assist with rotation
+// or reset the write cursor; this should be done in the future.
+type Backend struct {
+ f *os.File
+ fileLock sync.RWMutex
+ formatter *audit.EntryFormatterWriter
+ formatConfig audit.FormatterConfig
+ mode os.FileMode
+ name string
+ nodeIDList []eventlogger.NodeID
+ nodeMap map[eventlogger.NodeID]eventlogger.Node
+ filePath string
+ salt *atomic.Value
+ saltConfig *salt.Config
+ saltMutex sync.RWMutex
+ saltView logical.Storage
+}
- var cfgOpts []audit.Option
+func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
+ const op = "file.Factory"
- if format, ok := conf.Config["format"]; ok {
- cfgOpts = append(cfgOpts, audit.WithFormat(format))
+ if conf.SaltConfig == nil {
+ return nil, fmt.Errorf("%s: nil salt config", op)
}
-
- // Check if hashing of accessor is disabled
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- v, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
+ if conf.SaltView == nil {
+ return nil, fmt.Errorf("%s: nil salt view", op)
}
- // Check if raw logging is enabled
- if raw, ok := conf.Config["log_raw"]; ok {
- v, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithRaw(v))
+ // Get file path from config or fall back to the old option name ('path') for compatibility
+ // (see commit bac4fe0799a372ba1245db642f3f6cd1f1d02669).
+ var filePath string
+ if p, ok := conf.Config["file_path"]; ok {
+ filePath = p
+ } else if p, ok = conf.Config["path"]; ok {
+ filePath = p
+ } else {
+ return nil, fmt.Errorf("%s: file_path is required", op)
}
- if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
- v, err := strconv.ParseBool(elideListResponsesRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithElision(v))
+ // normalize file path if configured for stdout or discard
+ if strings.EqualFold(filePath, stdout) {
+ filePath = stdout
+ }
+ if strings.EqualFold(filePath, discard) {
+ filePath = discard
}
mode := os.FileMode(0o600)
if modeRaw, ok := conf.Config["mode"]; ok {
m, err := strconv.ParseUint(modeRaw, 8, 32)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: unable to parse 'mode': %w", op, err)
}
switch m {
case 0:
// if mode is 0000, then do not modify file mode
- if path != stdout && path != discard {
- fileInfo, err := os.Stat(path)
+ if filePath != stdout && filePath != discard {
+ fileInfo, err := os.Stat(filePath)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: unable to stat %q: %w", op, filePath, err)
}
mode = fileInfo.Mode()
}
@@ -104,18 +100,19 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
}
}
- cfg, err := audit.NewFormatterConfig(cfgOpts...)
+ cfg, err := formatterConfig(conf.Config)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
b := &Backend{
- path: path,
+ filePath: filePath,
+ formatConfig: cfg,
mode: mode,
+ name: conf.MountPath,
saltConfig: conf.SaltConfig,
saltView: conf.SaltView,
salt: new(atomic.Value),
- formatConfig: cfg,
}
// Ensure we are working with the right type by explicitly storing a nil of
@@ -125,8 +122,9 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
// Configure the formatter for either case.
f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig), audit.WithPrefix(conf.Config["prefix"]))
if err != nil {
- return nil, fmt.Errorf("error creating formatter: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
+
var w audit.Writer
switch b.formatConfig.RequiredFormat {
case audit.JSONFormat:
@@ -134,63 +132,40 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
case audit.JSONxFormat:
w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
default:
- return nil, fmt.Errorf("unknown format type %q", b.formatConfig.RequiredFormat)
+ return nil, fmt.Errorf("%s: unknown format type %q", op, b.formatConfig.RequiredFormat)
}
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
- return nil, fmt.Errorf("error creating formatter writer: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
- b.nodeIDList = make([]eventlogger.NodeID, 2)
+ b.nodeIDList = []eventlogger.NodeID{}
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
- formatterNodeID, err := event.GenerateNodeID()
+ err := b.configureFilterNode(conf.Config["filter"])
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
- b.nodeIDList[0] = formatterNodeID
- b.nodeMap[formatterNodeID] = f
-
- var sinkNode eventlogger.Node
-
- switch path {
- case stdout:
- sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(b.formatConfig.RequiredFormat.String())}
- case discard:
- sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewNoopSink()}
- default:
- var err error
-
- var opts []event.Option
- // Check if mode is provided
- if modeRaw, ok := conf.Config["mode"]; ok {
- opts = append(opts, event.WithFileMode(modeRaw))
- }
-
- // The NewFileSink function attempts to open the file and will
- // return an error if it can't.
- n, err := event.NewFileSink(
- b.path,
- b.formatConfig.RequiredFormat.String(), opts...)
- if err != nil {
- return nil, fmt.Errorf("file sink creation failed for path %q: %w", path, err)
- }
- sinkNode = &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
+ formatterOpts := []audit.Option{
+ audit.WithHeaderFormatter(headersConfig),
+ audit.WithPrefix(conf.Config["prefix"]),
}
- sinkNodeID, err := event.GenerateNodeID()
+ err = b.configureFormatterNode(cfg, formatterOpts...)
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
- b.nodeIDList[1] = sinkNodeID
- b.nodeMap[sinkNodeID] = sinkNode
+ err = b.configureSinkNode(conf.MountPath, filePath, conf.Config["mode"], cfg.RequiredFormat.String())
+ if err != nil {
+ return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
+ }
} else {
- switch path {
+ switch filePath {
case stdout:
case discard:
default:
@@ -198,7 +173,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
// otherwise it will be too late to catch later without problems
// (ref: https://github.com/hashicorp/vault/issues/550)
if err := b.open(); err != nil {
- return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", path, err)
+ return nil, fmt.Errorf("%s: sanity check failed; unable to open %q for writing: %w", op, filePath, err)
}
}
}
@@ -206,32 +181,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
return b, nil
}
-// Backend is the audit backend for the file-based audit store.
-//
-// NOTE: This audit backend is currently very simple: it appends to a file.
-// It doesn't do anything more at the moment to assist with rotation
-// or reset the write cursor, this should be done in the future.
-type Backend struct {
- path string
-
- formatter *audit.EntryFormatterWriter
- formatConfig audit.FormatterConfig
-
- fileLock sync.RWMutex
- f *os.File
- mode os.FileMode
-
- saltMutex sync.RWMutex
- salt *atomic.Value
- saltConfig *salt.Config
- saltView logical.Storage
-
- nodeIDList []eventlogger.NodeID
- nodeMap map[eventlogger.NodeID]eventlogger.Node
-}
-
-var _ audit.Backend = (*Backend)(nil)
-
func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
s := b.salt.Load().(*salt.Salt)
if s != nil {
@@ -256,9 +205,10 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
return newSalt, nil
}
+// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
- switch b.path {
+ switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -274,6 +224,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return b.log(ctx, buf, writer)
}
+// Deprecated: Use eventlogger.
func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) error {
reader := bytes.NewReader(buf.Bytes())
@@ -290,7 +241,7 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
if _, err := reader.WriteTo(writer); err == nil {
b.fileLock.Unlock()
return nil
- } else if b.path == stdout {
+ } else if b.filePath == stdout {
b.fileLock.Unlock()
return err
}
@@ -312,9 +263,10 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
return err
}
+// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
- switch b.path {
+ switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -338,7 +290,7 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
// Old behavior
var writer io.Writer
- switch b.path {
+ switch b.filePath {
case stdout:
writer = os.Stdout
case discard:
@@ -360,27 +312,28 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
}
// The file lock must be held before calling this
+// Deprecated: Use eventlogger.
func (b *Backend) open() error {
if b.f != nil {
return nil
}
- if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil {
+ if err := os.MkdirAll(filepath.Dir(b.filePath), b.mode); err != nil {
return err
}
var err error
- b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
+ b.f, err = os.OpenFile(b.filePath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode)
if err != nil {
return err
}
// Change the file mode in case the log file already existed. We special
// case /dev/null since we can't chmod it and bypass if the mode is zero
- switch b.path {
+ switch b.filePath {
case "/dev/null":
default:
if b.mode != 0 {
- err = os.Chmod(b.path, b.mode)
+ err = os.Chmod(b.filePath, b.mode)
if err != nil {
return err
}
@@ -402,7 +355,7 @@ func (b *Backend) Reload(_ context.Context) error {
return nil
} else {
// old non-eventlogger behavior
- switch b.path {
+ switch b.filePath {
case stdout, discard:
return nil
}
@@ -432,20 +385,168 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt.Store((*salt.Salt)(nil))
}
-// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
-// the audit.Backend interface.
-func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
- for id, node := range b.nodeMap {
- if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
- return err
+// formatterConfig creates the configuration required by a formatter node using
+// the config map supplied to the factory.
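+// The recognized keys are 'format', 'hmac_accessor', 'log_raw' and
+// 'elide_list_responses'; any other keys in the map are ignored here.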
+func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
+ const op = "file.formatterConfig"
+
+ var opts []audit.Option
+
+ if format, ok := config["format"]; ok {
+ opts = append(opts, audit.WithFormat(format))
+ }
+
+ // Check if hashing of accessor is disabled
+ if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
+ v, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
+ }
+ opts = append(opts, audit.WithHMACAccessor(v))
+ }
+
+ // Check if raw logging is enabled
+ if raw, ok := config["log_raw"]; ok {
+ v, err := strconv.ParseBool(raw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
}
+ opts = append(opts, audit.WithRaw(v))
+ }
+
+ if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
+ v, err := strconv.ParseBool(elideListResponsesRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
+ }
+ opts = append(opts, audit.WithElision(v))
+ }
+
+ return audit.NewFormatterConfig(opts...)
+}
+
+// configureFilterNode is used to configure a filter node and associated ID on the Backend.
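+// The filter is expected to come from the audit device's 'filter' option; an
+// empty (or whitespace-only) filter is treated as 'no filtering' and no node is created.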
+func (b *Backend) configureFilterNode(filter string) error {
+ const op = "file.(Backend).configureFilterNode"
+
+ filter = strings.TrimSpace(filter)
+ if filter == "" {
+ return nil
+ }
+
+ filterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
+ }
+
+ filterNode, err := audit.NewEntryFilter(filter)
+ if err != nil {
+ return fmt.Errorf("%s: error creating filter node: %w", op, err)
}
- pipeline := eventlogger.Pipeline{
- PipelineID: eventlogger.PipelineID(name),
- EventType: eventlogger.EventType(event.AuditType.String()),
- NodeIDs: b.nodeIDList,
+ b.nodeIDList = append(b.nodeIDList, filterNodeID)
+ b.nodeMap[filterNodeID] = filterNode
+ return nil
+}
+
+// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
+func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
+ const op = "file.(Backend).configureFormatterNode"
+
+ formatterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
}
- return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
+ formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
+ if err != nil {
+ return fmt.Errorf("%s: error creating formatter: %w", op, err)
+ }
+
+ b.nodeIDList = append(b.nodeIDList, formatterNodeID)
+ b.nodeMap[formatterNodeID] = formatterNode
+ return nil
+}
+
+// configureSinkNode is used to configure a sink node and associated ID on the Backend.
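+// The special file paths 'stdout' and 'discard' (matched case-insensitively)
+// select the stdout sink and the no-op sink respectively; any other value is
+// treated as a path for a file sink.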
+func (b *Backend) configureSinkNode(name string, filePath string, mode string, format string) error {
+ const op = "file.(Backend).configureSinkNode"
+
+ name = strings.TrimSpace(name)
+ if name == "" {
+ return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ filePath = strings.TrimSpace(filePath)
+ if filePath == "" {
+ return fmt.Errorf("%s: file path is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ sinkNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
+ }
+
+ // normalize file path if configured for stdout or discard
+ if strings.EqualFold(filePath, stdout) {
+ filePath = stdout
+ } else if strings.EqualFold(filePath, discard) {
+ filePath = discard
+ }
+
+ var sinkNode eventlogger.Node
+ var sinkName string
+
+ switch filePath {
+ case stdout:
+ sinkName = stdout
+ sinkNode, err = event.NewStdoutSinkNode(format)
+ case discard:
+ sinkName = discard
+ sinkNode = event.NewNoopSink()
+ default:
+ // The NewFileSink function attempts to open the file and will return an error if it can't.
+ sinkName = name
+ sinkNode, err = event.NewFileSink(filePath, format, event.WithFileMode(mode))
+ }
+
+ if err != nil {
+ return fmt.Errorf("%s: file sink creation failed for path %q: %w", op, filePath, err)
+ }
+
+ sinkNode = &audit.SinkWrapper{Name: sinkName, Sink: sinkNode}
+
+ b.nodeIDList = append(b.nodeIDList, sinkNodeID)
+ b.nodeMap[sinkNodeID] = sinkNode
+ return nil
+}
+
+// Name for this backend; this should ideally correspond to the mount path for the audit device.
+func (b *Backend) Name() string {
+ return b.name
+}
+
+// Nodes returns the nodes which should be used by the event framework to process audit entries.
+func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
+ return b.nodeMap
+}
+
+// NodeIDs returns the IDs of the nodes, in the order they are required.
+func (b *Backend) NodeIDs() []eventlogger.NodeID {
+ return b.nodeIDList
+}
+
+// EventType returns the event type for the backend.
+func (b *Backend) EventType() eventlogger.EventType {
+ return eventlogger.EventType(event.AuditType.String())
+}
+
+// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
+func (b *Backend) HasFiltering() bool {
+ return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go
index e0ba06319ca5..17ea7fd20365 100644
--- a/builtin/audit/file/backend_test.go
+++ b/builtin/audit/file/backend_test.go
@@ -12,10 +12,12 @@ import (
"testing"
"time"
+ "github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/helper/salt"
"github.com/hashicorp/vault/sdk/logical"
+ "github.com/stretchr/testify/require"
)
func TestAuditFile_fileModeNew(t *testing.T) {
@@ -145,6 +147,7 @@ func TestAuditFile_EventLogger_fileModeNew(t *testing.T) {
}
_, err = Factory(context.Background(), &audit.BackendConfig{
+ MountPath: "foo/bar",
SaltConfig: &salt.Config{},
SaltView: &logical.InmemStorage{},
Config: config,
@@ -210,3 +213,366 @@ func BenchmarkAuditFile_request(b *testing.B) {
}
})
}
+
+// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
+func TestBackend_formatterConfig(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ config map[string]string
+ want audit.FormatterConfig
+ wantErr bool
+ expectedMessage string
+ }{
+ "happy-path-json": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "json",
+ },
+ wantErr: false,
+ },
+ "happy-path-jsonx": {
+ config: map[string]string{
+ "format": audit.JSONxFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "jsonx",
+ },
+ wantErr: false,
+ },
+ "invalid-format": {
+ config: map[string]string{
+ "format": " squiggly ",
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedMessage: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
+ },
+ "invalid-hmac-accessor": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedMessage: "file.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-log-raw": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedMessage: "file.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-elide-bool": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedMessage: "file.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ got, err := formatterConfig(tc.config)
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedMessage)
+ } else {
+ require.NoError(t, err)
+ }
+ require.Equal(t, tc.want, got)
+ })
+ }
+}
+
+// TestBackend_configureFilterNode ensures that configureFilterNode handles various
+// filter values as expected. Empty (including whitespace) strings should return
+// no error but skip configuration of the node.
+func TestBackend_configureFilterNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ filter string
+ shouldSkipNode bool
+ wantErr bool
+ expectedErrorMsg string
+ }{
+ "happy": {
+ filter: "foo == bar",
+ },
+ "empty": {
+ filter: "",
+ shouldSkipNode: true,
+ },
+ "spacey": {
+ filter: " ",
+ shouldSkipNode: true,
+ },
+ "bad": {
+ filter: "___qwerty",
+ wantErr: true,
+ expectedErrorMsg: "file.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureFilterNode(tc.filter)
+
+ switch {
+ case tc.wantErr:
+ require.Error(t, err)
+ require.ErrorContains(t, err, tc.expectedErrorMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ case tc.shouldSkipNode:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ default:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+ }
+ })
+ }
+}
+
+// TestBackend_configureFormatterNode ensures that configureFormatterNode
+// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
+func TestBackend_configureFormatterNode(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+}
+
+// TestBackend_configureSinkNode ensures that we can correctly configure the sink
+// node on the Backend, and any incorrect parameters result in the relevant errors.
+func TestBackend_configureSinkNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ name string
+ filePath string
+ mode string
+ format string
+ wantErr bool
+ expectedErrMsg string
+ expectedName string
+ }{
+ "name-empty": {
+ name: "",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "name-whitespace": {
+ name: " ",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "filePath-empty": {
+ name: "foo",
+ filePath: "",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: file path is required: invalid parameter",
+ },
+ "filePath-whitespace": {
+ name: "foo",
+ filePath: " ",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: file path is required: invalid parameter",
+ },
+ "filePath-stdout-lower": {
+ name: "foo",
+ expectedName: "stdout",
+ filePath: "stdout",
+ format: "json",
+ },
+ "filePath-stdout-upper": {
+ name: "foo",
+ expectedName: "stdout",
+ filePath: "STDOUT",
+ format: "json",
+ },
+ "filePath-stdout-mixed": {
+ name: "foo",
+ expectedName: "stdout",
+ filePath: "StdOut",
+ format: "json",
+ },
+ "filePath-discard-lower": {
+ name: "foo",
+ expectedName: "discard",
+ filePath: "discard",
+ format: "json",
+ },
+ "filePath-discard-upper": {
+ name: "foo",
+ expectedName: "discard",
+ filePath: "DISCARD",
+ format: "json",
+ },
+ "filePath-discard-mixed": {
+ name: "foo",
+ expectedName: "discard",
+ filePath: "DisCArd",
+ format: "json",
+ },
+ "format-empty": {
+ name: "foo",
+ filePath: "/tmp/",
+ format: "",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "format-whitespace": {
+ name: "foo",
+ filePath: "/tmp/",
+ format: " ",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "filePath-weird-with-mode-zero": {
+ name: "foo",
+ filePath: "/tmp/qwerty",
+ format: "json",
+ mode: "0",
+ wantErr: true,
+ expectedErrMsg: "file.(Backend).configureSinkNode: file sink creation failed for path \"/tmp/qwerty\": event.NewFileSink: unable to determine existing file mode: stat /tmp/qwerty: no such file or directory",
+ },
+ "happy": {
+ name: "foo",
+ filePath: "/tmp/audit.log",
+ mode: "",
+ format: "json",
+ wantErr: false,
+ expectedName: "foo",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureSinkNode(tc.name, tc.filePath, tc.mode, tc.format)
+
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ } else {
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+ sw, ok := node.(*audit.SinkWrapper)
+ require.True(t, ok)
+ require.Equal(t, tc.expectedName, sw.Name)
+ }
+ })
+ }
+}
+
+// TestBackend_configureFilterFormatterSink ensures that configuring all three
+// types of nodes on a Backend works as expected, i.e. we have all three nodes
+// at the end and nothing gets overwritten. The order of calls influences the
+// slice of IDs on the Backend.
+func TestBackend_configureFilterFormatterSink(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFilterNode("foo == bar")
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+ require.NoError(t, err)
+
+ err = b.configureSinkNode("foo", "/tmp/foo", "0777", "json")
+ require.NoError(t, err)
+
+ require.Len(t, b.nodeIDList, 3)
+ require.Len(t, b.nodeMap, 3)
+
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+
+ id = b.nodeIDList[1]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+
+ id = b.nodeIDList[2]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+}
diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go
index 1e906468c7f8..09662c2ab683 100644
--- a/builtin/audit/socket/backend.go
+++ b/builtin/audit/socket/backend.go
@@ -9,6 +9,7 @@ import (
"fmt"
"net"
"strconv"
+ "strings"
"sync"
"time"
@@ -21,83 +22,76 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
-func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
+var _ audit.Backend = (*Backend)(nil)
+
+// Backend is the audit backend for the socket audit transport.
+type Backend struct {
+ sync.Mutex
+ address string
+ connection net.Conn
+ formatter *audit.EntryFormatterWriter
+ formatConfig audit.FormatterConfig
+ name string
+ nodeIDList []eventlogger.NodeID
+ nodeMap map[eventlogger.NodeID]eventlogger.Node
+ salt *salt.Salt
+ saltConfig *salt.Config
+ saltMutex sync.RWMutex
+ saltView logical.Storage
+ socketType string
+ writeDuration time.Duration
+}
+
+func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
+ const op = "socket.Factory"
+
if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
+ return nil, fmt.Errorf("%s: nil salt config", op)
}
+
if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
+ return nil, fmt.Errorf("%s: nil salt view", op)
}
address, ok := conf.Config["address"]
if !ok {
- return nil, fmt.Errorf("address is required")
+ return nil, fmt.Errorf("%s: address is required", op)
}
socketType, ok := conf.Config["socket_type"]
if !ok {
socketType = "tcp"
}
+
writeDeadline, ok := conf.Config["write_timeout"]
if !ok {
writeDeadline = "2s"
}
+
writeDuration, err := parseutil.ParseDurationSecond(writeDeadline)
if err != nil {
- return nil, err
- }
-
- var cfgOpts []audit.Option
-
- if format, ok := conf.Config["format"]; ok {
- cfgOpts = append(cfgOpts, audit.WithFormat(format))
- }
-
- // Check if hashing of accessor is disabled
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- v, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
- }
-
- // Check if raw logging is enabled
- if raw, ok := conf.Config["log_raw"]; ok {
- v, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithRaw(v))
+ return nil, fmt.Errorf("%s: failed to parse 'write_timeout': %w", op, err)
}
- if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
- v, err := strconv.ParseBool(elideListResponsesRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithElision(v))
- }
-
- cfg, err := audit.NewFormatterConfig(cfgOpts...)
+ cfg, err := formatterConfig(conf.Config)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
b := &Backend{
- saltConfig: conf.SaltConfig,
- saltView: conf.SaltView,
- formatConfig: cfg,
-
- writeDuration: writeDuration,
address: address,
+ formatConfig: cfg,
+ name: conf.MountPath,
+ saltConfig: conf.SaltConfig,
+ saltView: conf.SaltView,
socketType: socketType,
+ writeDuration: writeDuration,
}
// Configure the formatter for either case.
- f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig))
+ f, err := audit.NewEntryFormatter(cfg, b, audit.WithHeaderFormatter(headersConfig))
if err != nil {
- return nil, fmt.Errorf("error creating formatter: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
var w audit.Writer
switch b.formatConfig.RequiredFormat {
@@ -109,72 +103,44 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
- return nil, fmt.Errorf("error creating formatter writer: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
- var opts []event.Option
+ b.nodeIDList = []eventlogger.NodeID{}
+ b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
- if socketType, ok := conf.Config["socket_type"]; ok {
- opts = append(opts, event.WithSocketType(socketType))
+ err := b.configureFilterNode(conf.Config["filter"])
+ if err != nil {
+ return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
- if writeDeadline, ok := conf.Config["write_timeout"]; ok {
- opts = append(opts, event.WithMaxDuration(writeDeadline))
+ opts := []audit.Option{
+ audit.WithHeaderFormatter(headersConfig),
}
- b.nodeIDList = make([]eventlogger.NodeID, 2)
- b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
-
- formatterNodeID, err := event.GenerateNodeID()
+ err = b.configureFormatterNode(cfg, opts...)
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
- b.nodeIDList[0] = formatterNodeID
- b.nodeMap[formatterNodeID] = f
- n, err := event.NewSocketSink(b.formatConfig.RequiredFormat.String(), address, opts...)
- if err != nil {
- return nil, fmt.Errorf("error creating socket sink node: %w", err)
+ sinkOpts := []event.Option{
+ event.WithSocketType(socketType),
+ event.WithMaxDuration(writeDeadline),
}
- sinkNode := &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
- sinkNodeID, err := event.GenerateNodeID()
+
+ err = b.configureSinkNode(conf.MountPath, address, cfg.RequiredFormat.String(), sinkOpts...)
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
}
- b.nodeIDList[1] = sinkNodeID
- b.nodeMap[sinkNodeID] = sinkNode
}
return b, nil
}
-// Backend is the audit backend for the socket audit transport.
-type Backend struct {
- connection net.Conn
-
- formatter *audit.EntryFormatterWriter
- formatConfig audit.FormatterConfig
-
- writeDuration time.Duration
- address string
- socketType string
-
- sync.Mutex
-
- saltMutex sync.RWMutex
- salt *salt.Salt
- saltConfig *salt.Config
- saltView logical.Storage
-
- nodeIDList []eventlogger.NodeID
- nodeMap map[eventlogger.NodeID]eventlogger.Node
-}
-
-var _ audit.Backend = (*Backend)(nil)
-
+// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteRequest(ctx, &buf, in); err != nil {
@@ -198,6 +164,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return err
}
+// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteResponse(ctx, &buf, in); err != nil {
@@ -256,6 +223,7 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
return err
}
+// Deprecated: Use eventlogger.
func (b *Backend) write(ctx context.Context, buf []byte) error {
if b.connection == nil {
if err := b.reconnect(ctx); err != nil {
@@ -276,6 +244,7 @@ func (b *Backend) write(ctx context.Context, buf []byte) error {
return nil
}
+// Deprecated: Use eventlogger.
func (b *Backend) reconnect(ctx context.Context) error {
if b.connection != nil {
b.connection.Close()
@@ -317,12 +286,12 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
if b.salt != nil {
return b.salt, nil
}
- salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
+ s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
if err != nil {
return nil, err
}
- b.salt = salt
- return salt, nil
+ b.salt = s
+ return s, nil
}
func (b *Backend) Invalidate(_ context.Context) {
@@ -331,20 +300,146 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt = nil
}
-// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
-// the audit.Backend interface.
-func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
- for id, node := range b.nodeMap {
- if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
- return err
+// formatterConfig creates the configuration required by a formatter node using
+// the config map supplied to the factory.
+func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
+ const op = "socket.formatterConfig"
+
+ var cfgOpts []audit.Option
+
+ if format, ok := config["format"]; ok {
+ cfgOpts = append(cfgOpts, audit.WithFormat(format))
+ }
+
+ // Check if hashing of accessor is disabled
+ if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
+ v, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
+ }
+ cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
+ }
+
+ // Check if raw logging is enabled
+ if raw, ok := config["log_raw"]; ok {
+ v, err := strconv.ParseBool(raw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
+ }
+ cfgOpts = append(cfgOpts, audit.WithRaw(v))
+ }
+
+ if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
+ v, err := strconv.ParseBool(elideListResponsesRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
}
+ cfgOpts = append(cfgOpts, audit.WithElision(v))
+ }
+
+ return audit.NewFormatterConfig(cfgOpts...)
+}
+
+// configureFilterNode is used to configure a filter node and associated ID on the Backend.
+func (b *Backend) configureFilterNode(filter string) error {
+ const op = "socket.(Backend).configureFilterNode"
+
+ filter = strings.TrimSpace(filter)
+ if filter == "" {
+ return nil
+ }
+
+ filterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
+ }
+
+ filterNode, err := audit.NewEntryFilter(filter)
+ if err != nil {
+ return fmt.Errorf("%s: error creating filter node: %w", op, err)
+ }
+
+ b.nodeIDList = append(b.nodeIDList, filterNodeID)
+ b.nodeMap[filterNodeID] = filterNode
+ return nil
+}
+
+// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
+func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
+ const op = "socket.(Backend).configureFormatterNode"
+
+ formatterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
+ }
+
+ formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
+ if err != nil {
+ return fmt.Errorf("%s: error creating formatter: %w", op, err)
+ }
+
+ b.nodeIDList = append(b.nodeIDList, formatterNodeID)
+ b.nodeMap[formatterNodeID] = formatterNode
+ return nil
+}
+
+// configureSinkNode is used to configure a sink node and associated ID on the Backend.
+func (b *Backend) configureSinkNode(name string, address string, format string, opts ...event.Option) error {
+ const op = "socket.(Backend).configureSinkNode"
+
+ name = strings.TrimSpace(name)
+ if name == "" {
+ return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ address = strings.TrimSpace(address)
+ if address == "" {
+ return fmt.Errorf("%s: address is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
}
- pipeline := eventlogger.Pipeline{
- PipelineID: eventlogger.PipelineID(name),
- EventType: eventlogger.EventType(event.AuditType.String()),
- NodeIDs: b.nodeIDList,
+ sinkNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
+ }
+
+ n, err := event.NewSocketSink(address, format, opts...)
+ if err != nil {
+ return fmt.Errorf("%s: error creating socket sink node: %w", op, err)
}
- return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
+ sinkNode := &audit.SinkWrapper{Name: name, Sink: n}
+
+ b.nodeIDList = append(b.nodeIDList, sinkNodeID)
+ b.nodeMap[sinkNodeID] = sinkNode
+ return nil
+}
+
+// Name for this backend; ideally this corresponds to the mount path for the audit device.
+func (b *Backend) Name() string {
+ return b.name
+}
+
+// Nodes returns the nodes which should be used by the event framework to process audit entries.
+func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
+ return b.nodeMap
+}
+
+// NodeIDs returns the IDs of the nodes, in the order they are required.
+func (b *Backend) NodeIDs() []eventlogger.NodeID {
+ return b.nodeIDList
+}
+
+// EventType returns the event type for the backend.
+func (b *Backend) EventType() eventlogger.EventType {
+ return eventlogger.EventType(event.AuditType.String())
+}
+
+// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
+func (b *Backend) HasFiltering() bool {
+ return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
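
The factory above parses `write_timeout` with parseutil.ParseDurationSecond, defaulting to "2s". As a point of reference, a minimal standalone sketch of that parsing behavior (values are illustrative; parseutil lives in hashicorp/go-secure-stdlib):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-secure-stdlib/parseutil"
)

func main() {
	// "2s" is the backend's default write_timeout; ParseDurationSecond also
	// accepts bare numbers, which it interprets as seconds.
	for _, raw := range []string{"2s", "500ms", "5"} {
		d, err := parseutil.ParseDurationSecond(raw)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%s -> %s\n", raw, d)
	}
}
```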
diff --git a/builtin/audit/socket/backend_test.go b/builtin/audit/socket/backend_test.go
new file mode 100644
index 000000000000..d1dfc384720c
--- /dev/null
+++ b/builtin/audit/socket/backend_test.go
@@ -0,0 +1,331 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package socket
+
+import (
+ "testing"
+
+ "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/vault/audit"
+ "github.com/stretchr/testify/require"
+)
+
+// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
+func TestBackend_formatterConfig(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ config map[string]string
+ want audit.FormatterConfig
+ wantErr bool
+ expectedErrMsg string
+ }{
+ "happy-path-json": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "json",
+ },
+ wantErr: false,
+ },
+ "happy-path-jsonx": {
+ config: map[string]string{
+ "format": audit.JSONxFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "jsonx",
+ },
+ wantErr: false,
+ },
+ "invalid-format": {
+ config: map[string]string{
+ "format": " squiggly ",
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
+ },
+ "invalid-hmac-accessor": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "socket.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-log-raw": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "socket.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-elide-bool": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "socket.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ got, err := formatterConfig(tc.config)
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ require.Equal(t, tc.want, got)
+ })
+ }
+}
+
+// TestBackend_configureFilterNode ensures that configureFilterNode handles various
+// filter values as expected. Empty (including whitespace) strings should return
+// no error but skip configuration of the node.
+func TestBackend_configureFilterNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ filter string
+ shouldSkipNode bool
+ wantErr bool
+ expectedErrorMsg string
+ }{
+ "happy": {
+ filter: "foo == bar",
+ },
+ "empty": {
+ filter: "",
+ shouldSkipNode: true,
+ },
+ "spacey": {
+ filter: " ",
+ shouldSkipNode: true,
+ },
+ "bad": {
+ filter: "___qwerty",
+ wantErr: true,
+ expectedErrorMsg: "socket.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureFilterNode(tc.filter)
+
+ switch {
+ case tc.wantErr:
+ require.Error(t, err)
+ require.ErrorContains(t, err, tc.expectedErrorMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ case tc.shouldSkipNode:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ default:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+ }
+ })
+ }
+}
+
+// TestBackend_configureFormatterNode ensures that configureFormatterNode
+// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
+func TestBackend_configureFormatterNode(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+}
+
+// TestBackend_configureSinkNode ensures that we can correctly configure the sink
+// node on the Backend, and any incorrect parameters result in the relevant errors.
+func TestBackend_configureSinkNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ name string
+ address string
+ format string
+ wantErr bool
+ expectedErrMsg string
+ expectedName string
+ }{
+ "name-empty": {
+ name: "",
+ address: "wss://foo",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "name-whitespace": {
+ name: " ",
+ address: "wss://foo",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "address-empty": {
+ name: "foo",
+ address: "",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: address is required: invalid parameter",
+ },
+ "address-whitespace": {
+ name: "foo",
+ address: " ",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: address is required: invalid parameter",
+ },
+ "format-empty": {
+ name: "foo",
+ address: "wss://foo",
+ format: "",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "format-whitespace": {
+ name: "foo",
+ address: "wss://foo",
+ format: " ",
+ wantErr: true,
+ expectedErrMsg: "socket.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "happy": {
+ name: "foo",
+ address: "wss://foo",
+ format: "json",
+ wantErr: false,
+ expectedName: "foo",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureSinkNode(tc.name, tc.address, tc.format)
+
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ } else {
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+ sw, ok := node.(*audit.SinkWrapper)
+ require.True(t, ok)
+ require.Equal(t, tc.expectedName, sw.Name)
+ }
+ })
+ }
+}
+
+// TestBackend_configureFilterFormatterSink ensures that configuring all three
+// types of nodes on a Backend works as expected, i.e. we have all three nodes
+// at the end and nothing gets overwritten. The order of calls influences the
+// slice of IDs on the Backend.
+func TestBackend_configureFilterFormatterSink(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFilterNode("foo == bar")
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+ require.NoError(t, err)
+
+ err = b.configureSinkNode("foo", "https://hashicorp.com", "json")
+ require.NoError(t, err)
+
+ require.Len(t, b.nodeIDList, 3)
+ require.Len(t, b.nodeMap, 3)
+
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+
+ id = b.nodeIDList[1]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+
+ id = b.nodeIDList[2]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+}
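
For context on the filter expressions these tests exercise (`foo == bar` builds an evaluator, `___qwerty` fails at creation), here is a hedged standalone sketch of the go-bexpr evaluation behind the filter node; the datum struct and its tags are illustrative assumptions, not Vault's real bexpr datum:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-bexpr"
)

// datum is illustrative only; the real filter node evaluates the fields
// exposed by Vault's bexpr datum for audit entries.
type datum struct {
	MountType string `bexpr:"mount_type"`
	Operation string `bexpr:"operation"`
}

func main() {
	// An expression like "___qwerty" would fail here, which is why
	// configureFilterNode surfaces an error for it.
	eval, err := bexpr.CreateEvaluator("mount_type == kv and operation != read")
	if err != nil {
		panic(err)
	}

	keep, err := eval.Evaluate(&datum{MountType: "kv", Operation: "update"})
	if err != nil {
		panic(err)
	}
	fmt.Println(keep) // true: the event would continue down the pipeline
}
```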
diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go
index 9dc0298f64f6..45d6e0762daa 100644
--- a/builtin/audit/syslog/backend.go
+++ b/builtin/audit/syslog/backend.go
@@ -8,6 +8,7 @@ import (
"context"
"fmt"
"strconv"
+ "strings"
"sync"
"github.com/hashicorp/eventlogger"
@@ -18,13 +19,31 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
-func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
+var _ audit.Backend = (*Backend)(nil)
+
+// Backend is the audit backend for the syslog-based audit store.
+type Backend struct {
+ formatter *audit.EntryFormatterWriter
+ formatConfig audit.FormatterConfig
+ logger gsyslog.Syslogger
+ name string
+ nodeIDList []eventlogger.NodeID
+ nodeMap map[eventlogger.NodeID]eventlogger.Node
+ salt *salt.Salt
+ saltConfig *salt.Config
+ saltMutex sync.RWMutex
+ saltView logical.Storage
+}
+
+func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
+ const op = "syslog.Factory"
+
if conf.SaltConfig == nil {
- return nil, fmt.Errorf("nil salt config")
+ return nil, fmt.Errorf("%s: nil salt config", op)
}
if conf.SaltView == nil {
- return nil, fmt.Errorf("nil salt view")
+ return nil, fmt.Errorf("%s: nil salt view", op)
}
// Get facility or default to AUTH
@@ -39,60 +58,29 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
tag = "vault"
}
- var cfgOpts []audit.Option
-
- if format, ok := conf.Config["format"]; ok {
- cfgOpts = append(cfgOpts, audit.WithFormat(format))
- }
-
- // Check if hashing of accessor is disabled
- if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
- v, err := strconv.ParseBool(hmacAccessorRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
- }
-
- // Check if raw logging is enabled
- if raw, ok := conf.Config["log_raw"]; ok {
- v, err := strconv.ParseBool(raw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithRaw(v))
- }
-
- if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
- v, err := strconv.ParseBool(elideListResponsesRaw)
- if err != nil {
- return nil, err
- }
- cfgOpts = append(cfgOpts, audit.WithElision(v))
- }
-
- cfg, err := audit.NewFormatterConfig(cfgOpts...)
+ cfg, err := formatterConfig(conf.Config)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
// Get the logger
logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%s: cannot create logger: %w", op, err)
}
b := &Backend{
+ formatConfig: cfg,
logger: logger,
+ name: conf.MountPath,
saltConfig: conf.SaltConfig,
saltView: conf.SaltView,
- formatConfig: cfg,
}
// Configure the formatter for either case.
f, err := audit.NewEntryFormatter(b.formatConfig, b, audit.WithHeaderFormatter(headersConfig), audit.WithPrefix(conf.Config["prefix"]))
if err != nil {
- return nil, fmt.Errorf("error creating formatter: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter: %w", op, err)
}
var w audit.Writer
@@ -105,67 +93,45 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
if err != nil {
- return nil, fmt.Errorf("error creating formatter writer: %w", err)
+ return nil, fmt.Errorf("%s: error creating formatter writer: %w", op, err)
}
b.formatter = fw
if useEventLogger {
- var opts []event.Option
+ b.nodeIDList = []eventlogger.NodeID{}
+ b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
- // Get facility or default to AUTH
- if facility, ok := conf.Config["facility"]; ok {
- opts = append(opts, event.WithFacility(facility))
+ err := b.configureFilterNode(conf.Config["filter"])
+ if err != nil {
+ return nil, fmt.Errorf("%s: error configuring filter node: %w", op, err)
}
- if tag, ok := conf.Config["tag"]; ok {
- opts = append(opts, event.WithTag(tag))
+ formatterOpts := []audit.Option{
+ audit.WithHeaderFormatter(headersConfig),
+ audit.WithPrefix(conf.Config["prefix"]),
}
- b.nodeIDList = make([]eventlogger.NodeID, 2)
- b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
-
- formatterNodeID, err := event.GenerateNodeID()
+ err = b.configureFormatterNode(cfg, formatterOpts...)
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring formatter node: %w", op, err)
}
- b.nodeIDList[0] = formatterNodeID
- b.nodeMap[formatterNodeID] = f
- n, err := event.NewSyslogSink(b.formatConfig.RequiredFormat.String(), opts...)
- if err != nil {
- return nil, fmt.Errorf("error creating syslog sink node: %w", err)
+ sinkOpts := []event.Option{
+ event.WithFacility(facility),
+ event.WithTag(tag),
}
- sinkNode := &audit.SinkWrapper{Name: conf.MountPath, Sink: n}
- sinkNodeID, err := event.GenerateNodeID()
+ err = b.configureSinkNode(conf.MountPath, cfg.RequiredFormat.String(), sinkOpts...)
if err != nil {
- return nil, fmt.Errorf("error generating random NodeID for sink node: %w", err)
+ return nil, fmt.Errorf("%s: error configuring sink node: %w", op, err)
}
- b.nodeIDList[1] = sinkNodeID
- b.nodeMap[sinkNodeID] = sinkNode
}
- return b, nil
-}
-
-// Backend is the audit backend for the syslog-based audit store.
-type Backend struct {
- logger gsyslog.Syslogger
- formatter *audit.EntryFormatterWriter
- formatConfig audit.FormatterConfig
-
- saltMutex sync.RWMutex
- salt *salt.Salt
- saltConfig *salt.Config
- saltView logical.Storage
-
- nodeIDList []eventlogger.NodeID
- nodeMap map[eventlogger.NodeID]eventlogger.Node
+ return b, nil
}
-var _ audit.Backend = (*Backend)(nil)
-
+// Deprecated: Use eventlogger.
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteRequest(ctx, &buf, in); err != nil {
@@ -177,6 +143,7 @@ func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
return err
}
+// Deprecated: Use eventlogger.
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var buf bytes.Buffer
if err := b.formatter.FormatAndWriteResponse(ctx, &buf, in); err != nil {
@@ -227,12 +194,12 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
if b.salt != nil {
return b.salt, nil
}
- salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
+ s, err := salt.NewSalt(ctx, b.saltView, b.saltConfig)
if err != nil {
return nil, err
}
- b.salt = salt
- return salt, nil
+ b.salt = s
+ return s, nil
}
func (b *Backend) Invalidate(_ context.Context) {
@@ -241,20 +208,142 @@ func (b *Backend) Invalidate(_ context.Context) {
b.salt = nil
}
-// RegisterNodesAndPipeline registers the nodes and a pipeline as required by
-// the audit.Backend interface.
-func (b *Backend) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
- for id, node := range b.nodeMap {
- if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
- return err
+// formatterConfig creates the configuration required by a formatter node using
+// the config map supplied to the factory.
+func formatterConfig(config map[string]string) (audit.FormatterConfig, error) {
+ const op = "syslog.formatterConfig"
+
+ var opts []audit.Option
+
+ if format, ok := config["format"]; ok {
+ opts = append(opts, audit.WithFormat(format))
+ }
+
+ // Check if hashing of accessor is disabled
+ if hmacAccessorRaw, ok := config["hmac_accessor"]; ok {
+ v, err := strconv.ParseBool(hmacAccessorRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'hmac_accessor': %w", op, err)
}
+ opts = append(opts, audit.WithHMACAccessor(v))
}
- pipeline := eventlogger.Pipeline{
- PipelineID: eventlogger.PipelineID(name),
- EventType: eventlogger.EventType(event.AuditType.String()),
- NodeIDs: b.nodeIDList,
+ // Check if raw logging is enabled
+ if raw, ok := config["log_raw"]; ok {
+ v, err := strconv.ParseBool(raw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'log_raw': %w", op, err)
+ }
+ opts = append(opts, audit.WithRaw(v))
+ }
+
+ if elideListResponsesRaw, ok := config["elide_list_responses"]; ok {
+ v, err := strconv.ParseBool(elideListResponsesRaw)
+ if err != nil {
+ return audit.FormatterConfig{}, fmt.Errorf("%s: unable to parse 'elide_list_responses': %w", op, err)
+ }
+ opts = append(opts, audit.WithElision(v))
+ }
+
+ return audit.NewFormatterConfig(opts...)
+}
+
+// configureFilterNode is used to configure a filter node and associated ID on the Backend.
+func (b *Backend) configureFilterNode(filter string) error {
+ const op = "syslog.(Backend).configureFilterNode"
+
+ filter = strings.TrimSpace(filter)
+ if filter == "" {
+ return nil
+ }
+
+ filterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for filter node: %w", op, err)
+ }
+
+ filterNode, err := audit.NewEntryFilter(filter)
+ if err != nil {
+ return fmt.Errorf("%s: error creating filter node: %w", op, err)
+ }
+
+ b.nodeIDList = append(b.nodeIDList, filterNodeID)
+ b.nodeMap[filterNodeID] = filterNode
+ return nil
+}
+
+// configureFormatterNode is used to configure a formatter node and associated ID on the Backend.
+func (b *Backend) configureFormatterNode(formatConfig audit.FormatterConfig, opts ...audit.Option) error {
+ const op = "syslog.(Backend).configureFormatterNode"
+
+ formatterNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for formatter node: %w", op, err)
+ }
+
+ formatterNode, err := audit.NewEntryFormatter(formatConfig, b, opts...)
+ if err != nil {
+ return fmt.Errorf("%s: error creating formatter: %w", op, err)
+ }
+
+ b.nodeIDList = append(b.nodeIDList, formatterNodeID)
+ b.nodeMap[formatterNodeID] = formatterNode
+ return nil
+}
+
+// configureSinkNode is used to configure a sink node and associated ID on the Backend.
+func (b *Backend) configureSinkNode(name string, format string, opts ...event.Option) error {
+ const op = "syslog.(Backend).configureSinkNode"
+
+ name = strings.TrimSpace(name)
+ if name == "" {
+ return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return fmt.Errorf("%s: format is required: %w", op, event.ErrInvalidParameter)
+ }
+
+ sinkNodeID, err := event.GenerateNodeID()
+ if err != nil {
+ return fmt.Errorf("%s: error generating random NodeID for sink node: %w", op, err)
+ }
+
+ n, err := event.NewSyslogSink(format, opts...)
+ if err != nil {
+ return fmt.Errorf("%s: error creating syslog sink node: %w", op, err)
}
- return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
+ // Wrap the sink node with metrics middleware.
+ sinkNode := &audit.SinkWrapper{Name: name, Sink: n}
+
+ b.nodeIDList = append(b.nodeIDList, sinkNodeID)
+ b.nodeMap[sinkNodeID] = sinkNode
+ return nil
+}
+
+// Name for this backend; ideally this corresponds to the mount path for the audit device.
+func (b *Backend) Name() string {
+ return b.name
+}
+
+// Nodes returns the nodes which should be used by the event framework to process audit entries.
+func (b *Backend) Nodes() map[eventlogger.NodeID]eventlogger.Node {
+ return b.nodeMap
+}
+
+// NodeIDs returns the IDs of the nodes, in the order they are required.
+func (b *Backend) NodeIDs() []eventlogger.NodeID {
+ return b.nodeIDList
+}
+
+// EventType returns the event type for the backend.
+func (b *Backend) EventType() eventlogger.EventType {
+ return eventlogger.EventType(event.AuditType.String())
+}
+
+// HasFiltering determines if the first node for the pipeline is an eventlogger.NodeTypeFilter.
+func (b *Backend) HasFiltering() bool {
+ return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
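
Both backends shed their RegisterNodesAndPipeline methods in favor of the Name/Nodes/NodeIDs/EventType accessors. A hedged sketch of how a central broker might now perform the registration itself, assuming the audit.Backend interface exposes those accessors (the registerBackend helper is hypothetical; the eventlogger calls mirror the removed code):

```go
package sketch

import (
	"github.com/hashicorp/eventlogger"
	"github.com/hashicorp/vault/audit"
)

// registerBackend is a hypothetical helper: register every node, then a
// pipeline whose ID is the backend (mount) name, exactly as the removed
// per-backend methods did.
func registerBackend(broker *eventlogger.Broker, b audit.Backend) error {
	for id, node := range b.Nodes() {
		if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
			return err
		}
	}

	pipeline := eventlogger.Pipeline{
		PipelineID: eventlogger.PipelineID(b.Name()),
		EventType:  b.EventType(),
		NodeIDs:    b.NodeIDs(),
	}

	return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
}
```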
diff --git a/builtin/audit/syslog/backend_test.go b/builtin/audit/syslog/backend_test.go
new file mode 100644
index 000000000000..4aeaa5d0da5c
--- /dev/null
+++ b/builtin/audit/syslog/backend_test.go
@@ -0,0 +1,313 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package syslog
+
+import (
+ "testing"
+
+ "github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/vault/audit"
+ "github.com/stretchr/testify/require"
+)
+
+// TestBackend_formatterConfig ensures that all the configuration values are parsed correctly.
+func TestBackend_formatterConfig(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ config map[string]string
+ want audit.FormatterConfig
+ wantErr bool
+ expectedErrMsg string
+ }{
+ "happy-path-json": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "json",
+ },
+ wantErr: false,
+ },
+ "happy-path-jsonx": {
+ config: map[string]string{
+ "format": audit.JSONxFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{
+ Raw: true,
+ HMACAccessor: true,
+ ElideListResponses: true,
+ RequiredFormat: "jsonx",
+ },
+ wantErr: false,
+ },
+ "invalid-format": {
+ config: map[string]string{
+ "format": " squiggly ",
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "true",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "audit.NewFormatterConfig: error applying options: audit.(format).validate: 'squiggly' is not a valid format: invalid parameter",
+ },
+ "invalid-hmac-accessor": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "syslog.formatterConfig: unable to parse 'hmac_accessor': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-log-raw": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "syslog.formatterConfig: unable to parse 'log_raw': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ "invalid-elide-bool": {
+ config: map[string]string{
+ "format": audit.JSONFormat.String(),
+ "hmac_accessor": "true",
+ "log_raw": "true",
+ "elide_list_responses": "maybe",
+ },
+ want: audit.FormatterConfig{},
+ wantErr: true,
+ expectedErrMsg: "syslog.formatterConfig: unable to parse 'elide_list_responses': strconv.ParseBool: parsing \"maybe\": invalid syntax",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ got, err := formatterConfig(tc.config)
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ } else {
+ require.NoError(t, err)
+ }
+ require.Equal(t, tc.want, got)
+ })
+ }
+}
+
+// TestBackend_configureFilterNode ensures that configureFilterNode handles various
+// filter values as expected. Empty (including whitespace) strings should return
+// no error but skip configuration of the node.
+func TestBackend_configureFilterNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ filter string
+ shouldSkipNode bool
+ wantErr bool
+ expectedErrorMsg string
+ }{
+ "happy": {
+ filter: "foo == bar",
+ },
+ "empty": {
+ filter: "",
+ shouldSkipNode: true,
+ },
+ "spacey": {
+ filter: " ",
+ shouldSkipNode: true,
+ },
+ "bad": {
+ filter: "___qwerty",
+ wantErr: true,
+ expectedErrorMsg: "syslog.(Backend).configureFilterNode: error creating filter node: audit.NewEntryFilter: cannot create new audit filter",
+ },
+ }
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureFilterNode(tc.filter)
+
+ switch {
+ case tc.wantErr:
+ require.Error(t, err)
+ require.ErrorContains(t, err, tc.expectedErrorMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ case tc.shouldSkipNode:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ default:
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+ }
+ })
+ }
+}
+
+// TestBackend_configureFormatterNode ensures that configureFormatterNode
+// populates the nodeIDList and nodeMap on Backend when given valid formatConfig.
+func TestBackend_configureFormatterNode(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+}
+
+// TestBackend_configureSinkNode ensures that we can correctly configure the sink
+// node on the Backend, and any incorrect parameters result in the relevant errors.
+func TestBackend_configureSinkNode(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ name string
+ format string
+ wantErr bool
+ expectedErrMsg string
+ expectedName string
+ }{
+ "name-empty": {
+ name: "",
+ wantErr: true,
+ expectedErrMsg: "syslog.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "name-whitespace": {
+ name: " ",
+ wantErr: true,
+ expectedErrMsg: "syslog.(Backend).configureSinkNode: name is required: invalid parameter",
+ },
+ "format-empty": {
+ name: "foo",
+ format: "",
+ wantErr: true,
+ expectedErrMsg: "syslog.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "format-whitespace": {
+ name: "foo",
+ format: " ",
+ wantErr: true,
+ expectedErrMsg: "syslog.(Backend).configureSinkNode: format is required: invalid parameter",
+ },
+ "happy": {
+ name: "foo",
+ format: "json",
+ wantErr: false,
+ expectedName: "foo",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ err := b.configureSinkNode(tc.name, tc.format)
+
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ require.Len(t, b.nodeIDList, 0)
+ require.Len(t, b.nodeMap, 0)
+ } else {
+ require.NoError(t, err)
+ require.Len(t, b.nodeIDList, 1)
+ require.Len(t, b.nodeMap, 1)
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+ sw, ok := node.(*audit.SinkWrapper)
+ require.True(t, ok)
+ require.Equal(t, tc.expectedName, sw.Name)
+ }
+ })
+ }
+}
+
+// TestBackend_configureFilterFormatterSink ensures that configuring all three
+// types of nodes on a Backend works as expected, i.e. we have all three nodes
+// at the end and nothing gets overwritten. The order of calls influences the
+// slice of IDs on the Backend.
+func TestBackend_configureFilterFormatterSink(t *testing.T) {
+ t.Parallel()
+
+ b := &Backend{
+ nodeIDList: []eventlogger.NodeID{},
+ nodeMap: map[eventlogger.NodeID]eventlogger.Node{},
+ }
+
+ formatConfig, err := audit.NewFormatterConfig()
+ require.NoError(t, err)
+
+ err = b.configureFilterNode("foo == bar")
+ require.NoError(t, err)
+
+ err = b.configureFormatterNode(formatConfig)
+ require.NoError(t, err)
+
+ err = b.configureSinkNode("foo", "json")
+ require.NoError(t, err)
+
+ require.Len(t, b.nodeIDList, 3)
+ require.Len(t, b.nodeMap, 3)
+
+ id := b.nodeIDList[0]
+ node := b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFilter, node.Type())
+
+ id = b.nodeIDList[1]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeFormatter, node.Type())
+
+ id = b.nodeIDList[2]
+ node = b.nodeMap[id]
+ require.Equal(t, eventlogger.NodeTypeSink, node.Type())
+}
diff --git a/changelog/23950.txt b/changelog/23950.txt
deleted file mode 100644
index d28cdcea2a25..000000000000
--- a/changelog/23950.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-```release-note:improvement
-fairshare/jobmanager: Add 'stopped' method
-```
diff --git a/changelog/24476.txt b/changelog/24476.txt
new file mode 100644
index 000000000000..797ed9a48d47
--- /dev/null
+++ b/changelog/24476.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: improve accessibility - color contrast, labels, and automatic testing
+```
diff --git a/changelog/24549.txt b/changelog/24549.txt
new file mode 100644
index 000000000000..6838b024c782
--- /dev/null
+++ b/changelog/24549.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status.
+```
diff --git a/changelog/24558.txt b/changelog/24558.txt
new file mode 100644
index 000000000000..cd573e6d5849
--- /dev/null
+++ b/changelog/24558.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+core/audit: add filter parameter when enabling an audit device, allowing filtering (using go-bexpr expressions) of audit entries written to the device's audit log
+```
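
As an illustration of the feature this entry describes, enabling a socket audit device with a filter via the Go API client might look like the following sketch; the mount path, address, and filter expression are placeholder values:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder values: adjust address/socket_type for a real sink.
	err = client.Sys().EnableAuditWithOptions("socket", &api.EnableAuditOptions{
		Type: "socket",
		Options: map[string]string{
			"address":     "127.0.0.1:9090",
			"socket_type": "tcp",
			// Only entries matching this go-bexpr expression are written.
			"filter": "mount_type == kv",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```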
diff --git a/changelog/24616.txt b/changelog/24616.txt
new file mode 100644
index 000000000000..54f0f1edfcd8
--- /dev/null
+++ b/changelog/24616.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+fairshare: fix a race condition in JobManager.GetWorkerCounts
+```
\ No newline at end of file
diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl
index 4b9eb8000e90..8772a9e140b1 100644
--- a/enos/enos-modules.hcl
+++ b/enos/enos-modules.hcl
@@ -226,6 +226,13 @@ module "vault_verify_undo_logs" {
vault_instance_count = var.vault_instance_count
}
+module "vault_verify_default_lcq" {
+ source = "./modules/vault_verify_default_lcq"
+
+ vault_autopilot_default_max_leases = "300000"
+ vault_instance_count = var.vault_instance_count
+}
+
module "vault_verify_replication" {
source = "./modules/vault_verify_replication"
diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl
index 0e3c1c73ac3e..dfa9c646da5e 100644
--- a/enos/enos-scenario-autopilot.hcl
+++ b/enos/enos-scenario-autopilot.hcl
@@ -50,8 +50,9 @@ scenario "autopilot" {
rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu
}
- manage_service = matrix.artifact_type == "bundle"
- vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+ manage_service = matrix.artifact_type == "bundle"
+ vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+ vault_autopilot_default_max_leases = semverconstraint(matrix.initial_version, ">=1.16.0-0") ? "300000" : ""
}
step "build_vault" {
@@ -524,6 +525,27 @@ scenario "autopilot" {
}
}
+ # Verify that upgrading from a version <1.16.0 does not introduce Default LCQ
+ step "verify_default_lcq" {
+ module = module.vault_verify_default_lcq
+ depends_on = [
+ step.create_vault_cluster_upgrade_targets,
+ step.remove_old_nodes,
+ step.upgrade_vault_cluster_with_autopilot,
+ step.verify_autopilot_idle_state
+ ]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
+ vault_root_token = step.create_vault_cluster.root_token
+ vault_autopilot_default_max_leases = local.vault_autopilot_default_max_leases
+ }
+ }
+
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
diff --git a/enos/modules/vault_verify_default_lcq/main.tf b/enos/modules/vault_verify_default_lcq/main.tf
new file mode 100644
index 000000000000..d79fd63d5edf
--- /dev/null
+++ b/enos/modules/vault_verify_default_lcq/main.tf
@@ -0,0 +1,74 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+ required_providers {
+ enos = {
+ source = "app.terraform.io/hashicorp-qti/enos"
+ }
+ }
+}
+
+variable "vault_instance_count" {
+ type = number
+ description = "How many vault instances are in the cluster"
+}
+
+variable "vault_instances" {
+ type = map(object({
+ private_ip = string
+ public_ip = string
+ }))
+ description = "The vault cluster instances that were created"
+}
+
+variable "vault_root_token" {
+ type = string
+ description = "The vault root token"
+}
+
+variable "vault_autopilot_default_max_leases" {
+ type = string
+ description = "The autopilot upgrade expected max_leases"
+}
+
+variable "timeout" {
+ type = number
+ description = "The max number of seconds to wait before timing out"
+ default = 60
+}
+
+variable "retry_interval" {
+ type = number
+ description = "How many seconds to wait between each retry"
+ default = 2
+}
+
+locals {
+ public_ips = {
+ for idx in range(var.vault_instance_count) : idx => {
+ public_ip = values(var.vault_instances)[idx].public_ip
+ private_ip = values(var.vault_instances)[idx].private_ip
+ }
+ }
+}
+
+resource "enos_remote_exec" "smoke_verify_default_lcq" {
+ for_each = local.public_ips
+
+ environment = {
+ RETRY_INTERVAL = var.retry_interval
+ TIMEOUT_SECONDS = var.timeout
+ VAULT_ADDR = "http://localhost:8200"
+ VAULT_TOKEN = var.vault_root_token
+ DEFAULT_LCQ = var.vault_autopilot_default_max_leases
+ }
+
+ scripts = [abspath("${path.module}/scripts/smoke-verify-default-lcq.sh")]
+
+ transport = {
+ ssh = {
+ host = each.value.public_ip
+ }
+ }
+}
diff --git a/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh
new file mode 100755
index 000000000000..493d8b8ba96d
--- /dev/null
+++ b/enos/modules/vault_verify_default_lcq/scripts/smoke-verify-default-lcq.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+function fail() {
+ echo "$1" 1>&2
+ exit 1
+}
+
+[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
+[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+getMaxLeases() {
+ curl --request GET --header "X-Vault-Token: $VAULT_TOKEN" \
+ "$VAULT_ADDR/v1/sys/quotas/lease-count/default" | jq '.data.max_leases // empty'
+}
+
+waitForMaxLeases() {
+ local max_leases
+ if ! max_leases=$(getMaxLeases); then
+ echo "failed getting /v1/sys/quotas/lease-count/default data" 1>&2
+ return 1
+ fi
+
+ if [[ "$max_leases" == "$DEFAULT_LCQ" ]]; then
+ echo "$max_leases"
+ return 0
+ else
+ echo "Expected Default LCQ $DEFAULT_LCQ but got $max_leases"
+ return 1
+ fi
+}
+
+begin_time=$(date +%s)
+end_time=$((begin_time + TIMEOUT_SECONDS))
+while [ "$(date +%s)" -lt "$end_time" ]; do
+ if waitForMaxLeases; then
+ exit 0
+ fi
+
+ sleep "$RETRY_INTERVAL"
+done
+
+fail "Timed out waiting for Default LCQ verification to complete. Data:\n\t$(getMaxLeases)"
\ No newline at end of file
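
The script's retry loop is the standard poll-until-deadline pattern; for comparison, a compact Go rendition (illustrative only, with a hypothetical check standing in for the max_leases comparison):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries check every interval until it succeeds or the deadline
// passes, mirroring the shell loop above.
func pollUntil(timeout, interval time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	lastErr := errors.New("check never ran")
	for time.Now().Before(deadline) {
		if lastErr = check(); lastErr == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timed out: %w", lastErr)
}

func main() {
	err := pollUntil(4*time.Second, time.Second, func() error {
		// Hypothetical check; a real one would compare the quota endpoint's
		// max_leases against the expected default.
		return errors.New("not ready")
	})
	fmt.Println(err)
}
```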
diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go
index c6d3521f9295..086a549f647e 100644
--- a/helper/fairshare/jobmanager.go
+++ b/helper/fairshare/jobmanager.go
@@ -9,7 +9,6 @@ import (
"io/ioutil"
"math"
"sync"
- "sync/atomic"
"time"
"github.com/armon/go-metrics"
@@ -47,7 +46,6 @@ type JobManager struct {
// track queues by index for round robin worker assignment
queuesIndex []string
lastQueueAccessed int
- stopped atomic.Bool
}
// NewJobManager creates a job manager, with an optional name
@@ -100,14 +98,9 @@ func (j *JobManager) Stop() {
j.logger.Trace("terminating job manager...")
close(j.quit)
j.workerPool.stop()
- j.stopped.Store(true)
})
}
-func (j *JobManager) Stopped() bool {
- return j.stopped.Load()
-}
-
// AddJob adds a job to the given queue, creating the queue if it doesn't exist
func (j *JobManager) AddJob(job Job, queueID string) {
j.l.Lock()
@@ -150,7 +143,12 @@ func (j *JobManager) GetPendingJobCount() int {
func (j *JobManager) GetWorkerCounts() map[string]int {
j.l.RLock()
defer j.l.RUnlock()
- return j.workerCount
+ workerCounts := make(map[string]int, len(j.workerCount))
+ for k, v := range j.workerCount {
+ workerCounts[k] = v
+ }
+
+ return workerCounts
}
// GetWorkQueueLengths() returns a map of queue ID to number of jobs in the queue
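
GetWorkerCounts now returns a copy because handing out the internal map would let callers iterate it while workers mutate it under the lock. A standalone illustration of the copy-under-read-lock pattern (illustrative types, not Vault code):

```go
package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.RWMutex
	m  map[string]int
}

// snapshot copies the map under the read lock so callers can range over the
// result while writers keep mutating the internal map.
func (c *counter) snapshot() map[string]int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[string]int, len(c.m))
	for k, v := range c.m {
		out[k] = v
	}
	return out
}

func (c *counter) inc(k string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k]++
}

func main() {
	c := &counter{m: make(map[string]int)}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			c.inc("a")
		}
	}()
	// Safe even while the goroutine writes: we only touch the copy.
	_ = c.snapshot()
	wg.Wait()
	fmt.Println(c.snapshot())
}
```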
diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go
index 363093f573a5..288f0d2f949a 100644
--- a/helper/fairshare/jobmanager_test.go
+++ b/helper/fairshare/jobmanager_test.go
@@ -9,8 +9,6 @@ import (
"sync"
"testing"
"time"
-
- "github.com/stretchr/testify/assert"
)
func TestJobManager_NewJobManager(t *testing.T) {
@@ -176,7 +174,6 @@ func TestJobManager_Stop(t *testing.T) {
j := NewJobManager("job-mgr-test", 5, newTestLogger("jobmanager-test"), nil)
j.Start()
- assert.False(t, j.Stopped())
doneCh := make(chan struct{})
timeout := time.After(5 * time.Second)
@@ -188,7 +185,6 @@ func TestJobManager_Stop(t *testing.T) {
select {
case <-doneCh:
- assert.True(t, j.Stopped())
break
case <-timeout:
t.Fatal("timed out")
@@ -751,3 +747,23 @@ func TestFairshare_queueWorkersSaturated(t *testing.T) {
j.l.RUnlock()
}
}
+
+func TestJobManager_GetWorkerCounts_RaceCondition(t *testing.T) {
+ j := NewJobManager("test-job-mgr", 20, nil, nil)
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 10; i++ {
+ j.incrementWorkerCount("a")
+ }
+ }()
+ wcs := j.GetWorkerCounts()
+ wcs["foo"] = 10
+ for worker, count := range wcs {
+ _ = worker
+ _ = count
+ }
+
+ wg.Wait()
+}
diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go
index 8e0e449ad5d8..c2d6bc8a3ce7 100644
--- a/helper/testhelpers/corehelpers/corehelpers.go
+++ b/helper/testhelpers/corehelpers/corehelpers.go
@@ -6,9 +6,10 @@
package corehelpers
import (
- "bytes"
"context"
"crypto/sha256"
+ "encoding/json"
+ "errors"
"fmt"
"io"
"os"
@@ -29,6 +30,11 @@ import (
"github.com/mitchellh/go-testing-interface"
)
+var (
+ _ audit.Backend = (*NoopAudit)(nil)
+ _ eventlogger.Node = (*noopWrapper)(nil)
+)
+
var externalPlugins = []string{"transform", "kmip", "keymgmt"}
// RetryUntil runs f until it returns a nil result or the timeout is reached.
@@ -210,52 +216,51 @@ func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.P
return consts.Unknown, false
}
-func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit {
- n, err := NewNoopAudit(config)
+func TestNoopAudit(t testing.T, path string, config map[string]string, opts ...audit.Option) *NoopAudit {
+ cfg := &audit.BackendConfig{Config: config, MountPath: path}
+ n, err := NewNoopAudit(cfg, opts...)
if err != nil {
t.Fatal(err)
}
return n
}
-func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
+// NewNoopAudit should be used to create a NoopAudit as it handles creation of a
+// predictable salt and wraps eventlogger nodes so that information about what
+// they've seen or formatted can be retrieved.
+func NewNoopAudit(config *audit.BackendConfig, opts ...audit.Option) (*NoopAudit, error) {
view := &logical.InmemStorage{}
- err := view.Put(context.Background(), &logical.StorageEntry{
- Key: "salt",
- Value: []byte("foo"),
- })
+
+ // Create the salt with a known key for predictable hmac values.
+ se := &logical.StorageEntry{Key: "salt", Value: []byte("foo")}
+ err := view.Put(context.Background(), se)
if err != nil {
return nil, err
}
- n := &NoopAudit{
- Config: &audit.BackendConfig{
- SaltView: view,
- SaltConfig: &salt.Config{
- HMAC: sha256.New,
- HMACType: "hmac-sha256",
- },
- Config: config,
+ // Override the salt related config settings.
+ backendConfig := &audit.BackendConfig{
+ SaltView: view,
+ SaltConfig: &salt.Config{
+ HMAC: sha256.New,
+ HMACType: "hmac-sha256",
},
+ Config: config.Config,
+ MountPath: config.MountPath,
}
+ n := &NoopAudit{Config: backendConfig}
+
cfg, err := audit.NewFormatterConfig()
if err != nil {
return nil, err
}
- f, err := audit.NewEntryFormatter(cfg, n)
+ f, err := audit.NewEntryFormatter(cfg, n, opts...)
if err != nil {
return nil, fmt.Errorf("error creating formatter: %w", err)
}
- fw, err := audit.NewEntryFormatterWriter(cfg, f, &audit.JSONWriter{})
- if err != nil {
- return nil, fmt.Errorf("error creating formatter writer: %w", err)
- }
-
- n.formatter = fw
-
n.nodeIDList = make([]eventlogger.NodeID, 2)
n.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node, 2)
@@ -264,8 +269,11 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err)
}
+ // Wrap the formatting node, so we can get any bytes that were formatted etc.
+ wrappedFormatter := &noopWrapper{format: "json", node: f, backend: n}
+
n.nodeIDList[0] = formatterNodeID
- n.nodeMap[formatterNodeID] = f
+ n.nodeMap[formatterNodeID] = wrappedFormatter
sinkNode := event.NewNoopSink()
sinkNodeID, err := event.GenerateNodeID()
@@ -279,9 +287,12 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) {
return n, nil
}
+// NoopAuditFactory should be used when the test needs a way to access bytes that
+// have been formatted by the pipeline during audit requests.
+// The records parameter will be repointed to the one used within the pipeline.
func NoopAuditFactory(records **[][]byte) audit.Factory {
- return func(_ context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) {
- n, err := NewNoopAudit(config.Config)
+ return func(_ context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) {
+ n, err := NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter))
if err != nil {
return nil, err
}
@@ -293,8 +304,19 @@ func NoopAuditFactory(records **[][]byte) audit.Factory {
}
}
+// noopWrapper wraps a formatter node in order to allow access to the formatted
+// bytes, the formatted headers, and parts of the logical.LogInput.
+// Some older tests rely on being able to query this information, so while those
+// tests remain we should keep supporting it.
+type noopWrapper struct {
+ format string
+ node eventlogger.Node
+ backend *NoopAudit
+}
+
type NoopAudit struct {
- Config *audit.BackendConfig
+ Config *audit.BackendConfig
+
ReqErr error
ReqAuth []*logical.Auth
Req []*logical.Request
@@ -309,81 +331,164 @@ type NoopAudit struct {
RespNonHMACKeys [][]string
RespReqNonHMACKeys [][]string
RespErrs []error
-
- formatter *audit.EntryFormatterWriter
- records [][]byte
- l sync.RWMutex
- salt *salt.Salt
- saltMutex sync.RWMutex
+ records [][]byte
+ l sync.RWMutex
+ salt *salt.Salt
+ saltMutex sync.RWMutex
nodeIDList []eventlogger.NodeID
nodeMap map[eventlogger.NodeID]eventlogger.Node
}
-func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
- n.l.Lock()
- defer n.l.Unlock()
+// Process handles the contortions required by older test code to preserve its
+// expected behavior. It attempts some pre/post-processing of the logical.LogInput
+// that should form part of the event's payload data, captures the resulting
+// headers that were formatted, and tracks the overall bytes a formatted event
+// uses once it's ready to head down the pipeline to the sink node (a noop for us).
+func (n *noopWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+ n.backend.l.Lock()
+ defer n.backend.l.Unlock()
- if n.formatter != nil {
- var w bytes.Buffer
- err := n.formatter.FormatAndWriteRequest(ctx, &w, in)
- if err != nil {
- return err
- }
- n.records = append(n.records, w.Bytes())
+ var err error
+
+ // We're expecting audit events since this is an audit device.
+ a, ok := e.Payload.(*audit.AuditEvent)
+ if !ok {
+ return nil, errors.New("cannot parse payload as an audit event")
}
- n.ReqAuth = append(n.ReqAuth, in.Auth)
- n.Req = append(n.Req, in.Request)
- n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers)
- n.ReqNonHMACKeys = in.NonHMACReqDataKeys
- n.ReqErrs = append(n.ReqErrs, in.OuterErr)
+ in := a.Data
- return n.ReqErr
-}
+ // Depending on the type of the audit event (request or response) we need to
+ // track different things.
+ switch a.Subtype {
+ case audit.RequestType:
+ n.backend.ReqAuth = append(n.backend.ReqAuth, in.Auth)
+ n.backend.Req = append(n.backend.Req, in.Request)
+ n.backend.ReqNonHMACKeys = in.NonHMACReqDataKeys
+ n.backend.ReqErrs = append(n.backend.ReqErrs, in.OuterErr)
-func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
- n.l.Lock()
- defer n.l.Unlock()
+ if n.backend.ReqErr != nil {
+ return nil, n.backend.ReqErr
+ }
+ case audit.ResponseType:
+ n.backend.RespAuth = append(n.backend.RespAuth, in.Auth)
+ n.backend.RespReq = append(n.backend.RespReq, in.Request)
+ n.backend.Resp = append(n.backend.Resp, in.Response)
+ n.backend.RespErrs = append(n.backend.RespErrs, in.OuterErr)
+
+ if in.Response != nil {
+ n.backend.RespNonHMACKeys = append(n.backend.RespNonHMACKeys, in.NonHMACRespDataKeys)
+ n.backend.RespReqNonHMACKeys = append(n.backend.RespReqNonHMACKeys, in.NonHMACReqDataKeys)
+ }
- if n.formatter != nil {
- var w bytes.Buffer
- err := n.formatter.FormatAndWriteResponse(ctx, &w, in)
- if err != nil {
- return err
+ if n.backend.RespErr != nil {
+ return nil, n.backend.RespErr
}
- n.records = append(n.records, w.Bytes())
+ default:
+ return nil, fmt.Errorf("unknown audit event type: %q", a.Subtype)
+ }
+
+ // Once we've taken note of the relevant properties of the event, we get the
+ // underlying (wrapped) node to process it as normal.
+ e, err = n.node.Process(ctx, e)
+ if err != nil {
+ return nil, fmt.Errorf("error processing wrapped node: %w", err)
+ }
+
+ // Once processing has been carried out, the event (formatted by the wrapped
+ // node) should contain the output ready for the sink node. We'll grab that
+ // in order to track how many bytes we formatted.
+ b, ok := e.Format(n.format)
+ if ok {
+ n.backend.records = append(n.backend.records, b)
}
- n.RespAuth = append(n.RespAuth, in.Auth)
- n.RespReq = append(n.RespReq, in.Request)
- n.Resp = append(n.Resp, in.Response)
- n.RespErrs = append(n.RespErrs, in.OuterErr)
+ // Finally, the last bit of post-processing is to make sure that we track the
+ // formatted headers that would have made it to the logs via the sink node.
+ // They only appear in requests.
+ if a.Subtype == audit.RequestType {
+ reqEntry := &audit.RequestEntry{}
+ err = json.Unmarshal(b, &reqEntry)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse formatted audit entry data: %w", err)
+ }
- if in.Response != nil {
- n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys)
- n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys)
+ n.backend.ReqHeaders = append(n.backend.ReqHeaders, reqEntry.Request.Headers)
}
- return n.RespErr
+ // Return the event and no error in order to let the pipeline continue on.
+ return e, nil
+}
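
Reviewer note: the wrapper above leans on the eventlogger contract that a formatter node stores its output on the event, keyed by format name, so that `e.Format(format)` can retrieve it later. A minimal sketch of a node honoring that contract follows; the `jsonFormatter` type is hypothetical (not part of this change), and imports of `context`, `encoding/json`, and `eventlogger` are assumed.

```go
// Sketch only: a minimal formatter node satisfying the contract noopWrapper
// relies on. The jsonFormatter name is hypothetical.
type jsonFormatter struct{}

func (jsonFormatter) Reopen() error              { return nil }
func (jsonFormatter) Type() eventlogger.NodeType { return eventlogger.NodeTypeFormatter }

func (jsonFormatter) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
	b, err := json.Marshal(e.Payload)
	if err != nil {
		return nil, err
	}
	if e.Formatted == nil {
		e.Formatted = make(map[string][]byte)
	}
	// Store the formatted bytes on the event, keyed by format, so a sink
	// node (or noopWrapper) can fetch them later with e.Format("json").
	e.Formatted["json"] = b
	return e, nil
}
```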
+
+func (n *noopWrapper) Reopen() error {
+ return n.node.Reopen()
+}
+
+func (n *noopWrapper) Type() eventlogger.NodeType {
+ return n.node.Type()
}
+
+// Deprecated: use the eventlogger framework instead.
+func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error {
+ return nil
+}
+
+// Deprecated: use the eventlogger framework instead.
+func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error {
+ return nil
+}
+
+// LogTestMessage will manually crank the handle on the nodes associated with this backend.
func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error {
n.l.Lock()
defer n.l.Unlock()
- var w bytes.Buffer
- tempFormatter, err := audit.NewTemporaryFormatter(config["format"], config["prefix"])
- if err != nil {
- return err
+ // Fake event for test purposes.
+ e := &eventlogger.Event{
+ Type: eventlogger.EventType(event.AuditType.String()),
+ CreatedAt: time.Now(),
+ Formatted: make(map[string][]byte),
+ Payload: in,
}
- err = tempFormatter.FormatAndWriteResponse(ctx, &w, in)
+ // Try to get the required format from config and default to JSON.
+ format, ok := config["format"]
+ if !ok {
+ format = "json"
+ }
+ cfg, err := audit.NewFormatterConfig(audit.WithFormat(format))
if err != nil {
- return err
+ return fmt.Errorf("cannot create config for formatter node: %w", err)
}
+ // Create a temporary formatter node for reuse below; it doesn't persist its salt.
+ f, err := audit.NewEntryFormatter(cfg, n, audit.WithPrefix(config["prefix"]))
+ if err != nil {
+ return fmt.Errorf("cannot create formatter node: %w", err)
+ }
- n.records = append(n.records, w.Bytes())
+ // Go over each node in order from our list.
+ for _, id := range n.nodeIDList {
+ node, ok := n.nodeMap[id]
+ if !ok {
+ return fmt.Errorf("node not found: %v", id)
+ }
+
+ switch node.Type() {
+ case eventlogger.NodeTypeFormatter:
+ // Use a temporary formatter node which doesn't persist its salt anywhere.
+ if formatNode, ok := node.(*audit.EntryFormatter); ok && formatNode != nil {
+ e, err = f.Process(ctx, e)
+ if err != nil {
+ return fmt.Errorf("cannot process event using temporary formatter: %w", err)
+ }
+
+ // Housekeeping: record the bytes that were formatted for this event.
+ if e != nil {
+ b, ok := e.Format(format)
+ if ok {
+ n.records = append(n.records, b)
+ }
+ }
+ }
+ default:
+ e, err = node.Process(ctx, e)
+ if err != nil {
+ return fmt.Errorf("cannot process event using node %q: %w", id, err)
+ }
+ }
+ }
return nil
}
@@ -507,3 +612,23 @@ func NewTestLogger(t testing.T) *TestLogger {
func (tl *TestLogger) StopLogging() {
tl.InterceptLogger.DeregisterSink(tl.sink)
}
+
+func (n *NoopAudit) EventType() eventlogger.EventType {
+ return eventlogger.EventType(event.AuditType.String())
+}
+
+func (n *NoopAudit) HasFiltering() bool {
+ return false
+}
+
+func (n *NoopAudit) Name() string {
+ return n.Config.MountPath
+}
+
+func (n *NoopAudit) Nodes() map[eventlogger.NodeID]eventlogger.Node {
+ return n.nodeMap
+}
+
+func (n *NoopAudit) NodeIDs() []eventlogger.NodeID {
+ return n.nodeIDList
+}
diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go
index c3499b8b6b0c..76c7731eba6d 100644
--- a/helper/testhelpers/testhelpers.go
+++ b/helper/testhelpers/testhelpers.go
@@ -11,10 +11,8 @@ import (
"fmt"
"io/ioutil"
"math/rand"
- "net/url"
"os"
"strings"
- "sync/atomic"
"time"
"github.com/armon/go-metrics"
@@ -435,46 +433,9 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by
return newKeys
}
-// TestRaftServerAddressProvider is a ServerAddressProvider that uses the
-// ClusterAddr() of each node to provide raft addresses.
-//
-// Note that TestRaftServerAddressProvider should only be used in cases where
-// cores that are part of a raft configuration have already had
-// startClusterListener() called (via either unsealing or raft joining).
-type TestRaftServerAddressProvider struct {
- Cluster *vault.TestCluster
-}
-
-func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) {
- for _, core := range p.Cluster.Cores {
- if core.NodeID == string(id) {
- parsed, err := url.Parse(core.ClusterAddr())
- if err != nil {
- return "", err
- }
-
- return raftlib.ServerAddress(parsed.Host), nil
- }
- }
-
- return "", errors.New("could not find cluster addr")
-}
-
func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
- addressProvider := &TestRaftServerAddressProvider{Cluster: cluster}
-
- atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1)
-
leader := cluster.Cores[0]
- // Seal the leader so we can install an address provider
- {
- EnsureCoreSealed(t, leader)
- leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
- cluster.UnsealCore(t, leader)
- vault.TestWaitActive(t, leader.Core)
- }
-
leaderInfos := []*raft.LeaderJoinInfo{
{
LeaderAPIAddr: leader.Client.Address(),
@@ -485,7 +446,6 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) {
// Join followers
for i := 1; i < len(cluster.Cores); i++ {
core := cluster.Cores[i]
- core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
_, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false)
if err != nil {
t.Fatal(err)
diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go
index 75431867a274..6092a6c47a81 100644
--- a/helper/testhelpers/teststorage/teststorage.go
+++ b/helper/testhelpers/teststorage/teststorage.go
@@ -10,8 +10,6 @@ import (
"os"
"time"
- "github.com/hashicorp/vault/internalshared/configutil"
-
"github.com/hashicorp/go-hclog"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
"github.com/hashicorp/vault/audit"
@@ -23,6 +21,7 @@ import (
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/helper/testhelpers/corehelpers"
vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/sdk/physical"
@@ -105,7 +104,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun
}
}
-func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle {
+func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle {
nodeID := fmt.Sprintf("core-%d", coreIdx)
raftDir, err := ioutil.TempDir("", "vault-raft-")
if err != nil {
@@ -118,10 +117,25 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma
logger.Info("raft dir", "dir", raftDir)
+ backend, err := makeRaftBackend(logger, nodeID, raftDir, extraConf, bridge)
+ if err != nil {
+ cleanupFunc()
+ t.Fatal(err)
+ }
+
+ return &vault.PhysicalBackendBundle{
+ Backend: backend,
+ Cleanup: cleanupFunc,
+ }
+}
+
+func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) (physical.Backend, error) {
conf := map[string]string{
- "path": raftDir,
- "node_id": nodeID,
- "performance_multiplier": "8",
+ "path": raftDir,
+ "node_id": nodeID,
+ "performance_multiplier": "8",
+ "autopilot_reconcile_interval": "300ms",
+ "autopilot_update_interval": "100ms",
}
for k, v := range extraConf {
val, ok := v.(string)
@@ -132,14 +146,13 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma
backend, err := raft.NewRaftBackend(conf, logger.Named("raft"))
if err != nil {
- cleanupFunc()
- t.Fatal(err)
+ return nil, err
}
-
- return &vault.PhysicalBackendBundle{
- Backend: backend,
- Cleanup: cleanupFunc,
+ if bridge != nil {
+ backend.(*raft.RaftBackend).SetServerAddressProvider(bridge)
}
+
+ return backend, nil
}
// RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend
@@ -222,7 +235,14 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
opts.KeepStandbysSealed = true
- opts.PhysicalFactory = MakeRaftBackend
+ var bridge *raft.ClusterAddrBridge
+ if !opts.InmemClusterLayers && opts.ClusterLayers == nil {
+ bridge = raft.NewClusterAddrBridge()
+ }
+ conf.ClusterAddrBridge = bridge
+ opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
+ return MakeRaftBackend(t, coreIdx, logger, conf, bridge)
+ }
opts.SetupFunc = func(t testing.T, c *vault.TestCluster) {
if opts.NumCores != 1 {
testhelpers.RaftClusterJoinNodes(t, c)
@@ -232,7 +252,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
}
func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) {
- opts.KeepStandbysSealed = true
+ opts.InmemClusterLayers = true
opts.PhysicalFactory = RaftHAFactory(bundler)
}
diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go
index 1a1ba8b76ef2..89642cf61fd7 100644
--- a/helper/testhelpers/teststorage/teststorage_reusable.go
+++ b/helper/testhelpers/teststorage/teststorage_reusable.go
@@ -9,7 +9,6 @@ import (
"os"
hclog "github.com/hashicorp/go-hclog"
- raftlib "github.com/hashicorp/raft"
"github.com/hashicorp/vault/physical/raft"
"github.com/hashicorp/vault/sdk/physical"
"github.com/hashicorp/vault/vault"
@@ -74,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica
// MakeReusableRaftStorage makes a physical raft backend that can be re-used
// across multiple test clusters in sequence.
-func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) {
+func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) {
raftDirs := make([]string, numCores)
for i := 0; i < numCores; i++ {
raftDirs[i] = makeRaftDir(t)
@@ -87,7 +86,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add
conf.DisablePerformanceStandby = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
- return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false)
+ return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false)
}
},
@@ -124,9 +123,10 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b
storage := ReusableStorage{
Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
+ opts.InmemClusterLayers = true
opts.KeepStandbysSealed = true
opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle {
- haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true)
+ haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true)
return &vault.PhysicalBackendBundle{
Backend: bundle.Backend,
@@ -168,25 +168,13 @@ func makeRaftDir(t testing.T) string {
return raftDir
}
-func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle {
+func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle {
nodeID := fmt.Sprintf("core-%d", coreIdx)
- conf := map[string]string{
- "path": raftDir,
- "node_id": nodeID,
- "performance_multiplier": "8",
- "autopilot_reconcile_interval": "300ms",
- "autopilot_update_interval": "100ms",
- }
-
- backend, err := raft.NewRaftBackend(conf, logger)
+ backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil)
if err != nil {
t.Fatal(err)
}
- if addressProvider != nil {
- backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
- }
-
bundle := new(vault.PhysicalBackendBundle)
if ha {
diff --git a/http/logical_test.go b/http/logical_test.go
index e5b0caf222d6..88964ac874c2 100644
--- a/http/logical_test.go
+++ b/http/logical_test.go
@@ -569,10 +569,8 @@ func TestLogical_RespondWithStatusCode(t *testing.T) {
}
func TestLogical_Audit_invalidWrappingToken(t *testing.T) {
- t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true")
-
// Create a noop audit backend
- noop := corehelpers.TestNoopAudit(t, nil)
+ noop := corehelpers.TestNoopAudit(t, "noop/", nil)
c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{
AuditBackends: map[string]audit.Factory{
"noop": func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) {
@@ -584,7 +582,6 @@ func TestLogical_Audit_invalidWrappingToken(t *testing.T) {
defer ln.Close()
// Enable the audit backend
-
resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{
"type": "noop",
})
diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go
index 04352859d428..8358d18a537a 100644
--- a/http/sys_generate_root_test.go
+++ b/http/sys_generate_root_test.go
@@ -247,8 +247,6 @@ func testServerWithAudit(t *testing.T, records **[][]byte) (net.Listener, string
}
func TestSysGenerateRoot_badKey(t *testing.T) {
- t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true")
-
var records *[][]byte
ln, addr, token, _ := testServerWithAudit(t, &records)
defer ln.Close()
diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go
index 80ad1bf9996e..30d667740b0d 100644
--- a/internal/observability/event/options.go
+++ b/internal/observability/event/options.go
@@ -113,7 +113,11 @@ func WithNow(now time.Time) Option {
// WithFacility provides an Option to represent a 'facility' for a syslog sink.
func WithFacility(facility string) Option {
return func(o *options) error {
- o.withFacility = facility
+ facility = strings.TrimSpace(facility)
+
+ if facility != "" {
+ o.withFacility = facility
+ }
return nil
}
@@ -122,7 +126,11 @@ func WithFacility(facility string) Option {
// WithTag provides an Option to represent a 'tag' for a syslog sink.
func WithTag(tag string) Option {
return func(o *options) error {
- o.withTag = tag
+ tag = strings.TrimSpace(tag)
+
+ if tag != "" {
+ o.withTag = tag
+ }
return nil
}
diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go
index 676c79833078..0f36014740cf 100644
--- a/internal/observability/event/options_test.go
+++ b/internal/observability/event/options_test.go
@@ -205,7 +205,7 @@ func TestOptions_WithFacility(t *testing.T) {
},
"whitespace": {
Value: " ",
- ExpectedValue: " ",
+ ExpectedValue: "",
},
"value": {
Value: "juan",
@@ -213,7 +213,7 @@ func TestOptions_WithFacility(t *testing.T) {
},
"spacey-value": {
Value: " juan ",
- ExpectedValue: " juan ",
+ ExpectedValue: "juan",
},
}
@@ -243,7 +243,7 @@ func TestOptions_WithTag(t *testing.T) {
},
"whitespace": {
Value: " ",
- ExpectedValue: " ",
+ ExpectedValue: "",
},
"value": {
Value: "juan",
@@ -251,7 +251,7 @@ func TestOptions_WithTag(t *testing.T) {
},
"spacey-value": {
Value: " juan ",
- ExpectedValue: " juan ",
+ ExpectedValue: "juan",
},
}
diff --git a/internal/observability/event/pipeline_reader.go b/internal/observability/event/pipeline_reader.go
new file mode 100644
index 000000000000..f35672f8efa6
--- /dev/null
+++ b/internal/observability/event/pipeline_reader.go
@@ -0,0 +1,24 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import "github.com/hashicorp/eventlogger"
+
+// PipelineReader surfaces information required for pipeline registration.
+type PipelineReader interface {
+ // EventType should return the event type to be used for pipeline registration.
+ EventType() eventlogger.EventType
+
+ // HasFiltering should determine if filter nodes are used by this pipeline.
+ HasFiltering() bool
+
+ // Name should return the pipeline's name, which is used as the eventlogger.PipelineID.
+ Name() string
+
+ // Nodes should return the nodes the framework will use to process events.
+ Nodes() map[eventlogger.NodeID]eventlogger.Node
+
+ // NodeIDs should return the IDs of the nodes, in the order they are required.
+ NodeIDs() []eventlogger.NodeID
+}
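
To make the intended usage concrete, here is a hedged sketch (not part of this change) of how a consumer of PipelineReader might drive an event through the nodes in order, mirroring the loop in corehelpers' LogTestMessage. The helper name `runPipeline` is hypothetical, and `context` and `fmt` imports are assumed.

```go
// Hypothetical helper: run an event through a PipelineReader's nodes in order.
func runPipeline(ctx context.Context, r PipelineReader, e *eventlogger.Event) (*eventlogger.Event, error) {
	nodes := r.Nodes()
	for _, id := range r.NodeIDs() {
		node, ok := nodes[id]
		if !ok {
			return nil, fmt.Errorf("node not found: %v", id)
		}
		var err error
		e, err = node.Process(ctx, e)
		if err != nil {
			return nil, err
		}
		// A nil event with no error means a filter node dropped the event.
		if e == nil {
			return nil, nil
		}
	}
	return e, nil
}
```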
diff --git a/internal/observability/event/sink_socket.go b/internal/observability/event/sink_socket.go
index 69f482560b67..e9cb00c19662 100644
--- a/internal/observability/event/sink_socket.go
+++ b/internal/observability/event/sink_socket.go
@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"net"
+ "strings"
"sync"
"time"
@@ -29,9 +30,19 @@ type SocketSink struct {
// NewSocketSink should be used to create a new SocketSink.
// Accepted options: WithMaxDuration and WithSocketType.
-func NewSocketSink(format string, address string, opt ...Option) (*SocketSink, error) {
+func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, error) {
const op = "event.NewSocketSink"
+ address = strings.TrimSpace(address)
+ if address == "" {
+ return nil, fmt.Errorf("%s: address is required: %w", op, ErrInvalidParameter)
+ }
+
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
+ }
+
opts, err := getOpts(opt...)
if err != nil {
return nil, fmt.Errorf("%s: error applying options: %w", op, err)
diff --git a/internal/observability/event/sink_socket_test.go b/internal/observability/event/sink_socket_test.go
new file mode 100644
index 000000000000..3c647f7b3ea1
--- /dev/null
+++ b/internal/observability/event/sink_socket_test.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewSocketSink ensures that we validate the input arguments and can create
+// the SocketSink if everything goes to plan.
+func TestNewSocketSink(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ address string
+ format string
+ opts []Option
+ want *SocketSink
+ wantErr bool
+ expectedErrMsg string
+ }{
+ "address-empty": {
+ address: "",
+ wantErr: true,
+ expectedErrMsg: "event.NewSocketSink: address is required: invalid parameter",
+ },
+ "address-whitespace": {
+ address: " ",
+ wantErr: true,
+ expectedErrMsg: "event.NewSocketSink: address is required: invalid parameter",
+ },
+ "format-empty": {
+ address: "addr",
+ format: "",
+ wantErr: true,
+ expectedErrMsg: "event.NewSocketSink: format is required: invalid parameter",
+ },
+ "format-whitespace": {
+ address: "addr",
+ format: " ",
+ wantErr: true,
+ expectedErrMsg: "event.NewSocketSink: format is required: invalid parameter",
+ },
+ "bad-max-duration": {
+ address: "addr",
+ format: "json",
+ opts: []Option{WithMaxDuration("bar")},
+ wantErr: true,
+ expectedErrMsg: "event.NewSocketSink: error applying options: time: invalid duration \"bar\"",
+ },
+ "happy": {
+ address: "wss://foo",
+ format: "json",
+ want: &SocketSink{
+ requiredFormat: "json",
+ address: "wss://foo",
+ socketType: "tcp", // defaults to tcp
+ maxDuration: 2 * time.Second, // defaults to 2 secs
+ },
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ got, err := NewSocketSink(tc.address, tc.format, tc.opts...)
+
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ require.Nil(t, got)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.want, got)
+ }
+ })
+ }
+}
diff --git a/internal/observability/event/sink_stdout.go b/internal/observability/event/sink_stdout.go
index 34307251d415..6b1f43dace8f 100644
--- a/internal/observability/event/sink_stdout.go
+++ b/internal/observability/event/sink_stdout.go
@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"os"
+ "strings"
"github.com/hashicorp/eventlogger"
)
@@ -21,10 +22,17 @@ type StdoutSink struct {
// NewStdoutSinkNode creates a new StdoutSink that will persist the events
// it processes using the specified expected format.
-func NewStdoutSinkNode(format string) *StdoutSink {
+func NewStdoutSinkNode(format string) (*StdoutSink, error) {
+ const op = "event.NewStdoutSinkNode"
+
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
+ }
+
return &StdoutSink{
requiredFormat: format,
- }
+ }, nil
}
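
Since the constructor now returns an error, existing call sites need updating. A sketch of the new shape (caller code assumed, not part of this diff):

```go
sink, err := event.NewStdoutSinkNode("json")
if err != nil {
	return fmt.Errorf("cannot create stdout sink: %w", err)
}
```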
// Process persists the provided eventlogger.Event to the standard output stream.
diff --git a/internal/observability/event/sink_syslog.go b/internal/observability/event/sink_syslog.go
index 72ac6cdd1e1c..d099ed5c7349 100644
--- a/internal/observability/event/sink_syslog.go
+++ b/internal/observability/event/sink_syslog.go
@@ -6,6 +6,7 @@ package event
import (
"context"
"fmt"
+ "strings"
gsyslog "github.com/hashicorp/go-syslog"
@@ -25,6 +26,11 @@ type SyslogSink struct {
func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) {
const op = "event.NewSyslogSink"
+ format = strings.TrimSpace(format)
+ if format == "" {
+ return nil, fmt.Errorf("%s: format is required: %w", op, ErrInvalidParameter)
+ }
+
opts, err := getOpts(opt...)
if err != nil {
return nil, fmt.Errorf("%s: error applying options: %w", op, err)
diff --git a/internal/observability/event/sink_syslog_test.go b/internal/observability/event/sink_syslog_test.go
new file mode 100644
index 000000000000..f977a4a50538
--- /dev/null
+++ b/internal/observability/event/sink_syslog_test.go
@@ -0,0 +1,57 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package event
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewSyslogSink ensures that we validate the input arguments and can create
+// the SyslogSink if everything goes to plan.
+func TestNewSyslogSink(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ format string
+ opts []Option
+ want *SyslogSink
+ wantErr bool
+ expectedErrMsg string
+ }{
+ "format-empty": {
+ format: "",
+ wantErr: true,
+ expectedErrMsg: "event.NewSyslogSink: format is required: invalid parameter",
+ },
+ "format-whitespace": {
+ format: " ",
+ wantErr: true,
+ expectedErrMsg: "event.NewSyslogSink: format is required: invalid parameter",
+ },
+ "happy": {
+ format: "json",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ got, err := NewSyslogSink(tc.format, tc.opts...)
+
+ if tc.wantErr {
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrMsg)
+ require.Nil(t, got)
+ } else {
+ require.NoError(t, err)
+ require.NotNil(t, got)
+ }
+ })
+ }
+}
diff --git a/physical/raft/raft.go b/physical/raft/raft.go
index c6ca8a789b40..0a46ff26ae2e 100644
--- a/physical/raft/raft.go
+++ b/physical/raft/raft.go
@@ -11,6 +11,7 @@ import (
"io"
"io/ioutil"
"math/rand"
+ "net/url"
"os"
"path/filepath"
"strconv"
@@ -311,6 +312,33 @@ func EnsurePath(path string, dir bool) error {
return os.MkdirAll(path, 0o700)
}
+func NewClusterAddrBridge() *ClusterAddrBridge {
+ return &ClusterAddrBridge{
+ clusterAddressByNodeID: make(map[string]string),
+ }
+}
+
+type ClusterAddrBridge struct {
+ l sync.RWMutex
+ clusterAddressByNodeID map[string]string
+}
+
+func (c *ClusterAddrBridge) UpdateClusterAddr(nodeId string, clusterAddr string) {
+ c.l.Lock()
+ defer c.l.Unlock()
+ // Fail loudly on a malformed address; cu.Host would nil-panic below anyway.
+ cu, err := url.Parse(clusterAddr)
+ if err != nil {
+ panic(fmt.Sprintf("cannot parse cluster addr %q: %v", clusterAddr, err))
+ }
+ c.clusterAddressByNodeID[nodeId] = cu.Host
+}
+
+func (c *ClusterAddrBridge) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) {
+ c.l.RLock()
+ defer c.l.RUnlock()
+ if addr, ok := c.clusterAddressByNodeID[string(id)]; ok {
+ return raft.ServerAddress(addr), nil
+ }
+ return "", fmt.Errorf("could not find cluster addr for id=%s", id)
+}
+
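A small usage sketch of the bridge (addresses illustrative): UpdateClusterAddr keeps only the host portion of the parsed URL, which ServerAddr then hands back to raft.

```go
bridge := NewClusterAddrBridge()
bridge.UpdateClusterAddr("core-1", "https://127.0.0.1:8201")

addr, err := bridge.ServerAddr(raft.ServerID("core-1"))
// addr == "127.0.0.1:8201", err == nil; unknown IDs return an error instead.
```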
// NewRaftBackend constructs a RaftBackend using the given directory
func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
path := os.Getenv(EnvVaultRaftPath)
@@ -1344,7 +1372,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e
if b.raft == nil {
return errors.New("raft storage is not initialized")
}
- b.logger.Trace("adding server to raft", "id", peerID)
+ b.logger.Trace("adding server to raft", "id", peerID, "addr", clusterAddr)
future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0)
return future.Error()
}
@@ -1353,7 +1381,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e
return errors.New("raft storage autopilot is not initialized")
}
- b.logger.Trace("adding server to raft via autopilot", "id", peerID)
+ b.logger.Trace("adding server to raft via autopilot", "id", peerID, "addr", clusterAddr)
return b.autopilot.AddServer(&autopilot.Server{
ID: raft.ServerID(peerID),
Name: peerID,
diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go
index 7902750d6dc5..f98cd9b6be17 100644
--- a/sdk/helper/docker/testhelpers.go
+++ b/sdk/helper/docker/testhelpers.go
@@ -229,19 +229,6 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
return nil, "", err
}
- var wg sync.WaitGroup
- consumeLogs := false
- var logStdout, logStderr io.Writer
- if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil {
- consumeLogs = true
- logStdout = d.RunOptions.LogStdout
- logStderr = d.RunOptions.LogStderr
- } else if d.RunOptions.LogConsumer != nil {
- consumeLogs = true
- logStdout = &LogConsumerWriter{d.RunOptions.LogConsumer}
- logStderr = &LogConsumerWriter{d.RunOptions.LogConsumer}
- }
-
// The waitgroup wg is used here to support some stuff in NewDockerCluster.
// We can't generate the PKI cert for the https listener until we know the
// container's address, meaning we must first start the container, then
@@ -252,28 +239,12 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
// passes in (which does all that PKI cert stuff) waits to see output from
// Vault on stdout/stderr before it sends the signal, and we don't want to
// run the PostStart until we've hooked into the docker logs.
- if consumeLogs {
+ var wg sync.WaitGroup
+ logConsumer := d.createLogConsumer(result.Container.ID, &wg)
+
+ if logConsumer != nil {
wg.Add(1)
- go func() {
- // We must run inside a goroutine because we're using Follow:true,
- // and StdCopy will block until the log stream is closed.
- stream, err := d.DockerAPI.ContainerLogs(context.Background(), result.Container.ID, types.ContainerLogsOptions{
- ShowStdout: true,
- ShowStderr: true,
- Timestamps: !d.RunOptions.OmitLogTimestamps,
- Details: true,
- Follow: true,
- })
- wg.Done()
- if err != nil {
- d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err))
- } else {
- _, err := stdcopy.StdCopy(logStdout, logStderr, stream)
- if err != nil {
- d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err))
- }
- }
- }()
+ go logConsumer()
}
wg.Wait()
@@ -336,6 +307,46 @@ func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr
}, result.Container.ID, nil
}
+// createLogConsumer returns a function that consumes the logs of the container with the given ID.
+// If a wait group is given, WaitGroup.Done() will be called as soon as the
+// ContainerLogs Docker API call returns.
+// The returned function blocks, so it should be run on a goroutine.
+func (d *Runner) createLogConsumer(containerId string, wg *sync.WaitGroup) func() {
+ if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil {
+ return func() {
+ d.consumeLogs(containerId, wg, d.RunOptions.LogStdout, d.RunOptions.LogStderr)
+ }
+ }
+ if d.RunOptions.LogConsumer != nil {
+ return func() {
+ d.consumeLogs(containerId, wg, &LogConsumerWriter{d.RunOptions.LogConsumer}, &LogConsumerWriter{d.RunOptions.LogConsumer})
+ }
+ }
+ return nil
+}
+
+// consumeLogs streams the container's logs to the given writers; it is the body
+// of the closure returned by createLogConsumer.
+func (d *Runner) consumeLogs(containerId string, wg *sync.WaitGroup, logStdout, logStderr io.Writer) {
+ // We must run inside a goroutine because we're using Follow:true,
+ // and StdCopy will block until the log stream is closed.
+ stream, err := d.DockerAPI.ContainerLogs(context.Background(), containerId, types.ContainerLogsOptions{
+ ShowStdout: true,
+ ShowStderr: true,
+ Timestamps: !d.RunOptions.OmitLogTimestamps,
+ Details: true,
+ Follow: true,
+ })
+ wg.Done()
+ if err != nil {
+ // LogConsumer may be nil when only LogStdout/LogStderr were provided.
+ if d.RunOptions.LogConsumer != nil {
+ d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err))
+ }
+ } else {
+ _, err := stdcopy.StdCopy(logStdout, logStderr, stream)
+ if err != nil && d.RunOptions.LogConsumer != nil {
+ d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err))
+ }
+ }
+}
+
type Service struct {
Config ServiceConfig
Cleanup func()
@@ -508,6 +519,21 @@ func (d *Runner) Stop(ctx context.Context, containerID string) error {
return nil
}
+func (d *Runner) RestartContainerWithTimeout(ctx context.Context, containerID string, timeout int) error {
+ err := d.DockerAPI.ContainerRestart(ctx, containerID, container.StopOptions{Timeout: &timeout})
+ if err != nil {
+ return fmt.Errorf("failed to restart container: %s", err)
+ }
+ var wg sync.WaitGroup
+ logConsumer := d.createLogConsumer(containerID, &wg)
+ if logConsumer != nil {
+ wg.Add(1)
+ go logConsumer()
+ }
+ // We deliberately don't wait for the log consumer to attach; callers don't
+ // depend on log timing here.
+ return nil
+}
+
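Usage sketch (test-style; `runner`, `containerID`, `ctx`, and `t` are assumed to exist in the caller). The timeout is in seconds, matching container.StopOptions.Timeout:

```go
// Restart the container, allowing up to 10 seconds for a graceful stop.
if err := runner.RestartContainerWithTimeout(ctx, containerID, 10); err != nil {
	t.Fatal(err)
}
```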
func (d *Runner) Restart(ctx context.Context, containerID string) error {
if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
return err
diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go
index b2a123556cd7..6c7266536361 100644
--- a/sdk/helper/testcluster/util.go
+++ b/sdk/helper/testcluster/util.go
@@ -174,7 +174,7 @@ func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) {
leaderActiveTimes := make(map[int]time.Time)
for i, node := range cluster.Nodes() {
client := node.APIClient()
- ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+ ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
resp, err := client.Sys().LeaderWithContext(ctx)
cancel()
if err != nil || resp == nil || !resp.IsSelf {
diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go
index 30c03e6113ac..12b8bed1cbdb 100644
--- a/sdk/logical/audit.go
+++ b/sdk/logical/audit.go
@@ -20,3 +20,36 @@ type MarshalOptions struct {
type OptMarshaler interface {
MarshalJSONWithOptions(*MarshalOptions) ([]byte, error)
}
+
+// LogInputBexpr is used for evaluating boolean expressions with go-bexpr.
+type LogInputBexpr struct {
+ MountPoint string `bexpr:"mount_point"`
+ MountType string `bexpr:"mount_type"`
+ Namespace string `bexpr:"namespace"`
+ Operation string `bexpr:"operation"`
+ Path string `bexpr:"path"`
+}
+
+// BexprDatum returns values from a LogInput formatted for use in evaluating go-bexpr boolean expressions.
+// The namespace should be supplied from the current request's context.
+func (l *LogInput) BexprDatum(namespace string) *LogInputBexpr {
+ var mountPoint string
+ var mountType string
+ var operation string
+ var path string
+
+ if l.Request != nil {
+ mountPoint = l.Request.MountPoint
+ mountType = l.Request.MountType
+ operation = string(l.Request.Operation)
+ path = l.Request.Path
+ }
+
+ return &LogInputBexpr{
+ MountPoint: mountPoint,
+ MountType: mountType,
+ Namespace: namespace,
+ Operation: operation,
+ Path: path,
+ }
+}
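
To tie this to the audit filtering elsewhere in this change: a hedged, self-contained sketch of evaluating a go-bexpr expression against the datum. Field values are illustrative; the bexpr API calls match those used by the filter node.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-bexpr"
	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	in := &logical.LogInput{
		Request: &logical.Request{
			MountPoint: "secret/",
			MountType:  "kv",
			Operation:  logical.ReadOperation,
			Path:       "secret/data/app",
		},
	}

	// The namespace comes from the request context in real usage.
	datum := in.BexprDatum("admin/")

	eval, err := bexpr.CreateEvaluator(`mount_type == "kv" and operation == "read"`)
	if err != nil {
		panic(err)
	}

	match, err := eval.Evaluate(datum)
	if err != nil {
		panic(err)
	}
	fmt.Println(match) // true
}
```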
diff --git a/sdk/logical/audit_test.go b/sdk/logical/audit_test.go
new file mode 100644
index 000000000000..710450c2f303
--- /dev/null
+++ b/sdk/logical/audit_test.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package logical
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TestLogInput_BexprDatum ensures that we can transform a LogInput
+// into a LogInputBexpr to be used in audit filtering.
+func TestLogInput_BexprDatum(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ Request *Request
+ Namespace string
+ ExpectedPath string
+ ExpectedMountPoint string
+ ExpectedMountType string
+ ExpectedNamespace string
+ ExpectedOperation string
+ }{
+ "nil-no-namespace": {
+ Request: nil,
+ Namespace: "",
+ ExpectedPath: "",
+ ExpectedMountPoint: "",
+ ExpectedMountType: "",
+ ExpectedNamespace: "",
+ ExpectedOperation: "",
+ },
+ "nil-namespace": {
+ Request: nil,
+ Namespace: "juan",
+ ExpectedPath: "",
+ ExpectedMountPoint: "",
+ ExpectedMountType: "",
+ ExpectedNamespace: "juan",
+ ExpectedOperation: "",
+ },
+ "happy-path": {
+ Request: &Request{
+ MountPoint: "IAmAMountPoint",
+ MountType: "IAmAMountType",
+ Operation: CreateOperation,
+ Path: "IAmAPath",
+ },
+ Namespace: "juan",
+ ExpectedPath: "IAmAPath",
+ ExpectedMountPoint: "IAmAMountPoint",
+ ExpectedMountType: "IAmAMountType",
+ ExpectedNamespace: "juan",
+ ExpectedOperation: "create",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ l := &LogInput{Request: tc.Request}
+
+ d := l.BexprDatum(tc.Namespace)
+
+ require.Equal(t, tc.ExpectedPath, d.Path)
+ require.Equal(t, tc.ExpectedMountPoint, d.MountPoint)
+ require.Equal(t, tc.ExpectedMountType, d.MountType)
+ require.Equal(t, tc.ExpectedNamespace, d.Namespace)
+ require.Equal(t, tc.ExpectedOperation, d.Operation)
+ })
+ }
+}
diff --git a/ui/app/adapters/sync/association.js b/ui/app/adapters/sync/association.js
index 7057e2392722..d1419a097aa1 100644
--- a/ui/app/adapters/sync/association.js
+++ b/ui/app/adapters/sync/association.js
@@ -29,7 +29,8 @@ export default class SyncAssociationAdapter extends ApplicationAdapter {
// typically associations are queried for a specific destination which is what the standard query method does
// in specific cases we can query all associations to access total_associations and total_secrets values
queryAll() {
- return this.query(this.store, { modelName: 'sync/association' }).then((response) => {
+ const url = this.buildURL('sync/association');
+ return this.ajax(url, 'GET', { data: { list: true } }).then((response) => {
const { total_associations, total_secrets } = response.data;
return { total_associations, total_secrets };
});
@@ -49,8 +50,8 @@ export default class SyncAssociationAdapter extends ApplicationAdapter {
// array of association data for each destination a secret is synced to
fetchSyncStatus({ mount, secretName }) {
- const url = `${this.buildURL()}/${mount}/${secretName}`;
- return this.ajax(url, 'GET').then((resp) => {
+ const url = `${this.buildURL()}/destinations`;
+ return this.ajax(url, 'GET', { data: { mount, secret_name: secretName } }).then((resp) => {
const { associated_destinations } = resp.data;
const syncData = [];
for (const key in associated_destinations) {
diff --git a/ui/app/components/auth-saml.hbs b/ui/app/components/auth-saml.hbs
index 8d69fe8759ce..aa87e3770d5c 100644
--- a/ui/app/components/auth-saml.hbs
+++ b/ui/app/components/auth-saml.hbs
@@ -46,10 +46,11 @@
Logging in with a SAML auth method requires a browser in a secure context.