From 24c6e82a84e44ea83cbf3a2307e7cc8303bd2c52 Mon Sep 17 00:00:00 2001 From: Peter Wilson Date: Fri, 15 Dec 2023 09:26:34 +0000 Subject: [PATCH 01/25] Remove old audit behavior from test code (#24540) * Export audit event * Move older tests away from audit behavior that didn't use eventlogger * spelling--; * no more struct initialization of NoopAudit outside of NewNoopAudit * locking since we're accessing the shared backend --- audit/entry_formatter.go | 4 +- audit/event.go | 8 +- audit/event_test.go | 30 +-- audit/sink_wrapper.go | 4 +- audit/types.go | 4 +- helper/testhelpers/corehelpers/corehelpers.go | 255 ++++++++++++------ http/logical_test.go | 5 +- http/sys_generate_root_test.go | 2 - vault/audit_test.go | 65 +++-- vault/core_test.go | 37 +-- .../identity/login_mfa_totp_test.go | 4 +- 11 files changed, 260 insertions(+), 158 deletions(-) diff --git a/audit/entry_formatter.go b/audit/entry_formatter.go index af404e1d10de..a6b836d0a446 100644 --- a/audit/entry_formatter.go +++ b/audit/entry_formatter.go @@ -29,7 +29,7 @@ var ( ) // NewEntryFormatter should be used to create an EntryFormatter. -// Accepted options: WithPrefix. +// Accepted options: WithPrefix, WithHeaderFormatter. func NewEntryFormatter(config FormatterConfig, salter Salter, opt ...Option) (*EntryFormatter, error) { const op = "audit.NewEntryFormatter" @@ -80,7 +80,7 @@ func (f *EntryFormatter) Process(ctx context.Context, e *eventlogger.Event) (*ev return nil, fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter) } - a, ok := e.Payload.(*auditEvent) + a, ok := e.Payload.(*AuditEvent) if !ok { return nil, fmt.Errorf("%s: cannot parse event payload: %w", op, event.ErrInvalidParameter) } diff --git a/audit/event.go b/audit/event.go index d7b60b3df58d..6ea3f184914d 100644 --- a/audit/event.go +++ b/audit/event.go @@ -12,7 +12,7 @@ import ( // NewEvent should be used to create an audit event. The subtype field is needed // for audit events. It will generate an ID if no ID is supplied. Supported // options: WithID, WithNow. -func NewEvent(s subtype, opt ...Option) (*auditEvent, error) { +func NewEvent(s subtype, opt ...Option) (*AuditEvent, error) { const op = "audit.newEvent" // Get the default options @@ -30,7 +30,7 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) { } } - audit := &auditEvent{ + audit := &AuditEvent{ ID: opts.withID, Timestamp: opts.withNow, Version: version, @@ -44,8 +44,8 @@ func NewEvent(s subtype, opt ...Option) (*auditEvent, error) { } // validate attempts to ensure the audit event in its present state is valid. 
-func (a *auditEvent) validate() error { - const op = "audit.(auditEvent).validate" +func (a *AuditEvent) validate() error { + const op = "audit.(AuditEvent).validate" if a == nil { return fmt.Errorf("%s: event is nil: %w", op, event.ErrInvalidParameter) diff --git a/audit/event_test.go b/audit/event_test.go index d6249fd1b8f2..7a520e3483d8 100644 --- a/audit/event_test.go +++ b/audit/event_test.go @@ -29,14 +29,14 @@ func TestAuditEvent_new(t *testing.T) { Subtype: subtype(""), Format: format(""), IsErrorExpected: true, - ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter", + ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter", }, "empty-Option": { Options: []Option{}, Subtype: subtype(""), Format: format(""), IsErrorExpected: true, - ExpectedErrorMessage: "audit.newEvent: audit.(auditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter", + ExpectedErrorMessage: "audit.newEvent: audit.(AuditEvent).validate: audit.(subtype).validate: '' is not a valid event subtype: invalid parameter", }, "bad-id": { Options: []Option{WithID("")}, @@ -108,22 +108,22 @@ func TestAuditEvent_new(t *testing.T) { // TestAuditEvent_Validate exercises the validation for an audit event. func TestAuditEvent_Validate(t *testing.T) { tests := map[string]struct { - Value *auditEvent + Value *AuditEvent IsErrorExpected bool ExpectedErrorMessage string }{ "nil": { Value: nil, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: event is nil: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: event is nil: invalid parameter", }, "default": { - Value: &auditEvent{}, + Value: &AuditEvent{}, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter", }, "id-empty": { - Value: &auditEvent{ + Value: &AuditEvent{ ID: "", Version: version, Subtype: RequestType, @@ -131,10 +131,10 @@ func TestAuditEvent_Validate(t *testing.T) { Data: nil, }, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: missing ID: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: missing ID: invalid parameter", }, "version-fiddled": { - Value: &auditEvent{ + Value: &AuditEvent{ ID: "audit_123", Version: "magic-v2", Subtype: RequestType, @@ -142,10 +142,10 @@ func TestAuditEvent_Validate(t *testing.T) { Data: nil, }, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: event version unsupported: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: event version unsupported: invalid parameter", }, "subtype-fiddled": { - Value: &auditEvent{ + Value: &AuditEvent{ ID: "audit_123", Version: version, Subtype: subtype("moon"), @@ -153,10 +153,10 @@ func TestAuditEvent_Validate(t *testing.T) { Data: nil, }, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: audit.(subtype).validate: 'moon' is not a valid event subtype: invalid parameter", }, "default-time": { - Value: &auditEvent{ + Value: &AuditEvent{ ID: "audit_123", Version: version, Subtype: ResponseType, @@ -164,10 +164,10 @@ func 
TestAuditEvent_Validate(t *testing.T) { Data: nil, }, IsErrorExpected: true, - ExpectedErrorMessage: "audit.(auditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter", + ExpectedErrorMessage: "audit.(AuditEvent).validate: event timestamp cannot be the zero time instant: invalid parameter", }, "valid": { - Value: &auditEvent{ + Value: &AuditEvent{ ID: "audit_123", Version: version, Subtype: ResponseType, diff --git a/audit/sink_wrapper.go b/audit/sink_wrapper.go index 9edf5c17833f..3cf79c709535 100644 --- a/audit/sink_wrapper.go +++ b/audit/sink_wrapper.go @@ -12,7 +12,7 @@ import ( ) // SinkWrapper is a wrapper for any kind of Sink Node that processes events -// containing an auditEvent payload. +// containing an AuditEvent payload. type SinkWrapper struct { Name string Sink eventlogger.Node @@ -23,7 +23,7 @@ type SinkWrapper struct { // once this method returns. func (s *SinkWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { defer func() { - auditEvent, ok := e.Payload.(*auditEvent) + auditEvent, ok := e.Payload.(*AuditEvent) if ok { metrics.MeasureSince([]string{"audit", s.Name, auditEvent.Subtype.MetricTag()}, e.CreatedAt) } diff --git a/audit/types.go b/audit/types.go index 0b0f3982a2d9..af5a5830dfae 100644 --- a/audit/types.go +++ b/audit/types.go @@ -35,8 +35,8 @@ type subtype string // format defines types of format audit events support. type format string -// auditEvent is the audit event. -type auditEvent struct { +// AuditEvent is the audit event. +type AuditEvent struct { ID string `json:"id"` Version string `json:"version"` Subtype subtype `json:"subtype"` // the subtype of the audit event. diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go index 8e0e449ad5d8..b8d1def9ca73 100644 --- a/helper/testhelpers/corehelpers/corehelpers.go +++ b/helper/testhelpers/corehelpers/corehelpers.go @@ -6,9 +6,10 @@ package corehelpers import ( - "bytes" "context" "crypto/sha256" + "encoding/json" + "errors" "fmt" "io" "os" @@ -29,6 +30,11 @@ import ( "github.com/mitchellh/go-testing-interface" ) +var ( + _ audit.Backend = (*NoopAudit)(nil) + _ eventlogger.Node = (*noopWrapper)(nil) +) + var externalPlugins = []string{"transform", "kmip", "keymgmt"} // RetryUntil runs f until it returns a nil result or the timeout is reached. @@ -210,52 +216,51 @@ func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.P return consts.Unknown, false } -func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit { - n, err := NewNoopAudit(config) +func TestNoopAudit(t testing.T, path string, config map[string]string, opts ...audit.Option) *NoopAudit { + cfg := &audit.BackendConfig{Config: config, MountPath: path} + n, err := NewNoopAudit(cfg, opts...) if err != nil { t.Fatal(err) } return n } -func NewNoopAudit(config map[string]string) (*NoopAudit, error) { +// NewNoopAudit should be used to create a NoopAudit as it handles creation of a +// predictable salt and wraps eventlogger nodes so information can be retrieved on +// what they've seen or formatted. +func NewNoopAudit(config *audit.BackendConfig, opts ...audit.Option) (*NoopAudit, error) { view := &logical.InmemStorage{} - err := view.Put(context.Background(), &logical.StorageEntry{ - Key: "salt", - Value: []byte("foo"), - }) + + // Create the salt with a known key for predictable hmac values. 
+ se := &logical.StorageEntry{Key: "salt", Value: []byte("foo")} + err := view.Put(context.Background(), se) if err != nil { return nil, err } - n := &NoopAudit{ - Config: &audit.BackendConfig{ - SaltView: view, - SaltConfig: &salt.Config{ - HMAC: sha256.New, - HMACType: "hmac-sha256", - }, - Config: config, + // Override the salt related config settings. + backendConfig := &audit.BackendConfig{ + SaltView: view, + SaltConfig: &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", }, + Config: config.Config, + MountPath: config.MountPath, } + n := &NoopAudit{Config: backendConfig} + cfg, err := audit.NewFormatterConfig() if err != nil { return nil, err } - f, err := audit.NewEntryFormatter(cfg, n) + f, err := audit.NewEntryFormatter(cfg, n, opts...) if err != nil { return nil, fmt.Errorf("error creating formatter: %w", err) } - fw, err := audit.NewEntryFormatterWriter(cfg, f, &audit.JSONWriter{}) - if err != nil { - return nil, fmt.Errorf("error creating formatter writer: %w", err) - } - - n.formatter = fw - n.nodeIDList = make([]eventlogger.NodeID, 2) n.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node, 2) @@ -264,8 +269,11 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) { return nil, fmt.Errorf("error generating random NodeID for formatter node: %w", err) } + // Wrap the formatting node, so we can get any bytes that were formatted etc. + wrappedFormatter := &noopWrapper{format: "json", node: f, backend: n} + n.nodeIDList[0] = formatterNodeID - n.nodeMap[formatterNodeID] = f + n.nodeMap[formatterNodeID] = wrappedFormatter sinkNode := event.NewNoopSink() sinkNodeID, err := event.GenerateNodeID() @@ -279,9 +287,12 @@ func NewNoopAudit(config map[string]string) (*NoopAudit, error) { return n, nil } +// NoopAuditFactory should be used when the test needs a way to access bytes that +// have been formatted by the pipeline during audit requests. +// The records parameter will be repointed to the one used within the pipeline. func NoopAuditFactory(records **[][]byte) audit.Factory { - return func(_ context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) { - n, err := NewNoopAudit(config.Config) + return func(_ context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) { + n, err := NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter)) if err != nil { return nil, err } @@ -293,8 +304,19 @@ func NoopAuditFactory(records **[][]byte) audit.Factory { } } +// noopWrapper is designed to wrap a formatter node in order to allow access to +// bytes formatted, headers formatted and parts of the logical.LogInput. +// Some older tests relied on being able to query this information so while those +// tests stick around we should look after them. 
+type noopWrapper struct { + format string + node eventlogger.Node + backend *NoopAudit +} + type NoopAudit struct { - Config *audit.BackendConfig + Config *audit.BackendConfig + ReqErr error ReqAuth []*logical.Auth Req []*logical.Request @@ -309,81 +331,164 @@ type NoopAudit struct { RespNonHMACKeys [][]string RespReqNonHMACKeys [][]string RespErrs []error - - formatter *audit.EntryFormatterWriter - records [][]byte - l sync.RWMutex - salt *salt.Salt - saltMutex sync.RWMutex + records [][]byte + l sync.RWMutex + salt *salt.Salt + saltMutex sync.RWMutex nodeIDList []eventlogger.NodeID nodeMap map[eventlogger.NodeID]eventlogger.Node } -func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer n.l.Unlock() +// Process handles the contortions required by older test code to ensure behavior. +// It will attempt to do some pre/post processing of the logical.LogInput that should +// form part of the event's payload data, as well as capturing the resulting headers +// that were formatted and track the overall bytes that a formatted event uses when +// it's ready to head down the pipeline to the sink node (a noop for us). +func (n *noopWrapper) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + n.backend.l.Lock() + defer n.backend.l.Unlock() - if n.formatter != nil { - var w bytes.Buffer - err := n.formatter.FormatAndWriteRequest(ctx, &w, in) - if err != nil { - return err - } - n.records = append(n.records, w.Bytes()) + var err error + + // We're expecting audit events since this is an audit device. + a, ok := e.Payload.(*audit.AuditEvent) + if !ok { + return nil, errors.New("cannot parse payload as an audit event") } - n.ReqAuth = append(n.ReqAuth, in.Auth) - n.Req = append(n.Req, in.Request) - n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers) - n.ReqNonHMACKeys = in.NonHMACReqDataKeys - n.ReqErrs = append(n.ReqErrs, in.OuterErr) + in := a.Data - return n.ReqErr -} + // Depending on the type of the audit event (request or response) we need to + // track different things. 
+ switch a.Subtype { + case audit.RequestType: + n.backend.ReqAuth = append(n.backend.ReqAuth, in.Auth) + n.backend.Req = append(n.backend.Req, in.Request) + n.backend.ReqNonHMACKeys = in.NonHMACReqDataKeys + n.backend.ReqErrs = append(n.backend.ReqErrs, in.OuterErr) -func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { - n.l.Lock() - defer n.l.Unlock() + if n.backend.ReqErr != nil { + return nil, n.backend.ReqErr + } + case audit.ResponseType: + n.backend.RespAuth = append(n.backend.RespAuth, in.Auth) + n.backend.RespReq = append(n.backend.RespReq, in.Request) + n.backend.Resp = append(n.backend.Resp, in.Response) + n.backend.RespErrs = append(n.backend.RespErrs, in.OuterErr) + + if in.Response != nil { + n.backend.RespNonHMACKeys = append(n.backend.RespNonHMACKeys, in.NonHMACRespDataKeys) + n.backend.RespReqNonHMACKeys = append(n.backend.RespReqNonHMACKeys, in.NonHMACReqDataKeys) + } - if n.formatter != nil { - var w bytes.Buffer - err := n.formatter.FormatAndWriteResponse(ctx, &w, in) - if err != nil { - return err + if n.backend.RespErr != nil { + return nil, n.backend.RespErr } - n.records = append(n.records, w.Bytes()) + default: + return nil, fmt.Errorf("unknown audit event type: %q", a.Subtype) } - n.RespAuth = append(n.RespAuth, in.Auth) - n.RespReq = append(n.RespReq, in.Request) - n.Resp = append(n.Resp, in.Response) - n.RespErrs = append(n.RespErrs, in.OuterErr) + // Once we've taken note of the relevant properties of the event, we get the + // underlying (wrapped) node to process it as normal. + e, err = n.node.Process(ctx, e) + if err != nil { + return nil, fmt.Errorf("error processing wrapped node: %w", err) + } - if in.Response != nil { - n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys) - n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys) + // Once processing has been carried out, the underlying node (a formatter node) + // should contain the output ready for the sink node. We'll get that in order + // to track how many bytes we formatted. + b, ok := e.Format(n.format) + if ok { + n.backend.records = append(n.backend.records, b) } - return n.RespErr + // Finally, the last bit of post-processing is to make sure that we track the + // formatted headers that would have made it to the logs via the sink node. + // They only appear in requests. + if a.Subtype == audit.RequestType { + reqEntry := &audit.RequestEntry{} + err = json.Unmarshal(b, &reqEntry) + if err != nil { + return nil, fmt.Errorf("unable to parse formatted audit entry data: %w", err) + } + + n.backend.ReqHeaders = append(n.backend.ReqHeaders, reqEntry.Request.Headers) + } + + // Return the event and no error in order to let the pipeline continue on. + return e, nil +} + +func (n *noopWrapper) Reopen() error { + return n.node.Reopen() } +func (n *noopWrapper) Type() eventlogger.NodeType { + return n.node.Type() +} + +// Deprecated: use eventlogger. +func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { + return nil +} + +// Deprecated: use eventlogger. +func (n *NoopAudit) LogResponse(ctx context.Context, in *logical.LogInput) error { + return nil +} + +// LogTestMessage will manually crank the handle on the nodes associated with this backend. 
func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { n.l.Lock() defer n.l.Unlock() - var w bytes.Buffer - tempFormatter, err := audit.NewTemporaryFormatter(config["format"], config["prefix"]) - if err != nil { - return err + // Fake event for test purposes. + e := &eventlogger.Event{ + Type: eventlogger.EventType(event.AuditType.String()), + CreatedAt: time.Now(), + Formatted: make(map[string][]byte), + Payload: in, } - err = tempFormatter.FormatAndWriteResponse(ctx, &w, in) + // Try to get the required format from config and default to JSON. + format, ok := config["format"] + if !ok { + format = "json" + } + cfg, err := audit.NewFormatterConfig(audit.WithFormat(format)) if err != nil { - return err + return fmt.Errorf("cannot create config for formatter node: %w", err) } + // Create a temporary formatter node for reuse. + f, err := audit.NewEntryFormatter(cfg, n, audit.WithPrefix(config["prefix"])) - n.records = append(n.records, w.Bytes()) + // Go over each node in order from our list. + for _, id := range n.nodeIDList { + node, ok := n.nodeMap[id] + if !ok { + return fmt.Errorf("node not found: %v", id) + } + + switch node.Type() { + case eventlogger.NodeTypeFormatter: + // Use a temporary formatter node which doesn't persist its salt anywhere. + if formatNode, ok := node.(*audit.EntryFormatter); ok && formatNode != nil { + e, err = f.Process(ctx, e) + + // Housekeeping, we should update that we processed some bytes. + if e != nil { + b, ok := e.Format(format) + if ok { + n.records = append(n.records, b) + } + } + } + default: + e, err = node.Process(ctx, e) + } + } return nil } diff --git a/http/logical_test.go b/http/logical_test.go index e5b0caf222d6..01e6762ee0a9 100644 --- a/http/logical_test.go +++ b/http/logical_test.go @@ -569,10 +569,8 @@ func TestLogical_RespondWithStatusCode(t *testing.T) { } func TestLogical_Audit_invalidWrappingToken(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - // Create a noop audit backend - noop := corehelpers.TestNoopAudit(t, nil) + noop := corehelpers.TestNoopAudit(t, "noop", nil) c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ AuditBackends: map[string]audit.Factory{ "noop": func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) { @@ -584,7 +582,6 @@ func TestLogical_Audit_invalidWrappingToken(t *testing.T) { defer ln.Close() // Enable the audit backend - resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{ "type": "noop", }) diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go index 04352859d428..8358d18a537a 100644 --- a/http/sys_generate_root_test.go +++ b/http/sys_generate_root_test.go @@ -247,8 +247,6 @@ func testServerWithAudit(t *testing.T, records **[][]byte) (net.Listener, string } func TestSysGenerateRoot_badKey(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - var records *[][]byte ln, addr, token, _ := testServerWithAudit(t, &records) defer ln.Close() diff --git a/vault/audit_test.go b/vault/audit_test.go index a7dd44539ab6..60f9f45e1cef 100644 --- a/vault/audit_test.go +++ b/vault/audit_test.go @@ -12,6 +12,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/errwrap" @@ -340,14 +342,16 @@ func verifyDefaultAuditTable(t *testing.T, table *MountTable) { func TestAuditBroker_LogRequest(t *testing.T) { l := 
logging.NewVaultLogger(log.Trace) - b, err := NewAuditBroker(l, false) + b, err := NewAuditBroker(l, true) if err != nil { t.Fatal(err) } - a1 := corehelpers.TestNoopAudit(t, nil) - a2 := corehelpers.TestNoopAudit(t, nil) - b.Register("foo", a1, false) - b.Register("bar", a2, false) + a1 := corehelpers.TestNoopAudit(t, "foo", nil) + a2 := corehelpers.TestNoopAudit(t, "bar", nil) + err = b.Register("foo", a1, false) + require.NoError(t, err) + err = b.Register("bar", a2, false) + require.NoError(t, err) auth := &logical.Auth{ ClientToken: "foo", @@ -423,21 +427,23 @@ func TestAuditBroker_LogRequest(t *testing.T) { // Should FAIL work with both failing backends a2.ReqErr = fmt.Errorf("failed") - if err := b.LogRequest(ctx, logInput, headersConf); !errwrap.Contains(err, "no audit backend succeeded in logging the request") { + if err := b.LogRequest(ctx, logInput, headersConf); !errwrap.Contains(err, "event not processed by enough 'sink' nodes") { t.Fatalf("err: %v", err) } } func TestAuditBroker_LogResponse(t *testing.T) { l := logging.NewVaultLogger(log.Trace) - b, err := NewAuditBroker(l, false) + b, err := NewAuditBroker(l, true) if err != nil { t.Fatal(err) } - a1 := corehelpers.TestNoopAudit(t, nil) - a2 := corehelpers.TestNoopAudit(t, nil) - b.Register("foo", a1, false) - b.Register("bar", a2, false) + a1 := corehelpers.TestNoopAudit(t, "foo", nil) + a2 := corehelpers.TestNoopAudit(t, "bar", nil) + err = b.Register("foo", a1, false) + require.NoError(t, err) + err = b.Register("bar", a2, false) + require.NoError(t, err) auth := &logical.Auth{ NumUses: 10, @@ -531,23 +537,36 @@ func TestAuditBroker_LogResponse(t *testing.T) { // Should FAIL work with both failing backends a2.RespErr = fmt.Errorf("failed") err = b.LogResponse(ctx, logInput, headersConf) - if !strings.Contains(err.Error(), "no audit backend succeeded in logging the response") { + if !strings.Contains(err.Error(), "event not processed by enough 'sink' nodes") { t.Fatalf("err: %v", err) } } func TestAuditBroker_AuditHeaders(t *testing.T) { logger := logging.NewVaultLogger(log.Trace) - b, err := NewAuditBroker(logger, false) + + b, err := NewAuditBroker(logger, true) if err != nil { t.Fatal(err) } _, barrier, _ := mockBarrier(t) view := NewBarrierView(barrier, "headers/") - a1 := corehelpers.TestNoopAudit(t, nil) - a2 := corehelpers.TestNoopAudit(t, nil) - b.Register("foo", a1, false) - b.Register("bar", a2, false) + + headersConf := &AuditedHeadersConfig{ + view: view, + } + err = headersConf.add(context.Background(), "X-Test-Header", false) + require.NoError(t, err) + err = headersConf.add(context.Background(), "X-Vault-Header", false) + require.NoError(t, err) + + a1 := corehelpers.TestNoopAudit(t, "foo", nil, audit.WithHeaderFormatter(headersConf)) + a2 := corehelpers.TestNoopAudit(t, "bar", nil, audit.WithHeaderFormatter(headersConf)) + + err = b.Register("foo", a1, false) + require.NoError(t, err) + err = b.Register("bar", a2, false) + require.NoError(t, err) auth := &logical.Auth{ ClientToken: "foo", @@ -575,19 +594,13 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { } reqCopy := reqCopyRaw.(*logical.Request) - headersConf := &AuditedHeadersConfig{ - view: view, - } - headersConf.add(context.Background(), "X-Test-Header", false) - headersConf.add(context.Background(), "X-Vault-Header", false) - logInput := &logical.LogInput{ Auth: auth, Request: reqCopy, OuterErr: respErr, } ctx := namespace.RootContext(context.Background()) - err = b.LogRequest(ctx, logInput, headersConf) + err = b.LogRequest(ctx, logInput, nil) 
if err != nil { t.Fatalf("err: %v", err) } @@ -599,7 +612,7 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { for _, a := range []*corehelpers.NoopAudit{a1, a2} { if !reflect.DeepEqual(a.ReqHeaders[0], expected) { - t.Fatalf("Bad audited headers: %#v", a.Req[0].Headers) + t.Fatalf("Bad audited headers: %#v", a.ReqHeaders[0]) } } @@ -618,7 +631,7 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { // Should FAIL work with both failing backends a2.ReqErr = fmt.Errorf("failed") err = b.LogRequest(ctx, logInput, headersConf) - if !errwrap.Contains(err, "no audit backend succeeded in logging the request") { + if !errwrap.Contains(err, "event not processed by enough 'sink' nodes") { t.Fatalf("err: %v", err) } } diff --git a/vault/core_test.go b/vault/core_test.go index 097ea51b4a37..3f16e825aca4 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -1464,16 +1464,13 @@ func TestCore_HandleLogin_Token(t *testing.T) { } func TestCore_HandleRequest_AuditTrail(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - // Create a noop audit backend - noop := &corehelpers.NoopAudit{} + var noop *corehelpers.NoopAudit c, _, root := TestCoreUnsealed(t) - c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) { - noop = &corehelpers.NoopAudit{ - Config: config, - } - return noop, nil + c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) { + var err error + noop, err = corehelpers.NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter)) + return noop, err } // Enable the audit backend @@ -1530,16 +1527,13 @@ func TestCore_HandleRequest_AuditTrail(t *testing.T) { } func TestCore_HandleRequest_AuditTrail_noHMACKeys(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - // Create a noop audit backend var noop *corehelpers.NoopAudit c, _, root := TestCoreUnsealed(t) - c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) { - noop = &corehelpers.NoopAudit{ - Config: config, - } - return noop, nil + c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) { + var err error + noop, err = corehelpers.NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter)) + return noop, err } // Specify some keys to not HMAC @@ -1636,10 +1630,8 @@ func TestCore_HandleRequest_AuditTrail_noHMACKeys(t *testing.T) { } func TestCore_HandleLogin_AuditTrail(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - // Create a badass credential backend that always logs in as armon - noop := &corehelpers.NoopAudit{} + var noop *corehelpers.NoopAudit noopBack := &NoopBackend{ Login: []string{"login"}, Response: &logical.Response{ @@ -1659,11 +1651,10 @@ func TestCore_HandleLogin_AuditTrail(t *testing.T) { c.credentialBackends["noop"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { return noopBack, nil } - c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, _ audit.HeaderFormatter) (audit.Backend, error) { - noop = &corehelpers.NoopAudit{ - Config: config, - } - return noop, nil + c.auditBackends["noop"] = func(ctx context.Context, config *audit.BackendConfig, _ bool, headerFormatter audit.HeaderFormatter) (audit.Backend, error) { + var err error + noop, 
err = corehelpers.NewNoopAudit(config, audit.WithHeaderFormatter(headerFormatter)) + return noop, err } // Enable the credential backend diff --git a/vault/external_tests/identity/login_mfa_totp_test.go b/vault/external_tests/identity/login_mfa_totp_test.go index 7951adc3a588..d975f13f51d0 100644 --- a/vault/external_tests/identity/login_mfa_totp_test.go +++ b/vault/external_tests/identity/login_mfa_totp_test.go @@ -51,9 +51,7 @@ func doTwoPhaseLogin(t *testing.T, client *api.Client, totpCodePath, methodID, u } func TestLoginMfaGenerateTOTPTestAuditIncluded(t *testing.T) { - t.Setenv("VAULT_AUDIT_DISABLE_EVENTLOGGER", "true") - - noop := corehelpers.TestNoopAudit(t, nil) + noop := corehelpers.TestNoopAudit(t, "noop", nil) cluster := vault.NewTestCluster(t, &vault.CoreConfig{ CredentialBackends: map[string]logical.Factory{ From 763095fec6c601bb94713e4d69d34730fc7c1ca0 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Fri, 15 Dec 2023 08:59:29 -0500 Subject: [PATCH 02/25] Don't touch ActiveTime in preSeal/postUnseal (#24549) --- changelog/24549.txt | 3 +++ vault/core.go | 2 -- vault/ha.go | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog/24549.txt diff --git a/changelog/24549.txt b/changelog/24549.txt new file mode 100644 index 000000000000..6838b024c782 --- /dev/null +++ b/changelog/24549.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: sys/leader ActiveTime field no longer gets reset when we do an internal state change that doesn't change our active status. +``` diff --git a/vault/core.go b/vault/core.go index 9d3bf06cbfdf..a04f52de7917 100644 --- a/vault/core.go +++ b/vault/core.go @@ -2390,7 +2390,6 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c // Mark the active time. We do this first so it can be correlated to the logs // for the active startup. 
- c.activeTime = time.Now().UTC() if err := postUnsealPhysical(c); err != nil { return err @@ -2766,7 +2765,6 @@ func (c *Core) preSeal() error { } // Clear any pending funcs c.postUnsealFuncs = nil - c.activeTime = time.Time{} // Clear any rekey progress c.barrierRekeyConfig = nil diff --git a/vault/ha.go b/vault/ha.go index ba2b490457e3..e81787e7e91d 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -663,6 +663,7 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop err = c.postUnseal(activeCtx, activeCtxCancel, standardUnsealStrategy{}) if err == nil { c.standby = false + c.activeTime = time.Now() c.leaderUUID = uuid c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 1, nil) } @@ -715,6 +716,7 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop // Mark as standby c.standby = true + c.activeTime = time.Time{} c.leaderUUID = "" c.metricSink.SetGaugeWithLabels([]string{"core", "active"}, 0, nil) From f460a69ad990859551701cc02bc9d627878b0afb Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Fri, 15 Dec 2023 12:28:32 -0500 Subject: [PATCH 03/25] Simplify raft tests, use inmem networking instead of address providers (#24557) --- helper/testhelpers/teststorage/teststorage.go | 8 +- .../raft/raft_autopilot_test.go | 81 +++------ vault/external_tests/raft/raft_test.go | 157 ++++++++---------- vault/testing.go | 20 +-- 4 files changed, 102 insertions(+), 164 deletions(-) diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index 75431867a274..2767d022b44b 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -119,9 +119,11 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma logger.Info("raft dir", "dir", raftDir) conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", + "path": raftDir, + "node_id": nodeID, + "performance_multiplier": "8", + "autopilot_reconcile_interval": "300ms", + "autopilot_update_interval": "100ms", } for k, v := range extraConf { val, ok := v.(string) diff --git a/vault/external_tests/raft/raft_autopilot_test.go b/vault/external_tests/raft/raft_autopilot_test.go index 706c25279c0f..477b76c5b1f1 100644 --- a/vault/external_tests/raft/raft_autopilot_test.go +++ b/vault/external_tests/raft/raft_autopilot_test.go @@ -9,23 +9,19 @@ import ( "fmt" "math" "reflect" - "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/go-hclog" autopilot "github.com/hashicorp/raft-autopilot" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/helper/testhelpers" - "github.com/hashicorp/vault/helper/testhelpers/teststorage" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/testcluster" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/version" "github.com/kr/pretty" - testingintf "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" ) @@ -218,29 +214,25 @@ func TestRaft_Autopilot_Configuration(t *testing.T) { // TestRaft_Autopilot_Stabilization_Delay verifies that if a node takes a long // time to become ready, it doesn't get promoted to voter until then. 
func TestRaft_Autopilot_Stabilization_Delay(t *testing.T) { - t.Skip() t.Parallel() - conf, opts := teststorage.ClusterSetup(nil, nil, teststorage.RaftBackendSetup) - conf.DisableAutopilot = false - opts.InmemClusterLayers = true - opts.KeepStandbysSealed = true - opts.SetupFunc = nil core2SnapshotDelay := 5 * time.Second - opts.PhysicalFactory = func(t testingintf.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - config := map[string]interface{}{ - "snapshot_threshold": "50", - "trailing_logs": "100", - "autopilot_reconcile_interval": "300ms", - "autopilot_update_interval": "100ms", - "snapshot_interval": "1s", - } - if coreIdx == 2 { - config["snapshot_delay"] = core2SnapshotDelay.String() - } - return teststorage.MakeRaftBackend(t, coreIdx, logger, config) - } + conf, opts := raftClusterBuilder(t, &RaftClusterOpts{ + DisableFollowerJoins: true, + InmemCluster: true, + EnableAutopilot: true, + PhysicalFactoryConfig: map[string]interface{}{ + "snapshot_threshold": "50", + "trailing_logs": "100", + "snapshot_interval": "1s", + }, + PerNodePhysicalFactoryConfig: map[int]map[string]interface{}{ + 2: { + "snapshot_delay": core2SnapshotDelay.String(), + }, + }, + }) - cluster := vault.NewTestCluster(t, conf, opts) + cluster := vault.NewTestCluster(t, conf, &opts) defer cluster.Cleanup() testhelpers.WaitForActiveNode(t, cluster) @@ -432,27 +424,20 @@ func TestRaft_VotersStayVoters(t *testing.T) { // remove it from Raft until a replacement voter had joined and stabilized/been promoted. func TestRaft_Autopilot_DeadServerCleanup(t *testing.T) { t.Parallel() - conf, opts := teststorage.ClusterSetup(nil, nil, teststorage.RaftBackendSetup) - conf.DisableAutopilot = false - opts.NumCores = 4 - opts.SetupFunc = nil - opts.PhysicalFactoryConfig = map[string]interface{}{ - "autopilot_reconcile_interval": "300ms", - "autopilot_update_interval": "100ms", - } - - cluster := vault.NewTestCluster(t, conf, opts) + cluster, _ := raftCluster(t, &RaftClusterOpts{ + DisableFollowerJoins: true, + InmemCluster: true, + EnableAutopilot: true, + NumCores: 4, + }) defer cluster.Cleanup() testhelpers.WaitForActiveNode(t, cluster) - leader, addressProvider := setupLeaderAndUnseal(t, cluster) // Join 2 extra nodes manually, store the 3rd for later + leader := cluster.Cores[0] core1 := cluster.Cores[1] core2 := cluster.Cores[2] core3 := cluster.Cores[3] - core1.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - core2.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - core3.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) joinAsVoterAndUnseal(t, core1, cluster) joinAsVoterAndUnseal(t, core2, cluster) // Do not join node 3 @@ -606,26 +591,6 @@ func joinAndUnseal(t *testing.T, core *vault.TestClusterCore, cluster *vault.Tes } } -// setupLeaderAndUnseal configures and unseals the leader node. -// It will wait until the node is active before returning the core and the address of the leader. -func setupLeaderAndUnseal(t *testing.T, cluster *vault.TestCluster) (*vault.TestClusterCore, *testhelpers.TestRaftServerAddressProvider) { - t.Helper() - leaderIdx, err := testcluster.LeaderNode(context.Background(), cluster) - require.NoError(t, err) - leader := cluster.Cores[leaderIdx] - - // Lots of tests seem to do this when they deal with a TestRaftServerAddressProvider, it makes the test work rather than error out. 
- atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} - testhelpers.EnsureCoreSealed(t, leader) - leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leader) - vault.TestWaitActive(t, leader.Core) - - return leader, addressProvider -} - // waitForCoreUnseal waits until the specified core is unsealed. // It fails the calling test if the deadline has elapsed and the core is still sealed. func waitForCoreUnseal(t *testing.T, core *vault.TestClusterCore) { diff --git a/vault/external_tests/raft/raft_test.go b/vault/external_tests/raft/raft_test.go index 73eab754bfb7..6cb433b94049 100644 --- a/vault/external_tests/raft/raft_test.go +++ b/vault/external_tests/raft/raft_test.go @@ -50,11 +50,17 @@ type RaftClusterOpts struct { VersionMap map[int]string RedundancyZoneMap map[int]string EffectiveSDKVersionMap map[int]string + PerNodePhysicalFactoryConfig map[int]map[string]interface{} } -func raftCluster(t testing.TB, ropts *RaftClusterOpts) (*vault.TestCluster, *vault.TestClusterOptions) { +func raftClusterBuilder(t testing.TB, ropts *RaftClusterOpts) (*vault.CoreConfig, vault.TestClusterOptions) { + // TODO remove global + atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) + if ropts == nil { - ropts = &RaftClusterOpts{} + ropts = &RaftClusterOpts{ + InmemCluster: true, + } } conf := &vault.CoreConfig{ @@ -73,41 +79,56 @@ func raftCluster(t testing.TB, ropts *RaftClusterOpts) (*vault.TestCluster, *vau opts.PhysicalFactoryConfig = ropts.PhysicalFactoryConfig conf.DisablePerformanceStandby = ropts.DisablePerfStandby opts.NumCores = ropts.NumCores - opts.VersionMap = ropts.VersionMap - opts.RedundancyZoneMap = ropts.RedundancyZoneMap opts.EffectiveSDKVersionMap = ropts.EffectiveSDKVersionMap + opts.PerNodePhysicalFactoryConfig = ropts.PerNodePhysicalFactoryConfig + if len(ropts.VersionMap) > 0 || len(ropts.RedundancyZoneMap) > 0 { + if opts.PerNodePhysicalFactoryConfig == nil { + opts.PerNodePhysicalFactoryConfig = map[int]map[string]interface{}{} + } + for idx, ver := range ropts.VersionMap { + if opts.PerNodePhysicalFactoryConfig[idx] == nil { + opts.PerNodePhysicalFactoryConfig[idx] = map[string]interface{}{} + } + opts.PerNodePhysicalFactoryConfig[idx]["autopilot_upgrade_version"] = ver + } + for idx, zone := range ropts.RedundancyZoneMap { + if opts.PerNodePhysicalFactoryConfig[idx] == nil { + opts.PerNodePhysicalFactoryConfig[idx] = map[string]interface{}{} + } + opts.PerNodePhysicalFactoryConfig[idx]["autopilot_redundancy_zone"] = zone + } + } teststorage.RaftBackendSetup(conf, &opts) if ropts.DisableFollowerJoins { opts.SetupFunc = nil } + return conf, opts +} +func raftCluster(t testing.TB, ropts *RaftClusterOpts) (*vault.TestCluster, *vault.TestClusterOptions) { + conf, opts := raftClusterBuilder(t, ropts) cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), conf, &opts) - cluster.Start() vault.TestWaitActive(benchhelpers.TBtoT(t), cluster.Cores[0].Core) return cluster, &opts } func TestRaft_BoltDBMetrics(t *testing.T) { t.Parallel() - conf := vault.CoreConfig{} - opts := vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: 1, - CoreMetricSinkProvider: testhelpers.TestMetricSinkProvider(time.Minute), - DefaultHandlerProperties: vault.HandlerProperties{ - ListenerConfig: &configutil.Listener{ - Telemetry: configutil.ListenerTelemetry{ - UnauthenticatedMetricsAccess: true, - }, + conf, opts := raftClusterBuilder(t, 
&RaftClusterOpts{ + InmemCluster: true, + NumCores: 1, + }) + opts.CoreMetricSinkProvider = testhelpers.TestMetricSinkProvider(time.Minute) + opts.DefaultHandlerProperties = vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, }, }, } - - teststorage.RaftBackendSetup(&conf, &opts) - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() + cluster := vault.NewTestCluster(t, conf, &opts) defer cluster.Cleanup() vault.TestWaitActive(t, cluster.Cores[0].Core) @@ -148,30 +169,13 @@ func TestRaft_BoltDBMetrics(t *testing.T) { func TestRaft_RetryAutoJoin(t *testing.T) { t.Parallel() - var ( - conf vault.CoreConfig - - opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} - ) - - teststorage.RaftBackendSetup(&conf, &opts) - - opts.SetupFunc = nil - cluster := vault.NewTestCluster(t, &conf, &opts) - - cluster.Start() + cluster, _ := raftCluster(t, &RaftClusterOpts{ + InmemCluster: true, + DisableFollowerJoins: true, + }) defer cluster.Cleanup() - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} leaderCore := cluster.Cores[0] - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leaderCore) - vault.TestWaitActive(t, leaderCore.Core) - } leaderInfos := []*raft.LeaderJoinInfo{ { @@ -184,7 +188,6 @@ func TestRaft_RetryAutoJoin(t *testing.T) { { // expected to pass but not join as we're not actually discovering leader addresses core := cluster.Cores[1] - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) require.NoError(t, err) @@ -200,24 +203,14 @@ func TestRaft_RetryAutoJoin(t *testing.T) { func TestRaft_Retry_Join(t *testing.T) { t.Parallel() - var conf vault.CoreConfig - opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} - teststorage.RaftBackendSetup(&conf, &opts) - opts.SetupFunc = nil - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() + cluster, _ := raftCluster(t, &RaftClusterOpts{ + InmemCluster: true, + DisableFollowerJoins: true, + }) defer cluster.Cleanup() - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} - leaderCore := cluster.Cores[0] leaderAPI := leaderCore.Client.Address() - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - } leaderInfos := []*raft.LeaderJoinInfo{ { @@ -233,7 +226,6 @@ func TestRaft_Retry_Join(t *testing.T) { go func(t *testing.T, core *vault.TestClusterCore) { t.Helper() defer wg.Done() - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) if err != nil { t.Error(err) @@ -263,27 +255,13 @@ func TestRaft_Retry_Join(t *testing.T) { func TestRaft_Join(t *testing.T) { t.Parallel() - var conf vault.CoreConfig - opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} - teststorage.RaftBackendSetup(&conf, &opts) - opts.SetupFunc = nil - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() + cluster, _ := raftCluster(t, &RaftClusterOpts{ + DisableFollowerJoins: 
true, + }) defer cluster.Cleanup() - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} - leaderCore := cluster.Cores[0] leaderAPI := leaderCore.Client.Address() - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - // Seal the leader so we can install an address provider - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leaderCore) - vault.TestWaitActive(t, leaderCore.Core) - } joinFunc := func(client *api.Client, addClientCerts bool) { req := &api.RaftJoinRequest{ @@ -392,6 +370,7 @@ func TestRaft_NodeIDHeader(t *testing.T) { description: "with header configured", ropts: &RaftClusterOpts{ EnableResponseHeaderRaftNodeID: true, + InmemCluster: true, }, headerPresent: true, }, @@ -622,7 +601,10 @@ func TestRaft_SnapshotAPI_RekeyRotate_Backward(t *testing.T) { tCaseLocal := tCase t.Parallel() - cluster, _ := raftCluster(t, &RaftClusterOpts{DisablePerfStandby: tCaseLocal.DisablePerfStandby}) + cluster, _ := raftCluster(t, &RaftClusterOpts{ + DisablePerfStandby: tCaseLocal.DisablePerfStandby, + InmemCluster: true, + }) defer cluster.Cleanup() leaderClient := cluster.Cores[0].Client @@ -824,7 +806,10 @@ func TestRaft_SnapshotAPI_RekeyRotate_Forward(t *testing.T) { tCaseLocal := tCase t.Parallel() - cluster, _ := raftCluster(t, &RaftClusterOpts{DisablePerfStandby: tCaseLocal.DisablePerfStandby}) + cluster, _ := raftCluster(t, &RaftClusterOpts{ + DisablePerfStandby: tCaseLocal.DisablePerfStandby, + InmemCluster: true, + }) defer cluster.Cleanup() leaderClient := cluster.Cores[0].Client @@ -1134,27 +1119,15 @@ func BenchmarkRaft_SingleNode(b *testing.B) { func TestRaft_Join_InitStatus(t *testing.T) { t.Parallel() - var conf vault.CoreConfig - opts := vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler} - teststorage.RaftBackendSetup(&conf, &opts) - opts.SetupFunc = nil - cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() - defer cluster.Cleanup() - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} + cluster, _ := raftCluster(t, &RaftClusterOpts{ + InmemCluster: true, + DisableFollowerJoins: true, + }) + defer cluster.Cleanup() leaderCore := cluster.Cores[0] leaderAPI := leaderCore.Client.Address() - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - // Seal the leader so we can install an address provider - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leaderCore) - vault.TestWaitActive(t, leaderCore.Core) - } joinFunc := func(client *api.Client) { req := &api.RaftJoinRequest{ diff --git a/vault/testing.go b/vault/testing.go index 14f000a2c520..3aed96600ce1 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1114,13 +1114,12 @@ type TestClusterOptions struct { CoreMetricSinkProvider func(clusterName string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper) - PhysicalFactoryConfig map[string]interface{} - LicensePublicKey ed25519.PublicKey - LicensePrivateKey ed25519.PrivateKey + PhysicalFactoryConfig map[string]interface{} + PerNodePhysicalFactoryConfig map[int]map[string]interface{} + + LicensePublicKey ed25519.PublicKey + LicensePrivateKey ed25519.PrivateKey - // this stores the vault version that should be used for each core config - VersionMap map[int]string - RedundancyZoneMap map[int]string KVVersion string EffectiveSDKVersionMap map[int]string 
@@ -1831,11 +1830,10 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo if pfc == nil { pfc = make(map[string]interface{}) } - if len(opts.VersionMap) > 0 { - pfc["autopilot_upgrade_version"] = opts.VersionMap[idx] - } - if len(opts.RedundancyZoneMap) > 0 { - pfc["autopilot_redundancy_zone"] = opts.RedundancyZoneMap[idx] + if opts.PerNodePhysicalFactoryConfig != nil { + for k, v := range opts.PerNodePhysicalFactoryConfig[idx] { + pfc[k] = v + } } physBundle := opts.PhysicalFactory(t, idx, localConfig.Logger, pfc) switch { From 1b166da3d28ee2309c2cb2acf1ac1f84636eeb4a Mon Sep 17 00:00:00 2001 From: Raymond Ho Date: Fri, 15 Dec 2023 14:28:25 -0800 Subject: [PATCH 04/25] revert stopped method to JobManager (#24526) --- changelog/23950.txt | 3 --- helper/fairshare/jobmanager.go | 7 ------- helper/fairshare/jobmanager_test.go | 4 ---- 3 files changed, 14 deletions(-) delete mode 100644 changelog/23950.txt diff --git a/changelog/23950.txt b/changelog/23950.txt deleted file mode 100644 index d28cdcea2a25..000000000000 --- a/changelog/23950.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -fairshare/jobmanager: Add 'stopped' method -``` diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go index c6d3521f9295..e1216dcd3b49 100644 --- a/helper/fairshare/jobmanager.go +++ b/helper/fairshare/jobmanager.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "math" "sync" - "sync/atomic" "time" "github.com/armon/go-metrics" @@ -47,7 +46,6 @@ type JobManager struct { // track queues by index for round robin worker assignment queuesIndex []string lastQueueAccessed int - stopped atomic.Bool } // NewJobManager creates a job manager, with an optional name @@ -100,14 +98,9 @@ func (j *JobManager) Stop() { j.logger.Trace("terminating job manager...") close(j.quit) j.workerPool.stop() - j.stopped.Store(true) }) } -func (j *JobManager) Stopped() bool { - return j.stopped.Load() -} - // AddJob adds a job to the given queue, creating the queue if it doesn't exist func (j *JobManager) AddJob(job Job, queueID string) { j.l.Lock() diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go index 363093f573a5..3d903d6aee22 100644 --- a/helper/fairshare/jobmanager_test.go +++ b/helper/fairshare/jobmanager_test.go @@ -9,8 +9,6 @@ import ( "sync" "testing" "time" - - "github.com/stretchr/testify/assert" ) func TestJobManager_NewJobManager(t *testing.T) { @@ -176,7 +174,6 @@ func TestJobManager_Stop(t *testing.T) { j := NewJobManager("job-mgr-test", 5, newTestLogger("jobmanager-test"), nil) j.Start() - assert.False(t, j.Stopped()) doneCh := make(chan struct{}) timeout := time.After(5 * time.Second) @@ -188,7 +185,6 @@ func TestJobManager_Stop(t *testing.T) { select { case <-doneCh: - assert.True(t, j.Stopped()) break case <-timeout: t.Fatal("timed out") From 423b58c90bf54d83ef5585cd645ae8a997944ce6 Mon Sep 17 00:00:00 2001 From: Nick Cabatoff Date: Mon, 18 Dec 2023 09:42:23 -0500 Subject: [PATCH 05/25] Simplify raft cluster address management in tests (#24560) --- helper/testhelpers/testhelpers.go | 40 ------------------- helper/testhelpers/teststorage/teststorage.go | 40 ++++++++++++++----- .../teststorage/teststorage_reusable.go | 24 +++-------- physical/raft/raft.go | 32 ++++++++++++++- sdk/helper/testcluster/util.go | 2 +- vault/cluster.go | 11 ++++- vault/core.go | 6 +++ .../raft/raft_autopilot_test.go | 25 +++++++----- vault/external_tests/raft/raft_test.go | 4 -- vault/external_tests/raftha/raft_ha_test.go | 35 +--------------- 
.../sealmigration/seal_migration_test.go | 8 +--- vault/raft.go | 5 +-- vault/testing.go | 4 ++ 13 files changed, 105 insertions(+), 131 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index c3499b8b6b0c..76c7731eba6d 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -11,10 +11,8 @@ import ( "fmt" "io/ioutil" "math/rand" - "net/url" "os" "strings" - "sync/atomic" "time" "github.com/armon/go-metrics" @@ -435,46 +433,9 @@ func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]by return newKeys } -// TestRaftServerAddressProvider is a ServerAddressProvider that uses the -// ClusterAddr() of each node to provide raft addresses. -// -// Note that TestRaftServerAddressProvider should only be used in cases where -// cores that are part of a raft configuration have already had -// startClusterListener() called (via either unsealing or raft joining). -type TestRaftServerAddressProvider struct { - Cluster *vault.TestCluster -} - -func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) { - for _, core := range p.Cluster.Cores { - if core.NodeID == string(id) { - parsed, err := url.Parse(core.ClusterAddr()) - if err != nil { - return "", err - } - - return raftlib.ServerAddress(parsed.Host), nil - } - } - - return "", errors.New("could not find cluster addr") -} - func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { - addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} - - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - leader := cluster.Cores[0] - // Seal the leader so we can install an address provider - { - EnsureCoreSealed(t, leader) - leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leader) - vault.TestWaitActive(t, leader.Core) - } - leaderInfos := []*raft.LeaderJoinInfo{ { LeaderAPIAddr: leader.Client.Address(), @@ -485,7 +446,6 @@ func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { // Join followers for i := 1; i < len(cluster.Cores); i++ { core := cluster.Cores[i] - core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) if err != nil { t.Fatal(err) diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go index 2767d022b44b..6092a6c47a81 100644 --- a/helper/testhelpers/teststorage/teststorage.go +++ b/helper/testhelpers/teststorage/teststorage.go @@ -10,8 +10,6 @@ import ( "os" "time" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/go-hclog" logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" "github.com/hashicorp/vault/audit" @@ -23,6 +21,7 @@ import ( "github.com/hashicorp/vault/helper/testhelpers" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/physical" @@ -105,7 +104,7 @@ func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBun } } -func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle { +func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf 
map[string]interface{}, bridge *raft.ClusterAddrBridge) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) raftDir, err := ioutil.TempDir("", "vault-raft-") if err != nil { @@ -118,6 +117,19 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma logger.Info("raft dir", "dir", raftDir) + backend, err := makeRaftBackend(logger, nodeID, raftDir, extraConf, bridge) + if err != nil { + cleanupFunc() + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: backend, + Cleanup: cleanupFunc, + } +} + +func makeRaftBackend(logger hclog.Logger, nodeID, raftDir string, extraConf map[string]interface{}, bridge *raft.ClusterAddrBridge) (physical.Backend, error) { conf := map[string]string{ "path": raftDir, "node_id": nodeID, @@ -134,14 +146,13 @@ func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf ma backend, err := raft.NewRaftBackend(conf, logger.Named("raft")) if err != nil { - cleanupFunc() - t.Fatal(err) + return nil, err } - - return &vault.PhysicalBackendBundle{ - Backend: backend, - Cleanup: cleanupFunc, + if bridge != nil { + backend.(*raft.RaftBackend).SetServerAddressProvider(bridge) } + + return backend, nil } // RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend @@ -224,7 +235,14 @@ func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { opts.KeepStandbysSealed = true - opts.PhysicalFactory = MakeRaftBackend + var bridge *raft.ClusterAddrBridge + if !opts.InmemClusterLayers && opts.ClusterLayers == nil { + bridge = raft.NewClusterAddrBridge() + } + conf.ClusterAddrBridge = bridge + opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return MakeRaftBackend(t, coreIdx, logger, conf, bridge) + } opts.SetupFunc = func(t testing.T, c *vault.TestCluster) { if opts.NumCores != 1 { testhelpers.RaftClusterJoinNodes(t, c) @@ -234,7 +252,7 @@ func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { } func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) { - opts.KeepStandbysSealed = true + opts.InmemClusterLayers = true opts.PhysicalFactory = RaftHAFactory(bundler) } diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go index 1a1ba8b76ef2..89642cf61fd7 100644 --- a/helper/testhelpers/teststorage/teststorage_reusable.go +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -9,7 +9,6 @@ import ( "os" hclog "github.com/hashicorp/go-hclog" - raftlib "github.com/hashicorp/raft" "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/physical" "github.com/hashicorp/vault/vault" @@ -74,7 +73,7 @@ func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.Physica // MakeReusableRaftStorage makes a physical raft backend that can be re-used // across multiple test clusters in sequence. 
-func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) { +func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int) (ReusableStorage, StorageCleanup) { raftDirs := make([]string, numCores) for i := 0; i < numCores; i++ { raftDirs[i] = makeRaftDir(t) @@ -87,7 +86,7 @@ func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, add conf.DisablePerformanceStandby = true opts.KeepStandbysSealed = true opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false) + return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], false) } }, @@ -124,9 +123,10 @@ func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, b storage := ReusableStorage{ Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.InmemClusterLayers = true opts.KeepStandbysSealed = true opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { - haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true) + haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], true) return &vault.PhysicalBackendBundle{ Backend: bundle.Backend, @@ -168,25 +168,13 @@ func makeRaftDir(t testing.T) string { return raftDir } -func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle { +func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, ha bool) *vault.PhysicalBackendBundle { nodeID := fmt.Sprintf("core-%d", coreIdx) - conf := map[string]string{ - "path": raftDir, - "node_id": nodeID, - "performance_multiplier": "8", - "autopilot_reconcile_interval": "300ms", - "autopilot_update_interval": "100ms", - } - - backend, err := raft.NewRaftBackend(conf, logger) + backend, err := makeRaftBackend(logger, nodeID, raftDir, nil, nil) if err != nil { t.Fatal(err) } - if addressProvider != nil { - backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - } - bundle := new(vault.PhysicalBackendBundle) if ha { diff --git a/physical/raft/raft.go b/physical/raft/raft.go index c6ca8a789b40..0a46ff26ae2e 100644 --- a/physical/raft/raft.go +++ b/physical/raft/raft.go @@ -11,6 +11,7 @@ import ( "io" "io/ioutil" "math/rand" + "net/url" "os" "path/filepath" "strconv" @@ -311,6 +312,33 @@ func EnsurePath(path string, dir bool) error { return os.MkdirAll(path, 0o700) } +func NewClusterAddrBridge() *ClusterAddrBridge { + return &ClusterAddrBridge{ + clusterAddressByNodeID: make(map[string]string), + } +} + +type ClusterAddrBridge struct { + l sync.RWMutex + clusterAddressByNodeID map[string]string +} + +func (c *ClusterAddrBridge) UpdateClusterAddr(nodeId string, clusterAddr string) { + c.l.Lock() + defer c.l.Unlock() + cu, _ := url.Parse(clusterAddr) + c.clusterAddressByNodeID[nodeId] = cu.Host +} + +func (c *ClusterAddrBridge) ServerAddr(id raft.ServerID) (raft.ServerAddress, error) { + c.l.RLock() + defer c.l.RUnlock() + if addr, ok := c.clusterAddressByNodeID[string(id)]; ok { + return raft.ServerAddress(addr), nil + } + return "", fmt.Errorf("could not find cluster addr for id=%s", id) +} + // NewRaftBackend constructs a 
RaftBackend using the given directory func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { path := os.Getenv(EnvVaultRaftPath) @@ -1344,7 +1372,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e if b.raft == nil { return errors.New("raft storage is not initialized") } - b.logger.Trace("adding server to raft", "id", peerID) + b.logger.Trace("adding server to raft", "id", peerID, "addr", clusterAddr) future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0) return future.Error() } @@ -1353,7 +1381,7 @@ func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) e return errors.New("raft storage autopilot is not initialized") } - b.logger.Trace("adding server to raft via autopilot", "id", peerID) + b.logger.Trace("adding server to raft via autopilot", "id", peerID, "addr", clusterAddr) return b.autopilot.AddServer(&autopilot.Server{ ID: raft.ServerID(peerID), Name: peerID, diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go index b2a123556cd7..6c7266536361 100644 --- a/sdk/helper/testcluster/util.go +++ b/sdk/helper/testcluster/util.go @@ -174,7 +174,7 @@ func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) { leaderActiveTimes := make(map[int]time.Time) for i, node := range cluster.Nodes() { client := node.APIClient() - ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) resp, err := client.Sys().LeaderWithContext(ctx) cancel() if err != nil || resp == nil || !resp.IsSelf { diff --git a/vault/cluster.go b/vault/cluster.go index 7cdb9fa10d6a..903b36c5cf19 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -340,7 +340,7 @@ func (c *Core) startClusterListener(ctx context.Context) error { } if strings.HasSuffix(c.ClusterAddr(), ":0") { // If we listened on port 0, record the port the OS gave us. 
- c.clusterAddr.Store(fmt.Sprintf("https://%s", c.getClusterListener().Addr())) + c.SetClusterAddr(fmt.Sprintf("https://%s", c.getClusterListener().Addr())) } if len(c.ClusterAddr()) != 0 { @@ -356,6 +356,15 @@ func (c *Core) ClusterAddr() string { return c.clusterAddr.Load().(string) } +func (c *Core) SetClusterAddr(s string) { + c.clusterAddr.Store(s) + rb := c.getRaftBackend() + + if rb != nil && c.clusterAddrBridge != nil { + c.clusterAddrBridge.UpdateClusterAddr(c.GetRaftNodeID(), s) + } +} + func (c *Core) getClusterListener() *cluster.Listener { cl := c.clusterListener.Load() if cl == nil { diff --git a/vault/core.go b/vault/core.go index a04f52de7917..db16cb7aa4f2 100644 --- a/vault/core.go +++ b/vault/core.go @@ -710,6 +710,8 @@ type Core struct { echoDuration *uberAtomic.Duration activeNodeClockSkewMillis *uberAtomic.Int64 periodicLeaderRefreshInterval time.Duration + + clusterAddrBridge *raft.ClusterAddrBridge } func (c *Core) ActiveNodeClockSkewMillis() int64 { @@ -886,6 +888,8 @@ type CoreConfig struct { NumRollbackWorkers int PeriodicLeaderRefreshInterval time.Duration + + ClusterAddrBridge *raft.ClusterAddrBridge } // GetServiceRegistration returns the config's ServiceRegistration, or nil if it does @@ -1309,6 +1313,8 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.events = events c.events.Start() + c.clusterAddrBridge = conf.ClusterAddrBridge + return c, nil } diff --git a/vault/external_tests/raft/raft_autopilot_test.go b/vault/external_tests/raft/raft_autopilot_test.go index 477b76c5b1f1..fa94fb3c510e 100644 --- a/vault/external_tests/raft/raft_autopilot_test.go +++ b/vault/external_tests/raft/raft_autopilot_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + "io" "math" "reflect" "testing" @@ -221,9 +222,7 @@ func TestRaft_Autopilot_Stabilization_Delay(t *testing.T) { InmemCluster: true, EnableAutopilot: true, PhysicalFactoryConfig: map[string]interface{}{ - "snapshot_threshold": "50", - "trailing_logs": "100", - "snapshot_interval": "1s", + "trailing_logs": "10", }, PerNodePhysicalFactoryConfig: map[int]map[string]interface{}{ 2: { @@ -256,12 +255,12 @@ func TestRaft_Autopilot_Stabilization_Delay(t *testing.T) { require.NoError(t, err) // Wait for 125% of the stabilization time to add nodes - stabilizationKickOffWaitDuration := time.Duration(math.Ceil(1.1 * float64(config.ServerStabilizationTime))) - time.Sleep(stabilizationKickOffWaitDuration) + stabilizationPadded := time.Duration(math.Ceil(1.25 * float64(config.ServerStabilizationTime))) + time.Sleep(stabilizationPadded) cli := cluster.Cores[0].Client // Write more keys than snapshot_threshold - for i := 0; i < 250; i++ { + for i := 0; i < 50; i++ { _, err := cli.Logical().Write(fmt.Sprintf("secret/%d", i), map[string]interface{}{ "test": "data", }) @@ -270,16 +269,24 @@ func TestRaft_Autopilot_Stabilization_Delay(t *testing.T) { } } + // Take a snapshot, which should compact the raft log db, which should prevent + // followers from getting logs and require that they instead apply a snapshot, + // which should allow our snapshot_delay to come into play, which should result + // in core2 coming online slower.
+ err = client.Sys().RaftSnapshot(io.Discard) + require.NoError(t, err) + joinAndUnseal(t, cluster.Cores[1], cluster, false, false) joinAndUnseal(t, cluster.Cores[2], cluster, false, false) - core2shouldBeHealthyAt := time.Now().Add(core2SnapshotDelay).Add(config.ServerStabilizationTime) + // Add an extra fudge factor, since once the snapshot delay completes it can + // take time for the snapshot to actually be applied. + core2shouldBeHealthyAt := time.Now().Add(core2SnapshotDelay).Add(stabilizationPadded).Add(5 * time.Second) // Wait for enough time for stabilization to complete if things were good // - but they're not good, due to our snapshot_delay. So we fail if both // nodes are healthy. - stabilizationWaitDuration := time.Duration(1.25 * float64(config.ServerStabilizationTime)) - testhelpers.RetryUntil(t, stabilizationWaitDuration, func() error { + testhelpers.RetryUntil(t, stabilizationPadded, func() error { state, err := client.Sys().RaftAutopilotState() if err != nil { return err diff --git a/vault/external_tests/raft/raft_test.go b/vault/external_tests/raft/raft_test.go index 6cb433b94049..ce4281734ecd 100644 --- a/vault/external_tests/raft/raft_test.go +++ b/vault/external_tests/raft/raft_test.go @@ -14,7 +14,6 @@ import ( "net/http" "strings" "sync" - "sync/atomic" "testing" "time" @@ -54,9 +53,6 @@ type RaftClusterOpts struct { } func raftClusterBuilder(t testing.TB, ropts *RaftClusterOpts) (*vault.CoreConfig, vault.TestClusterOptions) { - // TODO remove global - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - if ropts == nil { ropts = &RaftClusterOpts{ InmemCluster: true, diff --git a/vault/external_tests/raftha/raft_ha_test.go b/vault/external_tests/raftha/raft_ha_test.go index 1cf00329f637..3f8c57d53533 100644 --- a/vault/external_tests/raftha/raft_ha_test.go +++ b/vault/external_tests/raftha/raft_ha_test.go @@ -4,7 +4,6 @@ package raftha import ( - "sync/atomic" "testing" "github.com/hashicorp/go-hclog" @@ -13,7 +12,6 @@ import ( "github.com/hashicorp/vault/helper/testhelpers/teststorage" consulstorage "github.com/hashicorp/vault/helper/testhelpers/teststorage/consul" vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/physical/raft" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/vault" ) @@ -62,25 +60,8 @@ func testRaftHANewCluster(t *testing.T, bundler teststorage.PhysicalBackendBundl teststorage.RaftHASetup(&conf, &opts, bundler) cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() defer cluster.Cleanup() - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} - - leaderCore := cluster.Cores[0] - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - // Seal the leader so we can install an address provider - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingHAStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.UnsealCore(t, leaderCore) - vault.TestWaitActive(t, leaderCore.Core) - } - - // Now unseal core for join commands to work - testhelpers.EnsureCoresUnsealed(t, cluster) - joinFunc := func(client *api.Client, addClientCerts bool) { req := &api.RaftJoinRequest{ LeaderCACert: string(cluster.CACertPEM), @@ -164,7 +145,6 @@ func TestRaft_HA_ExistingCluster(t *testing.T) { storage.Setup(&conf, &opts) cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() defer func() { cluster.Cleanup() storage.Cleanup(t, cluster) @@ -186,7 +166,6 @@ func TestRaft_HA_ExistingCluster(t *testing.T) { haStorage.Setup(&conf, 
&opts) cluster := vault.NewTestCluster(t, &conf, &opts) - cluster.Start() defer func() { cluster.Cleanup() haStorage.Cleanup(t, cluster) @@ -196,16 +175,8 @@ func TestRaft_HA_ExistingCluster(t *testing.T) { cluster.BarrierKeys = clusterBarrierKeys cluster.RootToken = clusterRootToken - addressProvider := &testhelpers.TestRaftServerAddressProvider{Cluster: cluster} - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - - // Seal the leader so we can install an address provider leaderCore := cluster.Cores[0] - { - testhelpers.EnsureCoreSealed(t, leaderCore) - leaderCore.UnderlyingHAStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - testhelpers.EnsureCoreUnsealed(t, cluster, leaderCore) - } + testhelpers.EnsureCoreUnsealed(t, cluster, leaderCore) // Call the bootstrap on the leader and then ensure that it becomes active leaderClient := cluster.Cores[0].Client @@ -218,10 +189,6 @@ func TestRaft_HA_ExistingCluster(t *testing.T) { vault.TestWaitActive(t, leaderCore.Core) } - // Set address provider - cluster.Cores[1].UnderlyingHAStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - cluster.Cores[2].UnderlyingHAStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) - // Now unseal core for join commands to work testhelpers.EnsureCoresUnsealed(t, cluster) diff --git a/vault/external_tests/sealmigration/seal_migration_test.go b/vault/external_tests/sealmigration/seal_migration_test.go index e973cf2893e4..64594cdf3f52 100644 --- a/vault/external_tests/sealmigration/seal_migration_test.go +++ b/vault/external_tests/sealmigration/seal_migration_test.go @@ -4,14 +4,11 @@ package sealmigration import ( - "sync/atomic" "testing" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/helper/testhelpers" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/testhelpers/teststorage" - "github.com/hashicorp/vault/vault" ) type testFunc func(t *testing.T, logger hclog.Logger, storage teststorage.ReusableStorage, basePort int) @@ -36,10 +33,7 @@ func testVariousBackends(t *testing.T, tf testFunc, basePort int, includeRaft bo logger := logger.Named("raft") raftBasePort := basePort + 400 - atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) - addressProvider := testhelpers.NewHardcodedServerAddressProvider(numTestCores, raftBasePort+10) - - storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores, addressProvider) + storage, cleanup := teststorage.MakeReusableRaftStorage(t, logger, numTestCores) defer cleanup() tf(t, logger, storage, raftBasePort) }) diff --git a/vault/raft.go b/vault/raft.go index 76dcf4fa0de0..585c0f08af3f 100644 --- a/vault/raft.go +++ b/vault/raft.go @@ -50,9 +50,6 @@ var ( raftAutopilotConfigurationStoragePath = "core/raft/autopilot/configuration" - // TestingUpdateClusterAddr is used in tests to override the cluster address - TestingUpdateClusterAddr uint32 - ErrJoinWithoutAutoloading = errors.New("attempt to join a cluster using autoloaded licenses while not using autoloading ourself") ) @@ -1303,7 +1300,7 @@ func (c *Core) joinRaftSendAnswer(ctx context.Context, sealAccess seal.Access, r return fmt.Errorf("error parsing cluster address: %w", err) } clusterAddr := parsedClusterAddr.Host - if atomic.LoadUint32(&TestingUpdateClusterAddr) == 1 && strings.HasSuffix(clusterAddr, ":0") { + if c.clusterAddrBridge != nil && strings.HasSuffix(clusterAddr, ":0") { // We are testing and have an address provider, so just create a random // addr, it will be overwritten 
later. var err error diff --git a/vault/testing.go b/vault/testing.go index 3aed96600ce1..2d17d7447463 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -1508,6 +1508,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te coreConfig.PendingRemovalMountsAllowed = base.PendingRemovalMountsAllowed coreConfig.ExpirationRevokeRetryBase = base.ExpirationRevokeRetryBase coreConfig.PeriodicLeaderRefreshInterval = base.PeriodicLeaderRefreshInterval + coreConfig.ClusterAddrBridge = base.ClusterAddrBridge testApplyEntBaseConfig(coreConfig, base) } if coreConfig.ClusterName == "" { @@ -1869,6 +1870,9 @@ func (testCluster *TestCluster) newCore(t testing.T, idx int, coreConfig *CoreCo localConfig.ClusterNetworkLayer = opts.ClusterLayers.Layers()[idx] localConfig.ClusterAddr = "https://" + localConfig.ClusterNetworkLayer.Listeners()[0].Addr().String() } + if opts != nil && opts.BaseClusterListenPort != 0 { + localConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", opts.BaseClusterListenPort+idx) + } switch { case localConfig.LicensingConfig != nil: From 52c02ae41d765dd3c3a8f1bc950050262a019e3e Mon Sep 17 00:00:00 2001 From: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com> Date: Mon, 18 Dec 2023 11:03:35 -0600 Subject: [PATCH 06/25] UI: Add a11y testing (#24476) --- changelog/24476.txt | 3 + ui/app/components/auth-saml.hbs | 9 +- ui/app/components/calendar-widget.js | 16 -- ui/app/components/policy-form.hbs | 6 +- ui/app/components/sidebar/frame.hbs | 4 +- ui/app/components/tool-actions-form.js | 3 + ui/app/styles/components/auth-form.scss | 2 +- ui/app/styles/components/calendar-widget.scss | 33 --- .../styles/components/console-ui-panel.scss | 17 ++ ui/app/styles/components/doc-link.scss | 1 - .../components/empty-state-component.scss | 12 +- ui/app/styles/components/masked-input.scss | 7 +- ui/app/styles/components/radio-card.scss | 2 +- ui/app/styles/components/search-select.scss | 2 +- ui/app/styles/components/tool-tip.scss | 15 +- ui/app/styles/core/charts.scss | 4 +- ui/app/styles/core/element-styling.scss | 3 +- ui/app/styles/core/inputs.scss | 2 +- ui/app/styles/core/json-diff-patch.scss | 12 +- ui/app/styles/core/link.scss | 4 - ui/app/styles/core/tag.scss | 7 +- ui/app/styles/helper-classes/colors.scss | 10 +- ui/app/styles/helper-classes/typography.scss | 4 +- ui/app/templates/components/auth-form.hbs | 1 + ui/app/templates/components/auth-jwt.hbs | 5 +- .../templates/components/calendar-widget.hbs | 118 ++++---- .../components/clients/dashboard.hbs | 4 +- .../components/configure-aws-secret.hbs | 38 +-- .../components/console/command-input.hbs | 38 ++- .../console/log-error-with-html.hbs | 2 +- .../components/console/log-error.hbs | 2 +- .../templates/components/console/log-help.hbs | 2 +- .../templates/components/console/log-json.hbs | 2 +- .../templates/components/console/log-list.hbs | 2 +- .../components/console/log-object.hbs | 2 +- .../components/console/log-success.hbs | 2 +- .../templates/components/console/log-text.hbs | 2 +- .../templates/components/console/ui-panel.hbs | 17 +- .../components/control-group-success.hbs | 1 + .../dashboard/client-count-card.hbs | 2 +- .../dashboard/quick-actions-card.hbs | 3 +- .../components/dashboard/replication-card.hbs | 2 +- .../dashboard/secrets-engines-card.hbs | 25 +- .../components/generated-item-list.hbs | 2 +- .../components/identity/entity-nav.hbs | 18 +- .../components/identity/popup-alias.hbs | 2 +- .../components/identity/popup-members.hbs | 2 +- 
.../components/identity/popup-metadata.hbs | 2 +- .../components/identity/popup-policy.hbs | 2 +- .../templates/components/keymgmt/key-edit.hbs | 2 +- .../components/keymgmt/provider-edit.hbs | 7 +- ui/app/templates/components/link-status.hbs | 4 +- .../mfa/login-enforcement-list-item.hbs | 2 +- .../components/mfa/method-list-item.hbs | 2 +- ui/app/templates/components/mfa/mfa-form.hbs | 2 +- .../mfa/mfa-login-enforcement-form.hbs | 5 + ui/app/templates/components/mfa/nav.hbs | 2 +- .../components/mount-accessor-select.hbs | 8 +- .../components/oidc/assignment-form.hbs | 1 + .../templates/components/oidc/client-form.hbs | 2 +- .../templates/components/oidc/client-list.hbs | 2 +- ui/app/templates/components/oidc/key-form.hbs | 2 +- .../components/oidc/provider-form.hbs | 2 +- .../components/oidc/provider-list.hbs | 2 +- ui/app/templates/components/pgp-file.hbs | 3 +- .../components/secret-edit-toolbar.hbs | 6 +- ui/app/templates/components/secret-edit.hbs | 10 +- .../components/secret-list/aws-role-item.hbs | 2 +- .../secret-list/database-list-item.hbs | 2 +- .../templates/components/secret-list/item.hbs | 2 +- .../components/secret-list/ssh-role-item.hbs | 2 +- .../secret-list/transform-list-item.hbs | 2 +- .../transform-transformation-item.hbs | 2 +- ui/app/templates/components/section-tabs.hbs | 2 +- .../transform-show-transformation.hbs | 16 +- .../components/transform-template-edit.hbs | 2 +- .../components/transit-form-show.hbs | 2 +- .../templates/components/wizard-content.hbs | 2 +- ui/app/templates/error.hbs | 2 +- .../cluster/access/identity/aliases/show.hbs | 2 +- .../vault/cluster/access/identity/index.hbs | 21 +- .../vault/cluster/access/identity/show.hbs | 2 +- .../vault/cluster/access/methods.hbs | 29 +- .../vault/cluster/access/mfa/index.hbs | 4 +- .../vault/cluster/access/namespaces/index.hbs | 12 +- .../templates/vault/cluster/access/oidc.hbs | 6 +- ui/app/templates/vault/cluster/clients.hbs | 2 +- .../vault/cluster/secrets/backends.hbs | 13 +- ui/app/templates/vault/error.hbs | 2 +- .../addon/components/autocomplete-input.hbs | 3 +- ui/lib/core/addon/components/code-snippet.hbs | 20 -- .../addon/components/confirmation-modal.hbs | 5 +- .../addon/components/copy-secret-dropdown.hbs | 3 +- ui/lib/core/addon/components/empty-state.hbs | 8 +- ui/lib/core/addon/components/enable-input.hbs | 2 +- ui/lib/core/addon/components/enable-input.ts | 8 +- ui/lib/core/addon/components/form-field.hbs | 1 + ui/lib/core/addon/components/icon.hbs | 2 +- ui/lib/core/addon/components/info-tooltip.hbs | 3 +- ui/lib/core/addon/components/json-editor.hbs | 8 +- ui/lib/core/addon/components/masked-input.hbs | 1 + .../core/addon/components/overview-card.hbs | 12 +- ui/lib/core/addon/components/page/error.hbs | 11 +- ui/lib/core/addon/components/popup-menu.hbs | 3 +- ui/lib/core/addon/components/radio-card.hbs | 4 +- .../core/addon/components/search-select.hbs | 2 +- .../addon/components/secret-list-header.hbs | 54 ++-- ui/lib/core/addon/components/select.js | 2 + .../addon/components/shamir/dr-token-flow.hbs | 8 +- ui/lib/core/addon/components/shamir/form.hbs | 10 +- ui/lib/core/addon/components/string-list.hbs | 2 +- ui/lib/core/addon/components/text-file.hbs | 12 +- .../core/addon/components/toolbar-actions.hbs | 2 +- .../core/addon/components/toolbar-filters.hbs | 2 +- ui/lib/core/addon/components/ttl-picker.hbs | 4 +- .../components/replication-secondary-card.hbs | 36 +-- .../addon/templates/components/select.hbs | 5 +- ui/lib/core/app/components/code-snippet.js | 6 - 
.../addon/components/page/credentials.hbs | 3 +- .../addon/components/page/role/details.hbs | 2 +- .../addon/components/tab-page-header.hbs | 8 +- ui/lib/kv/addon/components/kv-list-filter.hbs | 3 +- ui/lib/kv/addon/components/kv-page-header.hbs | 6 +- .../addon/components/page/configuration.hbs | 4 +- ui/lib/kv/addon/components/page/list.hbs | 6 +- .../addon/components/page/secret/details.hbs | 8 +- .../page/secret/metadata/details.hbs | 8 +- .../page/secret/metadata/version-history.hbs | 8 +- .../kv/addon/components/page/secret/paths.hbs | 28 +- .../kv/addon/components/page/secret/paths.js | 10 +- ui/lib/kv/addon/templates/error.hbs | 4 +- .../ldap/addon/components/page/libraries.hbs | 1 + .../addon/components/page/library/details.hbs | 6 +- .../page/library/details/accounts.hbs | 9 +- .../addon/components/page/role/details.hbs | 2 +- ui/lib/ldap/addon/components/page/roles.hbs | 8 +- .../ldap/addon/components/tab-page-header.hbs | 14 +- .../page/pki-configuration-details.hbs | 4 +- .../addon/components/page/pki-issuer-edit.hbs | 4 +- .../page/pki-issuer-rotate-root.hbs | 4 +- .../pki/addon/components/pki-page-header.hbs | 14 +- ui/lib/pki/addon/components/pki-role-form.hbs | 3 +- .../components/secrets/destination-header.hbs | 4 +- .../components/secrets/page/destinations.hbs | 4 +- ui/package.json | 3 +- ui/testem.js | 2 +- .../access/identity/entities/create-test.js | 7 + .../identity/groups/aliases/create-test.js | 7 + .../access/identity/groups/create-test.js | 7 + .../enterprise-control-groups-test.js | 7 + ui/tests/acceptance/enterprise-kmip-test.js | 7 + ui/tests/acceptance/init-test.js | 7 + .../secrets/backend/kubernetes/roles-test.js | 7 + .../secrets/backend/ldap/libraries-test.js | 7 + .../secrets/backend/ldap/roles-test.js | 7 + ui/tests/acceptance/sidebar-nav-test.js | 8 +- ui/tests/acceptance/ssh-test.js | 7 + ui/tests/helpers/general-selectors.js | 6 +- ui/tests/helpers/kubernetes/overview.js | 8 +- ui/tests/helpers/kv/kv-selectors.js | 4 +- ui/tests/helpers/pki/overview.js | 12 +- .../components/autocomplete-input-test.js | 13 + .../components/calendar-widget-test.js | 54 ++-- .../clients/horizontal-bar-chart-test.js | 29 +- .../components/clients/line-chart-test.js | 30 ++- .../components/clients/running-total-test.js | 7 + .../clients/vertical-bar-chart-test.js | 48 ++-- .../components/confirm-action-test.js | 7 + .../components/console/log-json-test.js | 7 + .../components/control-group-success-test.js | 12 +- .../dashboard/quick-actions-card-test.js | 9 + .../database-role-setting-form-test.js | 7 + .../components/enable-input-test.js | 20 +- .../components/filter-input-test.js | 12 +- .../integration/components/form-field-test.js | 13 + .../components/get-credentials-card-test.js | 8 + .../components/json-editor-test.js | 10 +- .../components/keymgmt/distribute-test.js | 9 + .../components/keymgmt/provider-edit-test.js | 9 + .../kubernetes/page/configure-test.js | 8 + .../kubernetes/page/overview-test.js | 8 + .../page/role/create-and-edit-test.js | 10 + .../kubernetes/page/role/details-test.js | 7 + .../components/kv/kv-data-fields-test.js | 7 + .../components/kv/kv-page-header-test.js | 4 +- .../components/kv/page/kv-page-list-test.js | 20 +- .../kv/page/kv-page-secret-details-test.js | 7 + .../kv/page/kv-page-secret-edit-test.js | 8 + .../kv/page/kv-page-secret-paths-test.js | 16 +- .../kv/page/kv-page-secrets-create-test.js | 8 + .../kv/page/kv-page-version-history-test.js | 2 +- .../ldap/page/configuration-test.js | 8 + 
.../components/ldap/page/libraries-test.js | 6 + .../page/library/details/accounts-test.js | 2 +- .../components/ldap/page/overview-test.js | 8 + .../ldap/page/role/create-and-edit-test.js | 7 + .../components/license-info-test.js | 10 + .../mfa-login-enforcement-form-test.js | 10 + .../mfa-login-enforcement-header-test.js | 14 + .../mount-backend/type-form-test.js | 7 + .../components/oidc/assignment-form-test.js | 8 + .../components/oidc/client-form-test.js | 12 + .../components/oidc/key-form-test.js | 11 + .../components/oidc/provider-form-test.js | 11 + .../components/oidc/scope-form-test.js | 7 + .../components/overview-card-test.js | 15 +- .../path-filter-config-list-test.js | 10 + .../integration/components/pgp-file-test.js | 4 +- .../page/pki-configuration-details-test.js | 7 + .../pki/page/pki-issuer-generate-root-test.js | 10 + .../pki/page/pki-issuer-rotate-root-test.js | 8 + .../components/pki/pki-generate-csr-test.js | 7 + .../components/pki/pki-key-parameters-test.js | 7 + .../components/pki/pki-role-form-test.js | 8 + .../components/policy-example-test.js | 12 + .../components/policy-form-test.js | 8 + .../components/radio-button-test.js | 9 +- .../replication-secondary-card-test.js | 8 + .../components/search-select-test.js | 11 + .../search-select-with-modal-test.js | 11 + .../components/secret-edit-test.js | 18 +- .../integration/components/select-test.js | 4 +- .../components/sidebar/frame-test.js | 20 ++ .../components/sidebar/nav/access-test.js | 12 + .../components/sidebar/nav/cluster-test.js | 12 + .../components/sidebar/nav/policies-test.js | 12 + .../components/sidebar/nav/tools-test.js | 12 + .../components/sidebar/user-menu-test.js | 9 + .../integration/components/text-file-test.js | 14 +- .../integration/components/tool-tip-test.js | 2 +- .../transform-advanced-templating-test.js | 7 + .../components/transit-key-actions-test.js | 31 ++- .../integration/components/ttl-picker-test.js | 7 + ui/tests/test-helper.js | 11 + ui/yarn.lock | 253 ++++++++++++++---- 235 files changed, 1552 insertions(+), 735 deletions(-) create mode 100644 changelog/24476.txt delete mode 100644 ui/lib/core/addon/components/code-snippet.hbs delete mode 100644 ui/lib/core/app/components/code-snippet.js diff --git a/changelog/24476.txt b/changelog/24476.txt new file mode 100644 index 000000000000..797ed9a48d47 --- /dev/null +++ b/changelog/24476.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: improve accessibility - color contrast, labels, and automatic testing +``` diff --git a/ui/app/components/auth-saml.hbs b/ui/app/components/auth-saml.hbs index 8d69fe8759ce..aa87e3770d5c 100644 --- a/ui/app/components/auth-saml.hbs +++ b/ui/app/components/auth-saml.hbs @@ -46,10 +46,11 @@ Logging in with a SAML auth method requires a browser in a secure context. - - Read more about secure contexts. 
- - + {{/if}} \ No newline at end of file diff --git a/ui/app/components/calendar-widget.js b/ui/app/components/calendar-widget.js index 2e1a3271b1a0..804c9af1963a 100644 --- a/ui/app/components/calendar-widget.js +++ b/ui/app/components/calendar-widget.js @@ -28,8 +28,6 @@ export default class CalendarWidget extends Component { currentDate = timestamp.now(); @tracked calendarDisplayDate = this.currentDate; // init to current date, updates when user clicks on calendar chevrons @tracked showCalendar = false; - @tracked tooltipTarget = null; - @tracked tooltipText = null; // both date getters return a date object get startDate() { @@ -72,20 +70,6 @@ export default class CalendarWidget extends Component { }); } - @action - addTooltip() { - if (this.disablePastYear) { - const previousYear = this.displayYear - 1; - this.tooltipText = `${previousYear} is unavailable because it is before your start date. Change your start month to a date in ${previousYear} to see data for this year.`; - this.tooltipTarget = '#previous-year'; - } - } - - @action - removeTooltip() { - this.tooltipTarget = null; - } - @action addYear() { this.calendarDisplayDate = addYears(this.calendarDisplayDate, 1); diff --git a/ui/app/components/policy-form.hbs b/ui/app/components/policy-form.hbs index a04533b8e3f9..c4085e64f9a6 100644 --- a/ui/app/components/policy-form.hbs +++ b/ui/app/components/policy-form.hbs @@ -23,11 +23,11 @@ {{/if}}
- + {{#if @renderPolicyExampleModal}} {{! only true in policy create and edit routes }} - + {{/if}} - +
{{#if @model.isNew}}
diff --git a/ui/app/components/sidebar/frame.hbs b/ui/app/components/sidebar/frame.hbs index 85113093701d..2139385f3ade 100644 --- a/ui/app/components/sidebar/frame.hbs +++ b/ui/app/components/sidebar/frame.hbs @@ -13,7 +13,7 @@ @icon="vault" @route="vault.cluster.dashboard" @model={{this.currentCluster.cluster.name}} - @ariaLabel="home link" + @ariaLabel="Vault home" data-test-sidebar-logo /> @@ -31,7 +31,7 @@ {{! this block is where the Hds::SideNav::Portal components render into }} <:body> - + <:footer> diff --git a/ui/app/components/tool-actions-form.js b/ui/app/components/tool-actions-form.js index 2441653bd3b7..40b183f3af06 100644 --- a/ui/app/components/tool-actions-form.js +++ b/ui/app/components/tool-actions-form.js @@ -10,6 +10,7 @@ import Component from '@ember/component'; import { setProperties, computed, set } from '@ember/object'; import { addSeconds, parseISO } from 'date-fns'; import { A } from '@ember/array'; +import { capitalize } from '@ember/string'; const DEFAULTS = { token: null, @@ -30,6 +31,7 @@ const DEFAULTS = { const WRAPPING_ENDPOINTS = ['lookup', 'wrap', 'unwrap', 'rewrap']; export default Component.extend(DEFAULTS, { + flashMessages: service(), store: service(), // putting these attrs here so they don't get reset when you click back //random @@ -97,6 +99,7 @@ export default Component.extend(DEFAULTS, { props = assign({}, props, { [keyName]: resp.wrap_info.token }); } setProperties(this, props); + this.flashMessages.success(`${capitalize(action)} was successful.`); }, getData() { diff --git a/ui/app/styles/components/auth-form.scss b/ui/app/styles/components/auth-form.scss index a0f7fc55b692..c6f62abfea41 100644 --- a/ui/app/styles/components/auth-form.scss +++ b/ui/app/styles/components/auth-form.scss @@ -37,6 +37,6 @@ } .is-label { - color: $grey; + color: var(--token-color-foreground-faint); } } diff --git a/ui/app/styles/components/calendar-widget.scss b/ui/app/styles/components/calendar-widget.scss index 258fcf9bdc08..d7db94563855 100644 --- a/ui/app/styles/components/calendar-widget.scss +++ b/ui/app/styles/components/calendar-widget.scss @@ -32,30 +32,6 @@ $dark-gray: #535f73; .calendar-widget { grid-area: calendar-widget; - - > button { - &.is-month-list { - background-color: $white; - color: black; - text-align: center; - border: $light-border; - border-radius: $radius; - } - &.is-current-month { - border: 1px solid $ui-gray-900; - } - &:hover { - background-color: lighten($dark-gray, 30%); - color: $white; - text-align: center; - cursor: pointer; - } - &.is-readOnly { - background-color: $ui-gray-100; - color: lighten($dark-gray, 30%); - pointer-events: none; - } - } } .border-col { @@ -84,15 +60,6 @@ $dark-gray: #535f73; } // for modal-dialog tooltips -.calendar-tooltip { - background-color: $ui-gray-700; - color: $white; - font-size: $size-8; - padding: $spacing-10; - border-radius: $radius-large; - width: 141px; -} - .ember-modal-dialog { z-index: 1000; } diff --git a/ui/app/styles/components/console-ui-panel.scss b/ui/app/styles/components/console-ui-panel.scss index 2342969ec898..ed4b420ef5fd 100644 --- a/ui/app/styles/components/console-ui-panel.scss +++ b/ui/app/styles/components/console-ui-panel.scss @@ -50,6 +50,10 @@ $console-close-height: 35px; margin-left: $spacing-20; padding: $spacing-12 $spacing-20; } + + .console-ui-panel-intro { + color: var(--token-color-palette-neutral-400); + } } .console-ui-input { @@ -83,6 +87,7 @@ $console-close-height: 35px; padding-right: $spacing-36; position: relative; background-color: rgba(#000, 0); + 
color: var(--token-color-palette-neutral-400); &:hover { background-color: rgba(#000, 0.5); } @@ -91,12 +96,24 @@ $console-close-height: 35px; .console-ui-alert { margin-left: calc(#{$spacing-20} - 0.33rem); position: relative; + color: var(--token-color-palette-neutral-400); svg { position: absolute; left: 0; top: 0; } + + &.console-ui-alert--error { + // HDS tokens are not light enough on the dark background to pass a11y tests. + // hex value for --token-color-foreground-critical + color: lighten(#e52228, 20%); + } + &.console-ui-alert--success { + // HDS tokens are not light enough on the dark background to pass a11y tests. + // hex value for --token-color-foreground-success + color: lighten(#008a22, 20%); + } } .panel-open .console-ui-panel { diff --git a/ui/app/styles/components/doc-link.scss b/ui/app/styles/components/doc-link.scss index 20edb9d62dc8..8629ee56a6a0 100644 --- a/ui/app/styles/components/doc-link.scss +++ b/ui/app/styles/components/doc-link.scss @@ -5,7 +5,6 @@ .doc-link { color: $blue; - text-decoration: none; font-weight: $font-weight-semibold; &:hover { text-decoration: underline !important; diff --git a/ui/app/styles/components/empty-state-component.scss b/ui/app/styles/components/empty-state-component.scss index abac11593bc0..dce11b29e44b 100644 --- a/ui/app/styles/components/empty-state-component.scss +++ b/ui/app/styles/components/empty-state-component.scss @@ -5,8 +5,8 @@ .empty-state { align-items: center; - color: $grey; - background: $ui-gray-010; + color: var(--token-color-foreground-faint); + background: var(--token-color-surface-faint); display: flex; justify-content: center; padding: $spacing-48 $spacing-12; @@ -15,7 +15,7 @@ .empty-state-transparent { align-items: center; - color: $grey; + color: var(--token-color-foreground-faint); background: transparent; display: flex; justify-content: center; @@ -50,12 +50,6 @@ display: flex; justify-content: space-between; - a, - .link, - a:not(.button):not(.file-delete-button):not(.tag) { - text-decoration: none; - } - > * + * { margin-left: $spacing-12; margin-right: $spacing-12; diff --git a/ui/app/styles/components/masked-input.scss b/ui/app/styles/components/masked-input.scss index 5edd649e4d70..12987d057499 100644 --- a/ui/app/styles/components/masked-input.scss +++ b/ui/app/styles/components/masked-input.scss @@ -3,8 +3,9 @@ * SPDX-License-Identifier: BUSL-1.1 */ -.masked-font { - color: $ui-gray-300; +.masked-font, +pre.masked-font { + color: var(--token-color-foreground-faint); } .masked-input { @@ -84,7 +85,7 @@ } .masked-input.masked .masked-value { - color: $grey-light; + color: var(--token-color-foreground-faint); } .masked-input .input:focus + .masked-input-toggle { diff --git a/ui/app/styles/components/radio-card.scss b/ui/app/styles/components/radio-card.scss index d6b9c11138c6..b335ebb6efc4 100644 --- a/ui/app/styles/components/radio-card.scss +++ b/ui/app/styles/components/radio-card.scss @@ -83,7 +83,7 @@ } .radio-card-message-body { line-height: 1.2; - color: $ui-gray-500; + color: var(--token-color-foreground-faint); font-size: $size-8; } diff --git a/ui/app/styles/components/search-select.scss b/ui/app/styles/components/search-select.scss index 3e49c01b69e9..fe279671df6c 100644 --- a/ui/app/styles/components/search-select.scss +++ b/ui/app/styles/components/search-select.scss @@ -113,7 +113,7 @@ div > .ember-power-select-options { } .search-select-list-key { - color: $grey; + color: var(--token-color-foreground-faint); font-size: $size-8; } diff --git a/ui/app/styles/components/tool-tip.scss 
b/ui/app/styles/components/tool-tip.scss index d452b0d1b491..e77d90033e4e 100644 --- a/ui/app/styles/components/tool-tip.scss +++ b/ui/app/styles/components/tool-tip.scss @@ -7,21 +7,24 @@ font-size: $size-7; text-transform: none; margin: 8px 0px 0 -4px; + border: none; + border-radius: $radius-large; .box { position: relative; color: $white; max-width: 200px; - background: $grey; + background: $black; padding: 0.5rem; line-height: 1.4; + border-radius: $radius-large; } .fit-content { max-width: fit-content; } - @include css-top-arrow(8px, $grey, 1px, $grey-dark, 20px); + @include css-top-arrow(8px, $black, 1px, $black, 20px); &.ember-basic-dropdown-content--below.ember-basic-dropdown--transitioning-in { animation: drop-fade-above 0.15s; } @@ -53,17 +56,17 @@ } .ember-basic-dropdown-content--below.ember-basic-dropdown-content--left.tool-tip { - @include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); + @include css-top-arrow(8px, $black, 1px, $black, calc(100% - 20px)); } .ember-basic-dropdown-content--below.ember-basic-dropdown-content--right.tool-tip { - @include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); + @include css-top-arrow(8px, $black, 1px, $black, calc(100% - 20px)); } .ember-basic-dropdown-content--above.tool-tip { - @include css-bottom-arrow(8px, $grey, 1px, $grey-dark); + @include css-bottom-arrow(8px, $black, 1px, $black); margin-top: -8px; } .ember-basic-dropdown-content--above.ember-basic-dropdown-content--right.tool-tip { - @include css-bottom-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); + @include css-bottom-arrow(8px, $black, 1px, $black, calc(100% - 20px)); } .b-checkbox .tool-tip-trigger { diff --git a/ui/app/styles/core/charts.scss b/ui/app/styles/core/charts.scss index ece1e4cab2d5..167d9f1125fd 100644 --- a/ui/app/styles/core/charts.scss +++ b/ui/app/styles/core/charts.scss @@ -250,7 +250,7 @@ p.data-details { } .chart-tooltip { - background-color: $ui-gray-700; + background-color: $black; color: white; font-size: $size-9; padding: 6px; @@ -282,7 +282,7 @@ p.data-details { height: 0; border-left: 5px solid transparent; border-right: 5px solid transparent; - border-top: 9px solid $ui-gray-700; + border-top: 9px solid $black; position: absolute; opacity: 0.8; bottom: -9px; diff --git a/ui/app/styles/core/element-styling.scss b/ui/app/styles/core/element-styling.scss index 383d3e2a7caa..596523db175c 100644 --- a/ui/app/styles/core/element-styling.scss +++ b/ui/app/styles/core/element-styling.scss @@ -33,7 +33,8 @@ h6 { a:hover, body, -pre, +// default set here is too dark for HDS codeblock +pre:not(.hds-code-block__code), strong, table th { color: $ui-gray-900; diff --git a/ui/app/styles/core/inputs.scss b/ui/app/styles/core/inputs.scss index 275200d7b7a0..e7982fd81100 100644 --- a/ui/app/styles/core/inputs.scss +++ b/ui/app/styles/core/inputs.scss @@ -81,5 +81,5 @@ .input-hint { padding: 0 $spacing-10; font-size: $size-8; - color: $grey; + color: var(--token-color-foreground-faint); } diff --git a/ui/app/styles/core/json-diff-patch.scss b/ui/app/styles/core/json-diff-patch.scss index 3e472d9ddb45..170926ff05ae 100644 --- a/ui/app/styles/core/json-diff-patch.scss +++ b/ui/app/styles/core/json-diff-patch.scss @@ -9,15 +9,23 @@ .jsondiffpatch-deleted pre, .jsondiffpatch-modified .jsondiffpatch-left-value pre, .jsondiffpatch-textdiff-deleted { - background: $red-500; + background: var(--token-color-foreground-critical-high-contrast); } .jsondiffpatch-added .jsondiffpatch-property-name, .jsondiffpatch-added .jsondiffpatch-value 
pre, .jsondiffpatch-modified .jsondiffpatch-right-value pre, .jsondiffpatch-textdiff-added { - background: $green-500; + background: var(--token-color-foreground-success-high-contrast); } .jsondiffpatch-property-name { color: $ui-gray-300; } + +.jsondiffpatch-added > .jsondiffpatch-property-name { + color: var(--token-color-surface-success); +} + +.jsondiffpatch-deleted > .jsondiffpatch-property-name { + color: var(--token-color-surface-critical); +} diff --git a/ui/app/styles/core/link.scss b/ui/app/styles/core/link.scss index 9382d51eef36..efece782fc86 100644 --- a/ui/app/styles/core/link.scss +++ b/ui/app/styles/core/link.scss @@ -21,7 +21,3 @@ cursor: default; } } -// NICE TO HAVE: replace all instances with helper "is-no-underline" -.link-plain { - text-decoration: none; -} diff --git a/ui/app/styles/core/tag.scss b/ui/app/styles/core/tag.scss index 4918d549ed31..fd211e30b12a 100644 --- a/ui/app/styles/core/tag.scss +++ b/ui/app/styles/core/tag.scss @@ -7,9 +7,10 @@ .tag:not(body) { align-items: center; - background-color: $ui-gray-100; + // same as HDS::Badge @color=neutral + background-color: var(--token-color-surface-strong); + color: var(--token-color-foreground-primary); border-radius: $radius; - color: $grey; display: inline-flex; font-size: $size-8; font-weight: $font-weight-normal; @@ -22,7 +23,7 @@ vertical-align: middle; code { - color: $grey; + color: var(--token-color-foreground-primary); } .icon { diff --git a/ui/app/styles/helper-classes/colors.scss b/ui/app/styles/helper-classes/colors.scss index 7f31e1df381d..d505ef8b569f 100644 --- a/ui/app/styles/helper-classes/colors.scss +++ b/ui/app/styles/helper-classes/colors.scss @@ -60,15 +60,11 @@ select.has-error-border, } .has-text-grey-light { - color: $ui-gray-300 !important; -} - -.has-text-grey-400 { - color: $ui-gray-400; + color: var(--token-color-foreground-faint) !important; } .has-text-grey { - color: $ui-gray-500 !important; + color: var(--token-color-foreground-faint) !important; } .has-text-grey-dark { @@ -92,7 +88,7 @@ select.has-error-border, } .has-text-success { - color: $green-500 !important; + color: var(--token-color-foreground-success) !important; } .has-text-highlight { diff --git a/ui/app/styles/helper-classes/typography.scss b/ui/app/styles/helper-classes/typography.scss index 2425adc37508..4cde9b2d3c16 100644 --- a/ui/app/styles/helper-classes/typography.scss +++ b/ui/app/styles/helper-classes/typography.scss @@ -3,7 +3,7 @@ * SPDX-License-Identifier: BUSL-1.1 */ -/* This helper includes styles relating to: font-size, font-family, font-alignment, text-transform, text-weight, font-style, text-decoration. +/* This helper includes styles relating to: font-size, font-family, font-alignment, text-transform, text-weight, font-style, text-decoration. * Deprecated, please use the HDS text component to style fonts https://helios.hashicorp.design/components/text */ // font size helpers @@ -101,7 +101,7 @@ } .sub-text { - color: $grey; + color: var(--token-color-foreground-faint); margin-bottom: $size-11; font-size: $size-8; diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs index 8d06e36dcd86..01ea4f760f57 100644 --- a/ui/app/templates/components/auth-form.hbs +++ b/ui/app/templates/components/auth-form.hbs @@ -140,6 +140,7 @@ autocomplete="off" spellcheck="false" data-test-token={{true}} + id="token" />
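The id="token" added to the auth-form input above gives that field a programmatic label target, which is the kind of association the automated accessibility checks in this change exercise. A minimal rendering-test sketch using ember-a11y-testing is shown below; the <AuthForm> invocation and its arguments are assumptions for illustration, not the exact test added by this commit.

import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { render } from '@ember/test-helpers';
import { hbs } from 'ember-cli-htmlbars';
import a11yAudit from 'ember-a11y-testing/test-support/audit';

module('Integration | Component | auth-form (a11y sketch)', function (hooks) {
  setupRenderingTest(hooks);

  test('token input is labelled', async function (assert) {
    // Hypothetical invocation; the real component may require additional arguments.
    await render(hbs`<AuthForm @selectedAuth="token" />`);

    // a11yAudit runs axe-core against the rendered DOM and rejects the promise
    // on violations such as a text input with no associated label.
    await a11yAudit(this.element);
    assert.ok(true, 'no accessibility violations detected');
  });
});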
diff --git a/ui/app/templates/components/auth-jwt.hbs b/ui/app/templates/components/auth-jwt.hbs index c58de16aba1b..5a8f2e28004f 100644 --- a/ui/app/templates/components/auth-jwt.hbs +++ b/ui/app/templates/components/auth-jwt.hbs @@ -28,16 +28,17 @@ {{#unless this.isOIDC}}
- +
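Many of the test files in the diffstat above gain only a few setup lines, alongside changes to ui/tests/test-helper.js, which is consistent with registering accessibility audits globally rather than per test. A minimal sketch of that pattern with ember-a11y-testing follows; the audit predicate and axe run options are assumptions, not the exact configuration introduced here.

// ui/tests/test-helper.js (sketch, assumed configuration)
import Application from 'vault/app';
import config from 'vault/config/environment';
import * as QUnit from 'qunit';
import { setApplication } from '@ember/test-helpers';
import { setup } from 'qunit-dom';
import { start } from 'ember-qunit';
import { setupGlobalA11yHooks, setRunOptions } from 'ember-a11y-testing/test-support';

setApplication(Application.create(config.APP));
setup(QUnit.assert);

// Audit automatically after render() and visit() in every test;
// the predicate can be narrowed to opt individual tests in or out.
setupGlobalA11yHooks(() => true);

// Keep the suite fast by limiting axe-core to WCAG A/AA rules (assumed).
setRunOptions({ runOnly: { type: 'tag', values: ['wcag2a', 'wcag2aa'] } });

start();

Wiring the hooks globally means a newly unlabeled control fails the existing test for its component, rather than requiring a dedicated accessibility test per screen.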
diff --git a/ui/app/templates/components/calendar-widget.hbs b/ui/app/templates/components/calendar-widget.hbs index 3fbbf964b1c1..cd428c422b55 100644 --- a/ui/app/templates/components/calendar-widget.hbs +++ b/ui/app/templates/components/calendar-widget.hbs @@ -11,16 +11,14 @@ -