From 0f48b7af5ec5fdf2237ba9e79a63160a071f6a91 Mon Sep 17 00:00:00 2001 From: Ashvitha Date: Wed, 30 Aug 2023 13:25:26 -0400 Subject: [PATCH 1/7] [HCP Telemetry] Move first TelemetryConfig Fetch into the TelemetryConfigProvider (#18318) * Add Enabler interface to turn sink on/off * Use h for hcpProviderImpl vars, fix PR feeback and fix errors * Keep nil check in exporter and fix tests * Clarify comment and fix function name * Use disable instead of enable * Fix errors nit in otlp_transform * Add test for refreshInterval of updateConfig * Add disabled field in MetricsConfig struct * Fix PR feedback: improve comment and remove double colons * Fix deps test which requires a maybe * Update hcp-sdk-go to v0.61.0 * use disabled flag in telemetry_config.go * Handle 4XX errors in telemetry_provider * Fix deps test * Check 4XX instead * Run make go-mod-tidy --- agent/hcp/client/telemetry_config.go | 19 +- agent/hcp/client/telemetry_config_test.go | 47 +---- agent/hcp/deps.go | 39 ++-- agent/hcp/deps_test.go | 84 +-------- agent/hcp/telemetry/otel_exporter.go | 6 + agent/hcp/telemetry/otel_exporter_test.go | 17 +- agent/hcp/telemetry/otel_sink.go | 22 ++- agent/hcp/telemetry/otel_sink_test.go | 32 +++- agent/hcp/telemetry/otlp_transform.go | 10 +- agent/hcp/telemetry/otlp_transform_test.go | 6 +- agent/hcp/telemetry_provider.go | 112 ++++++----- agent/hcp/telemetry_provider_test.go | 208 +++++++++++++-------- go.mod | 3 +- go.sum | 6 +- test-integ/go.mod | 3 +- test-integ/go.sum | 6 +- test/integration/consul-container/go.mod | 3 +- test/integration/consul-container/go.sum | 6 +- troubleshoot/go.sum | 2 +- 19 files changed, 321 insertions(+), 310 deletions(-) diff --git a/agent/hcp/client/telemetry_config.go b/agent/hcp/client/telemetry_config.go index 4c5b27c58b4f..0745f1b7c619 100644 --- a/agent/hcp/client/telemetry_config.go +++ b/agent/hcp/client/telemetry_config.go @@ -20,7 +20,7 @@ import ( var ( // defaultMetricFilters is a regex that matches all metric names. 
- defaultMetricFilters = regexp.MustCompile(".+") + DefaultMetricFilters = regexp.MustCompile(".+") // Validation errors for AgentTelemetryConfigOK response. errMissingPayload = errors.New("missing payload") @@ -29,6 +29,7 @@ var ( errMissingMetricsConfig = errors.New("missing metrics config") errInvalidRefreshInterval = errors.New("invalid refresh interval") errInvalidEndpoint = errors.New("invalid metrics endpoint") + errEmptyEndpoint = errors.New("empty metrics endpoint") ) // TelemetryConfig contains configuration for telemetry data forwarded by Consul servers @@ -43,6 +44,7 @@ type MetricsConfig struct { Labels map[string]string Filters *regexp.Regexp Endpoint *url.URL + Disabled bool } // RefreshConfig contains configuration for the periodic fetch of configuration from HCP. @@ -50,11 +52,6 @@ type RefreshConfig struct { RefreshInterval time.Duration } -// MetricsEnabled returns true if metrics export is enabled, i.e. a valid metrics endpoint exists. -func (t *TelemetryConfig) MetricsEnabled() bool { - return t.MetricsConfig.Endpoint != nil -} - // validateAgentTelemetryConfigPayload ensures the returned payload from HCP is valid. 
func validateAgentTelemetryConfigPayload(resp *hcptelemetry.AgentTelemetryConfigOK) error { if resp.Payload == nil { @@ -86,7 +83,7 @@ func convertAgentTelemetryResponse(ctx context.Context, resp *hcptelemetry.Agent telemetryConfig := resp.Payload.TelemetryConfig metricsEndpoint, err := convertMetricEndpoint(telemetryConfig.Endpoint, telemetryConfig.Metrics.Endpoint) if err != nil { - return nil, errInvalidEndpoint + return nil, err } metricsFilters := convertMetricFilters(ctx, telemetryConfig.Metrics.IncludeList) @@ -97,6 +94,7 @@ func convertAgentTelemetryResponse(ctx context.Context, resp *hcptelemetry.Agent Endpoint: metricsEndpoint, Labels: metricLabels, Filters: metricsFilters, + Disabled: telemetryConfig.Metrics.Disabled, }, RefreshConfig: &RefreshConfig{ RefreshInterval: refreshInterval, @@ -114,9 +112,8 @@ func convertMetricEndpoint(telemetryEndpoint string, metricsEndpoint string) (*u endpoint = metricsEndpoint } - // If endpoint is empty, server not registered with CCM, no error returned. if endpoint == "" { - return nil, nil + return nil, errEmptyEndpoint } // Endpoint from CTW has no metrics path, so it must be added. @@ -145,7 +142,7 @@ func convertMetricFilters(ctx context.Context, payloadFilters []string) *regexp. if len(validFilters) == 0 { logger.Error("no valid filters") - return defaultMetricFilters + return DefaultMetricFilters } // Combine the valid regex strings with OR. @@ -153,7 +150,7 @@ func convertMetricFilters(ctx context.Context, payloadFilters []string) *regexp. 
composedRegex, err := regexp.Compile(finalRegex) if err != nil { logger.Error("failed to compile final regex", "error", err) - return defaultMetricFilters + return DefaultMetricFilters } return composedRegex diff --git a/agent/hcp/client/telemetry_config_test.go b/agent/hcp/client/telemetry_config_test.go index 1e6e2cb23a29..d43024400779 100644 --- a/agent/hcp/client/telemetry_config_test.go +++ b/agent/hcp/client/telemetry_config_test.go @@ -88,7 +88,6 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { resp *consul_telemetry_service.AgentTelemetryConfigOK expectedTelemetryCfg *TelemetryConfig wantErr error - expectedEnabled bool }{ "success": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -115,34 +114,6 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { RefreshInterval: 2 * time.Second, }, }, - expectedEnabled: true, - }, - "successNoEndpoint": { - resp: &consul_telemetry_service.AgentTelemetryConfigOK{ - Payload: &models.HashicorpCloudConsulTelemetry20230414AgentTelemetryConfigResponse{ - TelemetryConfig: &models.HashicorpCloudConsulTelemetry20230414TelemetryConfig{ - Endpoint: "", - Labels: map[string]string{"test": "test"}, - Metrics: &models.HashicorpCloudConsulTelemetry20230414TelemetryMetricsConfig{ - IncludeList: []string{"test", "consul"}, - }, - }, - RefreshConfig: &models.HashicorpCloudConsulTelemetry20230414RefreshConfig{ - RefreshInterval: "2s", - }, - }, - }, - expectedTelemetryCfg: &TelemetryConfig{ - MetricsConfig: &MetricsConfig{ - Endpoint: nil, - Labels: map[string]string{"test": "test"}, - Filters: validTestFilters, - }, - RefreshConfig: &RefreshConfig{ - RefreshInterval: 2 * time.Second, - }, - }, - expectedEnabled: false, }, "successBadFilters": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -163,13 +134,12 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { MetricsConfig: &MetricsConfig{ Endpoint: validTestURL, Labels: map[string]string{"test": "test"}, - Filters: defaultMetricFilters, + 
Filters: DefaultMetricFilters, }, RefreshConfig: &RefreshConfig{ RefreshInterval: 2 * time.Second, }, }, - expectedEnabled: true, }, "errorsWithInvalidRefreshInterval": { resp: &consul_telemetry_service.AgentTelemetryConfigOK{ @@ -209,7 +179,6 @@ func TestConvertAgentTelemetryResponse(t *testing.T) { } require.NoError(t, err) require.Equal(t, tc.expectedTelemetryCfg, telemetryCfg) - require.Equal(t, tc.expectedEnabled, telemetryCfg.MetricsEnabled()) }) } } @@ -231,10 +200,10 @@ func TestConvertMetricEndpoint(t *testing.T) { override: "https://override.com", expected: "https://override.com/v1/metrics", }, - "noErrorWithEmptyEndpoints": { + "errorWithEmptyEndpoints": { endpoint: "", override: "", - expected: "", + wantErr: errEmptyEndpoint, }, "errorWithInvalidURL": { endpoint: " ", @@ -252,12 +221,6 @@ func TestConvertMetricEndpoint(t *testing.T) { return } - if tc.expected == "" { - require.Nil(t, u) - require.NoError(t, err) - return - } - require.NotNil(t, u) require.NoError(t, err) require.Equal(t, tc.expected, u.String()) @@ -277,13 +240,13 @@ func TestConvertMetricFilters(t *testing.T) { }{ "badFilterRegex": { filters: []string{"(*LF)"}, - expectedRegexString: defaultMetricFilters.String(), + expectedRegexString: DefaultMetricFilters.String(), matches: []string{"consul.raft.peers", "consul.mem.heap_size"}, wantMatch: true, }, "emptyRegex": { filters: []string{}, - expectedRegexString: defaultMetricFilters.String(), + expectedRegexString: DefaultMetricFilters.String(), matches: []string{"consul.raft.peers", "consul.mem.heap_size"}, wantMatch: true, }, diff --git a/agent/hcp/deps.go b/agent/hcp/deps.go index e098bfc65eec..7bf384747dbd 100644 --- a/agent/hcp/deps.go +++ b/agent/hcp/deps.go @@ -6,19 +6,19 @@ package hcp import ( "context" "fmt" - "time" "github.com/armon/go-metrics" - hcpclient "github.com/hashicorp/consul/agent/hcp/client" + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/agent/hcp/client" 
"github.com/hashicorp/consul/agent/hcp/config" "github.com/hashicorp/consul/agent/hcp/scada" "github.com/hashicorp/consul/agent/hcp/telemetry" - "github.com/hashicorp/go-hclog" ) // Deps contains the interfaces that the rest of Consul core depends on for HCP integration. type Deps struct { - Client hcpclient.Client + Client client.Client Provider scada.Provider Sink metrics.MetricSink } @@ -27,7 +27,7 @@ func NewDeps(cfg config.CloudConfig, logger hclog.Logger) (Deps, error) { ctx := context.Background() ctx = hclog.WithContext(ctx, logger) - client, err := hcpclient.NewClient(cfg) + hcpClient, err := client.NewClient(cfg) if err != nil { return Deps{}, fmt.Errorf("failed to init client: %w", err) } @@ -37,50 +37,33 @@ func NewDeps(cfg config.CloudConfig, logger hclog.Logger) (Deps, error) { return Deps{}, fmt.Errorf("failed to init scada: %w", err) } - metricsClient, err := hcpclient.NewMetricsClient(ctx, &cfg) + metricsClient, err := client.NewMetricsClient(ctx, &cfg) if err != nil { logger.Error("failed to init metrics client", "error", err) return Deps{}, fmt.Errorf("failed to init metrics client: %w", err) } - sink, err := sink(ctx, client, metricsClient) + sink, err := sink(ctx, metricsClient, NewHCPProvider(ctx, hcpClient)) if err != nil { // Do not prevent server start if sink init fails, only log error. logger.Error("failed to init sink", "error", err) } return Deps{ - Client: client, + Client: hcpClient, Provider: provider, Sink: sink, }, nil } // sink initializes an OTELSink which forwards Consul metrics to HCP. -// The sink is only initialized if the server is registered with the management plane (CCM). // This step should not block server initialization, errors are returned, only to be logged. 
func sink( ctx context.Context, - hcpClient hcpclient.Client, metricsClient telemetry.MetricsClient, + cfgProvider *hcpProviderImpl, ) (metrics.MetricSink, error) { - logger := hclog.FromContext(ctx).Named("sink") - reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - telemetryCfg, err := hcpClient.FetchTelemetryConfig(reqCtx) - if err != nil { - return nil, fmt.Errorf("failed to fetch telemetry config: %w", err) - } - - if !telemetryCfg.MetricsEnabled() { - return nil, nil - } - - cfgProvider, err := NewHCPProvider(ctx, hcpClient, telemetryCfg) - if err != nil { - return nil, fmt.Errorf("failed to init config provider: %w", err) - } + logger := hclog.FromContext(ctx) reader := telemetry.NewOTELReader(metricsClient, cfgProvider) sinkOpts := &telemetry.OTELSinkOpts{ @@ -90,7 +73,7 @@ func sink( sink, err := telemetry.NewOTELSink(ctx, sinkOpts) if err != nil { - return nil, fmt.Errorf("failed create OTELSink: %w", err) + return nil, fmt.Errorf("failed to create OTELSink: %w", err) } logger.Debug("initialized HCP metrics sink") diff --git a/agent/hcp/deps_test.go b/agent/hcp/deps_test.go index 101fe076cb69..7375dad68cb7 100644 --- a/agent/hcp/deps_test.go +++ b/agent/hcp/deps_test.go @@ -5,16 +5,10 @@ package hcp import ( "context" - "fmt" - "net/url" - "regexp" "testing" - "time" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/agent/hcp/client" "github.com/hashicorp/consul/agent/hcp/telemetry" ) @@ -24,79 +18,11 @@ type mockMetricsClient struct { func TestSink(t *testing.T) { t.Parallel() - for name, test := range map[string]struct { - expect func(*client.MockClient) - wantErr string - expectedSink bool - }{ - "success": { - expect: func(mockClient *client.MockClient) { - u, _ := url.Parse("https://test.com/v1/metrics") - filters, _ := regexp.Compile("test") - mt := mockTelemetryConfig(1*time.Second, u, filters) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) 
- }, - expectedSink: true, - }, - "noSinkWhenFetchTelemetryConfigFails": { - expect: func(mockClient *client.MockClient) { - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, fmt.Errorf("fetch failed")) - }, - wantErr: "failed to fetch telemetry config", - }, - "noSinkWhenServerNotRegisteredWithCCM": { - expect: func(mockClient *client.MockClient) { - mt := mockTelemetryConfig(1*time.Second, nil, nil) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) - }, - }, - "noSinkWhenTelemetryConfigProviderInitFails": { - expect: func(mockClient *client.MockClient) { - u, _ := url.Parse("https://test.com/v1/metrics") - // Bad refresh interval forces ConfigProvider creation failure. - mt := mockTelemetryConfig(0*time.Second, u, nil) - mockClient.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mt, nil) - }, - wantErr: "failed to init config provider", - }, - } { - test := test - t.Run(name, func(t *testing.T) { - t.Parallel() - c := client.NewMockClient(t) - mc := mockMetricsClient{} - test.expect(c) - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s, err := sink(ctx, mockMetricsClient{}, &hcpProviderImpl{}) - s, err := sink(ctx, c, mc) - - if test.wantErr != "" { - require.NotNil(t, err) - require.Contains(t, err.Error(), test.wantErr) - require.Nil(t, s) - return - } - - if !test.expectedSink { - require.Nil(t, s) - require.Nil(t, err) - return - } - - require.NotNil(t, s) - }) - } -} - -func mockTelemetryConfig(refreshInterval time.Duration, metricsEndpoint *url.URL, filters *regexp.Regexp) *client.TelemetryConfig { - return &client.TelemetryConfig{ - MetricsConfig: &client.MetricsConfig{ - Endpoint: metricsEndpoint, - Filters: filters, - }, - RefreshConfig: &client.RefreshConfig{ - RefreshInterval: refreshInterval, - }, - } + require.NotNil(t, s) + require.NoError(t, err) } diff --git a/agent/hcp/telemetry/otel_exporter.go b/agent/hcp/telemetry/otel_exporter.go index 
461876333655..050d5660668d 100644 --- a/agent/hcp/telemetry/otel_exporter.go +++ b/agent/hcp/telemetry/otel_exporter.go @@ -23,7 +23,9 @@ type MetricsClient interface { // EndpointProvider provides the endpoint where metrics are exported to by the OTELExporter. // EndpointProvider exposes the GetEndpoint() interface method to fetch the endpoint. // This abstraction layer offers flexibility, in particular for dynamic configuration or changes to the endpoint. +// The OTELExporter calls the Disabled interface to verify that it should actually export metrics. type EndpointProvider interface { + Disabled GetEndpoint() *url.URL } @@ -68,6 +70,10 @@ func (e *otelExporter) Aggregation(kind metric.InstrumentKind) aggregation.Aggre // Export serializes and transmits metric data to a receiver. func (e *otelExporter) Export(ctx context.Context, metrics *metricdata.ResourceMetrics) error { + if e.endpointProvider.IsDisabled() { + return nil + } + endpoint := e.endpointProvider.GetEndpoint() if endpoint == nil { return nil diff --git a/agent/hcp/telemetry/otel_exporter_test.go b/agent/hcp/telemetry/otel_exporter_test.go index 610fbb44e74b..ebe6486abca8 100644 --- a/agent/hcp/telemetry/otel_exporter_test.go +++ b/agent/hcp/telemetry/otel_exporter_test.go @@ -34,9 +34,11 @@ func (m *mockMetricsClient) ExportMetrics(ctx context.Context, protoMetrics *met type mockEndpointProvider struct { endpoint *url.URL + disabled bool } func (m *mockEndpointProvider) GetEndpoint() *url.URL { return m.endpoint } +func (m *mockEndpointProvider) IsDisabled() bool { return m.disabled } func TestTemporality(t *testing.T) { t.Parallel() @@ -80,13 +82,20 @@ func TestExport(t *testing.T) { client MetricsClient provider EndpointProvider }{ + "earlyReturnDisabledProvider": { + client: &mockMetricsClient{}, + provider: &mockEndpointProvider{ + disabled: true, + }, + }, "earlyReturnWithoutEndpoint": { client: &mockMetricsClient{}, provider: &mockEndpointProvider{}, }, "earlyReturnWithoutScopeMetrics": { - 
client: &mockMetricsClient{}, - metrics: mutateMetrics(nil), + client: &mockMetricsClient{}, + metrics: mutateMetrics(nil), + provider: &mockEndpointProvider{}, }, "earlyReturnWithoutMetrics": { client: &mockMetricsClient{}, @@ -94,6 +103,7 @@ func TestExport(t *testing.T) { {Metrics: []metricdata.Metrics{}}, }, ), + provider: &mockEndpointProvider{}, }, "errorWithExportFailure": { client: &mockMetricsClient{ @@ -110,6 +120,9 @@ func TestExport(t *testing.T) { }, }, ), + provider: &mockEndpointProvider{ + endpoint: &url.URL{}, + }, wantErr: "failed to export metrics", }, } { diff --git a/agent/hcp/telemetry/otel_sink.go b/agent/hcp/telemetry/otel_sink.go index 7770f7eaaf16..12aae982016c 100644 --- a/agent/hcp/telemetry/otel_sink.go +++ b/agent/hcp/telemetry/otel_sink.go @@ -36,8 +36,15 @@ const ( defaultExportTimeout = 30 * time.Second ) +// Disabled should be implemented to turn on/off metrics processing +type Disabled interface { + // IsDisabled() can return true disallow the sink from accepting metrics. + IsDisabled() bool +} + // ConfigProvider is required to provide custom metrics processing. type ConfigProvider interface { + Disabled // GetLabels should return a set of OTEL attributes added by default all metrics. GetLabels() map[string]string @@ -147,8 +154,11 @@ func (o *OTELSink) IncrCounter(key []string, val float32) { // AddSampleWithLabels emits a Consul gauge metric that gets // registed by an OpenTelemetry Histogram instrument. func (o *OTELSink) SetGaugeWithLabels(key []string, val float32, labels []gometrics.Label) { - k := o.flattenKey(key) + if o.cfgProvider.IsDisabled() { + return + } + k := o.flattenKey(key) if !o.allowedMetric(k) { return } @@ -175,8 +185,11 @@ func (o *OTELSink) SetGaugeWithLabels(key []string, val float32, labels []gometr // AddSampleWithLabels emits a Consul sample metric that gets registed by an OpenTelemetry Histogram instrument. 
func (o *OTELSink) AddSampleWithLabels(key []string, val float32, labels []gometrics.Label) { - k := o.flattenKey(key) + if o.cfgProvider.IsDisabled() { + return + } + k := o.flattenKey(key) if !o.allowedMetric(k) { return } @@ -201,8 +214,11 @@ func (o *OTELSink) AddSampleWithLabels(key []string, val float32, labels []gomet // IncrCounterWithLabels emits a Consul counter metric that gets registed by an OpenTelemetry Histogram instrument. func (o *OTELSink) IncrCounterWithLabels(key []string, val float32, labels []gometrics.Label) { - k := o.flattenKey(key) + if o.cfgProvider.IsDisabled() { + return + } + k := o.flattenKey(key) if !o.allowedMetric(k) { return } diff --git a/agent/hcp/telemetry/otel_sink_test.go b/agent/hcp/telemetry/otel_sink_test.go index 13c310b34ca0..30bbeee519b7 100644 --- a/agent/hcp/telemetry/otel_sink_test.go +++ b/agent/hcp/telemetry/otel_sink_test.go @@ -21,8 +21,9 @@ import ( ) type mockConfigProvider struct { - filter *regexp.Regexp - labels map[string]string + filter *regexp.Regexp + labels map[string]string + disabled bool } func (m *mockConfigProvider) GetLabels() map[string]string { @@ -33,6 +34,10 @@ func (m *mockConfigProvider) GetFilters() *regexp.Regexp { return m.filter } +func (m *mockConfigProvider) IsDisabled() bool { + return m.disabled +} + var ( expectedResource = resource.NewSchemaless() @@ -223,6 +228,29 @@ func TestOTELSink(t *testing.T) { isSame(t, expectedSinkMetrics, collected) } +func TestOTELSinkDisabled(t *testing.T) { + reader := metric.NewManualReader() + ctx := context.Background() + + sink, err := NewOTELSink(ctx, &OTELSinkOpts{ + ConfigProvider: &mockConfigProvider{ + filter: regexp.MustCompile("raft"), + disabled: true, + }, + Reader: reader, + }) + require.NoError(t, err) + + sink.SetGauge([]string{"consul", "raft", "gauge"}, 1) + sink.IncrCounter([]string{"consul", "raft", "counter"}, 1) + sink.AddSample([]string{"consul", "raft", "sample"}, 1) + + var collected metricdata.ResourceMetrics + err = 
reader.Collect(ctx, &collected) + require.NoError(t, err) + require.Empty(t, collected.ScopeMetrics) +} + func TestLabelsToAttributes(t *testing.T) { for name, test := range map[string]struct { providerLabels map[string]string diff --git a/agent/hcp/telemetry/otlp_transform.go b/agent/hcp/telemetry/otlp_transform.go index a244f0f1a5f6..907e7922ad98 100644 --- a/agent/hcp/telemetry/otlp_transform.go +++ b/agent/hcp/telemetry/otlp_transform.go @@ -16,8 +16,8 @@ import ( ) var ( - aggregationErr = errors.New("unsupported aggregation") - temporalityErr = errors.New("unsupported temporality") + errAggregaton = errors.New("unsupported aggregation") + errTemporality = errors.New("unsupported temporality") ) // isEmpty verifies if the given OTLP protobuf metrics contains metric data. @@ -99,7 +99,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { } case metricdata.Sum[float64]: if a.Temporality != metricdata.CumulativeTemporality { - return out, fmt.Errorf("error: %w: %T", temporalityErr, a) + return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errTemporality, a) } out.Data = &mpb.Metric_Sum{ Sum: &mpb.Sum{ @@ -110,7 +110,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { } case metricdata.Histogram[float64]: if a.Temporality != metricdata.CumulativeTemporality { - return out, fmt.Errorf("error: %w: %T", temporalityErr, a) + return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errTemporality, a) } out.Data = &mpb.Metric_Histogram{ Histogram: &mpb.Histogram{ @@ -119,7 +119,7 @@ func metricTypeToPB(m metricdata.Metrics) (*mpb.Metric, error) { }, } default: - return out, fmt.Errorf("error: %w: %T", aggregationErr, a) + return out, fmt.Errorf("failed to convert metric to otel format: %w: %T", errAggregaton, a) } return out, nil } diff --git a/agent/hcp/telemetry/otlp_transform_test.go b/agent/hcp/telemetry/otlp_transform_test.go index 04ff40382dda..d67df73d8343 100644 --- 
a/agent/hcp/telemetry/otlp_transform_test.go +++ b/agent/hcp/telemetry/otlp_transform_test.go @@ -260,15 +260,15 @@ func TestTransformOTLP(t *testing.T) { // MetricType Error Test Cases _, err := metricTypeToPB(invalidHistTemporality) require.Error(t, err) - require.ErrorIs(t, err, temporalityErr) + require.ErrorIs(t, err, errTemporality) _, err = metricTypeToPB(invalidSumTemporality) require.Error(t, err) - require.ErrorIs(t, err, temporalityErr) + require.ErrorIs(t, err, errTemporality) _, err = metricTypeToPB(invalidSumAgg) require.Error(t, err) - require.ErrorIs(t, err, aggregationErr) + require.ErrorIs(t, err, errAggregaton) // Metrics Test Case m := metricsToPB(inputMetrics) diff --git a/agent/hcp/telemetry_provider.go b/agent/hcp/telemetry_provider.go index 870d3b3685a4..22bb0f2f00b8 100644 --- a/agent/hcp/telemetry_provider.go +++ b/agent/hcp/telemetry_provider.go @@ -5,13 +5,13 @@ package hcp import ( "context" - "fmt" "net/url" "regexp" "sync" "time" "github.com/armon/go-metrics" + "github.com/go-openapi/runtime" "github.com/hashicorp/go-hclog" "github.com/hashicorp/consul/agent/hcp/client" @@ -23,6 +23,8 @@ var ( internalMetricRefreshFailure []string = []string{"hcp", "telemetry_config_provider", "refresh", "failure"} // internalMetricRefreshSuccess is a metric to monitor refresh successes. internalMetricRefreshSuccess []string = []string{"hcp", "telemetry_config_provider", "refresh", "success"} + // defaultTelemetryConfigRefreshInterval is a default fallback in case the first HCP fetch fails. + defaultTelemetryConfigRefreshInterval = 1 * time.Minute ) // Ensure hcpProviderImpl implements telemetry provider interfaces. @@ -46,47 +48,50 @@ type hcpProviderImpl struct { // dynamicConfig is a set of configurable settings for metrics collection, processing and export. // fields MUST be exported to compute hash for equals method. 
type dynamicConfig struct { - Endpoint *url.URL - Labels map[string]string - Filters *regexp.Regexp + disabled bool + endpoint *url.URL + labels map[string]string + filters *regexp.Regexp // refreshInterval controls the interval at which configuration is fetched from HCP to refresh config. - RefreshInterval time.Duration + refreshInterval time.Duration } -// NewHCPProvider initializes and starts a HCP Telemetry provider with provided params. -func NewHCPProvider(ctx context.Context, hcpClient client.Client, telemetryCfg *client.TelemetryConfig) (*hcpProviderImpl, error) { - refreshInterval := telemetryCfg.RefreshConfig.RefreshInterval - // refreshInterval must be greater than 0, otherwise time.Ticker panics. - if refreshInterval <= 0 { - return nil, fmt.Errorf("invalid refresh interval: %d", refreshInterval) - } - - cfg := &dynamicConfig{ - Endpoint: telemetryCfg.MetricsConfig.Endpoint, - Labels: telemetryCfg.MetricsConfig.Labels, - Filters: telemetryCfg.MetricsConfig.Filters, - RefreshInterval: refreshInterval, +// defaultDisabledCfg disables metric collection and contains default config values. +func defaultDisabledCfg() *dynamicConfig { + return &dynamicConfig{ + labels: map[string]string{}, + filters: client.DefaultMetricFilters, + refreshInterval: defaultTelemetryConfigRefreshInterval, + endpoint: nil, + disabled: true, } +} - t := &hcpProviderImpl{ - cfg: cfg, +// NewHCPProvider initializes and starts a HCP Telemetry provider. +func NewHCPProvider(ctx context.Context, hcpClient client.Client) *hcpProviderImpl { + h := &hcpProviderImpl{ + // Initialize with default config values. + cfg: defaultDisabledCfg(), hcpClient: hcpClient, } - go t.run(ctx, refreshInterval) + go h.run(ctx) - return t, nil + return h } // run continously checks for updates to the telemetry configuration by making a request to HCP. 
-func (h *hcpProviderImpl) run(ctx context.Context, refreshInterval time.Duration) { - ticker := time.NewTicker(refreshInterval) +func (h *hcpProviderImpl) run(ctx context.Context) { + // Try to initialize config once before starting periodic fetch. + h.updateConfig(ctx) + + ticker := time.NewTicker(h.cfg.refreshInterval) defer ticker.Stop() for { select { case <-ticker.C: - if newCfg := h.getUpdate(ctx); newCfg != nil { - ticker.Reset(newCfg.RefreshInterval) + if newRefreshInterval := h.updateConfig(ctx); newRefreshInterval > 0 { + ticker.Reset(newRefreshInterval) } case <-ctx.Done(): return @@ -94,9 +99,8 @@ func (h *hcpProviderImpl) run(ctx context.Context, refreshInterval time.Duration } } -// getUpdate makes a HTTP request to HCP to return a new metrics configuration -// and updates the hcpProviderImpl. -func (h *hcpProviderImpl) getUpdate(ctx context.Context) *dynamicConfig { +// updateConfig makes a HTTP request to HCP to update metrics configuration held in the provider. +func (h *hcpProviderImpl) updateConfig(ctx context.Context) time.Duration { logger := hclog.FromContext(ctx).Named("telemetry_config_provider") ctx, cancel := context.WithTimeout(ctx, 5*time.Second) @@ -104,9 +108,18 @@ func (h *hcpProviderImpl) getUpdate(ctx context.Context) *dynamicConfig { telemetryCfg, err := h.hcpClient.FetchTelemetryConfig(ctx) if err != nil { + // Only disable metrics on 404 or 401 to handle the case of an unlinked cluster. + // For other errors such as 5XX ones, we continue metrics collection, as these are potentially transient server-side errors. 
+ apiErr, ok := err.(*runtime.APIError) + if ok && apiErr.IsClientError() { + disabledMetricsCfg := defaultDisabledCfg() + h.modifyDynamicCfg(disabledMetricsCfg) + return disabledMetricsCfg.refreshInterval + } + logger.Error("failed to fetch telemetry config from HCP", "error", err) metrics.IncrCounter(internalMetricRefreshFailure, 1) - return nil + return 0 } // newRefreshInterval of 0 or less can cause ticker Reset() panic. @@ -114,24 +127,29 @@ func (h *hcpProviderImpl) getUpdate(ctx context.Context) *dynamicConfig { if newRefreshInterval <= 0 { logger.Error("invalid refresh interval duration", "refreshInterval", newRefreshInterval) metrics.IncrCounter(internalMetricRefreshFailure, 1) - return nil + return 0 } - newDynamicConfig := &dynamicConfig{ - Filters: telemetryCfg.MetricsConfig.Filters, - Endpoint: telemetryCfg.MetricsConfig.Endpoint, - Labels: telemetryCfg.MetricsConfig.Labels, - RefreshInterval: newRefreshInterval, + newCfg := &dynamicConfig{ + filters: telemetryCfg.MetricsConfig.Filters, + endpoint: telemetryCfg.MetricsConfig.Endpoint, + labels: telemetryCfg.MetricsConfig.Labels, + refreshInterval: telemetryCfg.RefreshConfig.RefreshInterval, + disabled: telemetryCfg.MetricsConfig.Disabled, } - // Acquire write lock to update new configuration. + h.modifyDynamicCfg(newCfg) + + return newCfg.refreshInterval +} + +// modifyDynamicCfg acquires a write lock to update new configuration and emits a success metric. +func (h *hcpProviderImpl) modifyDynamicCfg(newCfg *dynamicConfig) { h.rw.Lock() - h.cfg = newDynamicConfig + h.cfg = newCfg h.rw.Unlock() metrics.IncrCounter(internalMetricRefreshSuccess, 1) - - return newDynamicConfig } // GetEndpoint acquires a read lock to return endpoint configuration for consumers. @@ -139,7 +157,7 @@ func (h *hcpProviderImpl) GetEndpoint() *url.URL { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.Endpoint + return h.cfg.endpoint } // GetFilters acquires a read lock to return filters configuration for consumers. 
@@ -147,7 +165,7 @@ func (h *hcpProviderImpl) GetFilters() *regexp.Regexp { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.Filters + return h.cfg.filters } // GetLabels acquires a read lock to return labels configuration for consumers. @@ -155,5 +173,13 @@ func (h *hcpProviderImpl) GetLabels() map[string]string { h.rw.RLock() defer h.rw.RUnlock() - return h.cfg.Labels + return h.cfg.labels +} + +// IsDisabled acquires a read lock and return true if metrics are enabled. +func (h *hcpProviderImpl) IsDisabled() bool { + h.rw.RLock() + defer h.rw.RUnlock() + + return h.cfg.disabled } diff --git a/agent/hcp/telemetry_provider_test.go b/agent/hcp/telemetry_provider_test.go index 0c20a5742e42..9e6405a516d3 100644 --- a/agent/hcp/telemetry_provider_test.go +++ b/agent/hcp/telemetry_provider_test.go @@ -5,6 +5,7 @@ package hcp import ( "context" + "errors" "fmt" "net/url" "regexp" @@ -14,6 +15,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/go-openapi/runtime" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -39,64 +41,49 @@ type testConfig struct { endpoint string labels map[string]string refreshInterval time.Duration + disabled bool } -func TestNewTelemetryConfigProvider(t *testing.T) { +func TestNewTelemetryConfigProvider_DefaultConfig(t *testing.T) { t.Parallel() - for name, tc := range map[string]struct { - testInputs *testConfig - wantErr string - }{ - "success": { - testInputs: &testConfig{ - refreshInterval: 1 * time.Second, - }, - }, - "failsWithInvalidRefreshInterval": { - testInputs: &testConfig{ - refreshInterval: 0 * time.Second, - }, - wantErr: "invalid refresh interval", - }, - } { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // Initialize new provider, but fail all HCP fetches. 
+ mc := client.NewMockClient(t) + mc.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, errors.New("failed to fetch config")) - testCfg, err := testTelemetryCfg(tc.testInputs) - require.NoError(t, err) + provider := NewHCPProvider(ctx, mc) + provider.updateConfig(ctx) - cfgProvider, err := NewHCPProvider(ctx, client.NewMockClient(t), testCfg) - if tc.wantErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tc.wantErr) - require.Nil(t, cfgProvider) - return - } - require.NotNil(t, cfgProvider) - }) + // Assert provider has default configuration and metrics processing is disabled. + defaultCfg := &dynamicConfig{ + labels: map[string]string{}, + filters: client.DefaultMetricFilters, + refreshInterval: defaultTelemetryConfigRefreshInterval, + endpoint: nil, + disabled: true, } + require.Equal(t, defaultCfg, provider.cfg) } -func TestTelemetryConfigProviderGetUpdate(t *testing.T) { +func TestTelemetryConfigProvider_UpdateConfig(t *testing.T) { for name, tc := range map[string]struct { - mockExpect func(*client.MockClient) - metricKey string - optsInputs *testConfig - expected *testConfig + mockExpect func(*client.MockClient) + metricKey string + initCfg *dynamicConfig + expected *dynamicConfig + expectedInterval time.Duration }{ "noChanges": { - optsInputs: &testConfig{ + initCfg: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }, + }), mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", @@ -108,25 +95,26 @@ func TestTelemetryConfigProviderGetUpdate(t *testing.T) { }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: &testConfig{ + expected: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", labels: map[string]string{ "test_label": "123", }, filters: "test", refreshInterval: testRefreshInterval, - }, 
- metricKey: testMetricKeySuccess, + }), + metricKey: testMetricKeySuccess, + expectedInterval: testRefreshInterval, }, "newConfig": { - optsInputs: &testConfig{ + initCfg: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: 2 * time.Second, - }, + }), mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ endpoint: "http://newendpoint/v1/metrics", @@ -138,83 +126,136 @@ func TestTelemetryConfigProviderGetUpdate(t *testing.T) { }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: &testConfig{ + expected: testDynamicCfg(&testConfig{ endpoint: "http://newendpoint/v1/metrics", filters: "consul", labels: map[string]string{ "new_label": "1234", }, refreshInterval: 2 * time.Second, + }), + expectedInterval: 2 * time.Second, + metricKey: testMetricKeySuccess, + }, + "newConfigMetricsDisabled": { + initCfg: testDynamicCfg(&testConfig{ + endpoint: "http://test.com/v1/metrics", + filters: "test", + labels: map[string]string{ + "test_label": "123", + }, + refreshInterval: 2 * time.Second, + }), + mockExpect: func(m *client.MockClient) { + mockCfg, _ := testTelemetryCfg(&testConfig{ + endpoint: "", + filters: "consul", + labels: map[string]string{ + "new_label": "1234", + }, + refreshInterval: 2 * time.Second, + disabled: true, + }) + m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - metricKey: testMetricKeySuccess, + expected: testDynamicCfg(&testConfig{ + endpoint: "", + filters: "consul", + labels: map[string]string{ + "new_label": "1234", + }, + refreshInterval: 2 * time.Second, + disabled: true, + }), + metricKey: testMetricKeySuccess, + expectedInterval: 2 * time.Second, }, "sameConfigInvalidRefreshInterval": { - optsInputs: &testConfig{ + initCfg: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, 
refreshInterval: testRefreshInterval, - }, + }), mockExpect: func(m *client.MockClient) { mockCfg, _ := testTelemetryCfg(&testConfig{ refreshInterval: 0 * time.Second, }) m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(mockCfg, nil) }, - expected: &testConfig{ + expected: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", labels: map[string]string{ "test_label": "123", }, filters: "test", refreshInterval: testRefreshInterval, - }, - metricKey: testMetricKeyFailure, + }), + metricKey: testMetricKeyFailure, + expectedInterval: 0, }, "sameConfigHCPClientFailure": { - optsInputs: &testConfig{ + initCfg: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, - }, + }), mockExpect: func(m *client.MockClient) { m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, fmt.Errorf("failure")) }, - expected: &testConfig{ + expected: testDynamicCfg(&testConfig{ + endpoint: "http://test.com/v1/metrics", + filters: "test", + labels: map[string]string{ + "test_label": "123", + }, + refreshInterval: testRefreshInterval, + }), + metricKey: testMetricKeyFailure, + expectedInterval: 0, + }, + "disableMetrics404": { + initCfg: testDynamicCfg(&testConfig{ endpoint: "http://test.com/v1/metrics", filters: "test", labels: map[string]string{ "test_label": "123", }, refreshInterval: testRefreshInterval, + }), + mockExpect: func(m *client.MockClient) { + err := runtime.NewAPIError("404 failure", nil, 404) + m.EXPECT().FetchTelemetryConfig(mock.Anything).Return(nil, err) }, - metricKey: testMetricKeyFailure, + expected: defaultDisabledCfg(), + metricKey: testMetricKeySuccess, + expectedInterval: defaultTelemetryConfigRefreshInterval, }, } { + tc := tc t.Run(name, func(t *testing.T) { sink := initGlobalSink() mockClient := client.NewMockClient(t) tc.mockExpect(mockClient) - dynamicCfg, err := testDynamicCfg(tc.optsInputs) - require.NoError(t, err) - 
provider := &hcpProviderImpl{ hcpClient: mockClient, - cfg: dynamicCfg, + cfg: tc.initCfg, } - provider.getUpdate(context.Background()) + newInterval := provider.updateConfig(context.Background()) + require.Equal(t, tc.expectedInterval, newInterval) // Verify endpoint provider returns correct config values. - require.Equal(t, tc.expected.endpoint, provider.GetEndpoint().String()) - require.Equal(t, tc.expected.filters, provider.GetFilters().String()) + require.Equal(t, tc.expected.endpoint, provider.GetEndpoint()) + require.Equal(t, tc.expected.filters, provider.GetFilters()) require.Equal(t, tc.expected.labels, provider.GetLabels()) + require.Equal(t, tc.expected.disabled, provider.IsDisabled()) // Verify count for transform success metric. interval := sink.Data()[0] @@ -292,8 +333,7 @@ func TestTelemetryConfigProvider_Race(t *testing.T) { } // Start the provider goroutine, which fetches client TelemetryConfig every RefreshInterval. - provider, err := NewHCPProvider(ctx, m, m.cfg) - require.NoError(t, err) + provider := NewHCPProvider(ctx, m) for count := 0; count < testRaceWriteSampleCount; count++ { // Force a TelemetryConfig value change in the mockRaceClient. @@ -301,7 +341,7 @@ func TestTelemetryConfigProvider_Race(t *testing.T) { require.NoError(t, err) // Force provider to obtain new client TelemetryConfig immediately. // This call is necessary to guarantee TelemetryConfig changes to assert on expected values below. - provider.getUpdate(context.Background()) + provider.updateConfig(context.Background()) // Start goroutines to access label configuration. wg := &sync.WaitGroup{} @@ -345,22 +385,20 @@ func initGlobalSink() *metrics.InmemSink { } // testDynamicCfg converts testConfig inputs to a dynamicConfig to be used in tests. 
-func testDynamicCfg(testCfg *testConfig) (*dynamicConfig, error) { - filters, err := regexp.Compile(testCfg.filters) - if err != nil { - return nil, err - } +func testDynamicCfg(testCfg *testConfig) *dynamicConfig { + filters, _ := regexp.Compile(testCfg.filters) - endpoint, err := url.Parse(testCfg.endpoint) - if err != nil { - return nil, err + var endpoint *url.URL + if testCfg.endpoint != "" { + endpoint, _ = url.Parse(testCfg.endpoint) } return &dynamicConfig{ - Endpoint: endpoint, - Filters: filters, - Labels: testCfg.labels, - RefreshInterval: testCfg.refreshInterval, - }, nil + endpoint: endpoint, + filters: filters, + labels: testCfg.labels, + refreshInterval: testCfg.refreshInterval, + disabled: testCfg.disabled, + } } // testTelemetryCfg converts testConfig inputs to a TelemetryConfig to be used in tests. @@ -370,15 +408,21 @@ func testTelemetryCfg(testCfg *testConfig) (*client.TelemetryConfig, error) { return nil, err } - endpoint, err := url.Parse(testCfg.endpoint) - if err != nil { - return nil, err + var endpoint *url.URL + if testCfg.endpoint != "" { + u, err := url.Parse(testCfg.endpoint) + if err != nil { + return nil, err + } + endpoint = u } + return &client.TelemetryConfig{ MetricsConfig: &client.MetricsConfig{ Endpoint: endpoint, Filters: filters, Labels: testCfg.labels, + Disabled: testCfg.disabled, }, RefreshConfig: &client.RefreshConfig{ RefreshInterval: testCfg.refreshInterval, diff --git a/go.mod b/go.mod index 4e5b45443455..2747f4b91b5e 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,7 @@ require ( github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.6.0 github.com/hashicorp/hcp-scada-provider v0.2.3 - github.com/hashicorp/hcp-sdk-go v0.55.0 + github.com/hashicorp/hcp-sdk-go v0.61.0 github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 github.com/hashicorp/memberlist v0.5.0 github.com/hashicorp/raft v1.5.0 @@ -244,6 +244,7 @@ require ( github.com/tklauser/numcpus v0.4.0 // indirect github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926 // indirect github.com/vmware/govmomi v0.18.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go.sum b/go.sum index 690c27b8af7c..92213557b715 100644 --- a/go.sum +++ b/go.sum @@ -565,8 +565,8 @@ github.com/hashicorp/hcl/v2 v2.6.0 h1:3krZOfGY6SziUXa6H9PJU6TyohHn7I+ARYnhbeNBz+ github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/hcp-scada-provider v0.2.3 h1:AarYR+/Pcv+cMvPdAlb92uOBmZfEH6ny4+DT+4NY2VQ= github.com/hashicorp/hcp-scada-provider v0.2.3/go.mod h1:ZFTgGwkzNv99PLQjTsulzaCplCzOTBh0IUQsPKzrQFo= -github.com/hashicorp/hcp-sdk-go v0.55.0 h1:T4sQtgQfQJOD0uucT4hS+GZI1FmoHAQMADj277W++xw= -github.com/hashicorp/hcp-sdk-go v0.55.0/go.mod h1:hZqky4HEzsKwvLOt4QJlZUrjeQmb4UCZUhDP2HyQFfc= +github.com/hashicorp/hcp-sdk-go v0.61.0 h1:x4hJ8SlLI5WCE8Uzcu4q5jfdOEz/hFxfUkhAdoFdzSg= +github.com/hashicorp/hcp-sdk-go v0.61.0/go.mod h1:xP7wmWAmdMxs/7+ovH3jZn+MCDhHRj50Rn+m7JIY3Ck= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -937,6 +937,8 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer 
v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= diff --git a/test-integ/go.mod b/test-integ/go.mod index df38ac3f2f1b..76c91af83ca9 100644 --- a/test-integ/go.mod +++ b/test-integ/go.mod @@ -120,7 +120,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl/v2 v2.16.2 // indirect github.com/hashicorp/hcp-scada-provider v0.2.3 // indirect - github.com/hashicorp/hcp-sdk-go v0.55.0 // indirect + github.com/hashicorp/hcp-sdk-go v0.61.0 // indirect github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect @@ -189,6 +189,7 @@ require ( github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect github.com/testcontainers/testcontainers-go v0.22.0 // indirect github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/zclconf/go-cty v1.12.1 // indirect go.etcd.io/bbolt v1.3.7 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect diff --git a/test-integ/go.sum b/test-integ/go.sum index 015660056dd5..e662768b13d0 100644 --- a/test-integ/go.sum +++ b/test-integ/go.sum @@ -476,8 +476,8 @@ github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1 github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= github.com/hashicorp/hcp-scada-provider v0.2.3 h1:AarYR+/Pcv+cMvPdAlb92uOBmZfEH6ny4+DT+4NY2VQ= github.com/hashicorp/hcp-scada-provider v0.2.3/go.mod 
h1:ZFTgGwkzNv99PLQjTsulzaCplCzOTBh0IUQsPKzrQFo= -github.com/hashicorp/hcp-sdk-go v0.55.0 h1:T4sQtgQfQJOD0uucT4hS+GZI1FmoHAQMADj277W++xw= -github.com/hashicorp/hcp-sdk-go v0.55.0/go.mod h1:hZqky4HEzsKwvLOt4QJlZUrjeQmb4UCZUhDP2HyQFfc= +github.com/hashicorp/hcp-sdk-go v0.61.0 h1:x4hJ8SlLI5WCE8Uzcu4q5jfdOEz/hFxfUkhAdoFdzSg= +github.com/hashicorp/hcp-sdk-go v0.61.0/go.mod h1:xP7wmWAmdMxs/7+ovH3jZn+MCDhHRj50Rn+m7JIY3Ck= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -774,6 +774,8 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod index f7b3ab9b4774..8fde4fd94d8e 100644 --- a/test/integration/consul-container/go.mod +++ b/test/integration/consul-container/go.mod @@ -124,7 +124,7 @@ require ( github.com/hashicorp/go-syslog v1.0.0 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect 
github.com/hashicorp/hcp-scada-provider v0.2.3 // indirect - github.com/hashicorp/hcp-sdk-go v0.55.0 // indirect + github.com/hashicorp/hcp-sdk-go v0.61.0 // indirect github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect @@ -185,6 +185,7 @@ require ( github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect go.etcd.io/bbolt v1.3.7 // indirect go.mongodb.org/mongo-driver v1.11.0 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum index c5563b08a8bd..026ae8db72f3 100644 --- a/test/integration/consul-container/go.sum +++ b/test/integration/consul-container/go.sum @@ -470,8 +470,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcp-scada-provider v0.2.3 h1:AarYR+/Pcv+cMvPdAlb92uOBmZfEH6ny4+DT+4NY2VQ= github.com/hashicorp/hcp-scada-provider v0.2.3/go.mod h1:ZFTgGwkzNv99PLQjTsulzaCplCzOTBh0IUQsPKzrQFo= -github.com/hashicorp/hcp-sdk-go v0.55.0 h1:T4sQtgQfQJOD0uucT4hS+GZI1FmoHAQMADj277W++xw= -github.com/hashicorp/hcp-sdk-go v0.55.0/go.mod h1:hZqky4HEzsKwvLOt4QJlZUrjeQmb4UCZUhDP2HyQFfc= +github.com/hashicorp/hcp-sdk-go v0.61.0 h1:x4hJ8SlLI5WCE8Uzcu4q5jfdOEz/hFxfUkhAdoFdzSg= +github.com/hashicorp/hcp-sdk-go v0.61.0/go.mod h1:xP7wmWAmdMxs/7+ovH3jZn+MCDhHRj50Rn+m7JIY3Ck= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok= github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= 
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -763,6 +763,8 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/troubleshoot/go.sum b/troubleshoot/go.sum index ee3bf90645d5..18f1c8b90e96 100644 --- a/troubleshoot/go.sum +++ b/troubleshoot/go.sum @@ -161,7 +161,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU= +github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From 
255aa158db8ccbfd445205654e153742b127116d Mon Sep 17 00:00:00 2001 From: John Murret Date: Wed, 30 Aug 2023 11:31:40 -0600 Subject: [PATCH 2/7] update comments and docs about running envoy integration tests with the ENVOY_VERSION set. (#18614) update ENVOY_VERSION and documentation of it used in the bats envoy tests. Co-authored-by: github-team-consul-core --- Makefile | 1 + build-support/windows/build-consul-local-images.sh | 2 +- test/integration/connect/envoy/README.md | 1 + test/integration/connect/envoy/run-tests.sh | 2 +- test/integration/connect/envoy/run-tests.windows.sh | 2 +- 5 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 1fe0d1b75afd..4ca2072dbabe 100644 --- a/Makefile +++ b/Makefile @@ -334,6 +334,7 @@ other-consul: ## Checking for other consul instances # Use GO_TEST_FLAGS to run specific tests: # make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic" # NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment. +# You can also specify the envoy version (example: 1.27.0) setting the environment variable: ENVOY_VERSION=1.27.0 .PHONY: test-envoy-integ test-envoy-integ: $(ENVOY_INTEG_DEPS) ## Run integration tests. 
@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy diff --git a/build-support/windows/build-consul-local-images.sh b/build-support/windows/build-consul-local-images.sh index 8398e813bb17..4a22b35706c4 100644 --- a/build-support/windows/build-consul-local-images.sh +++ b/build-support/windows/build-consul-local-images.sh @@ -10,7 +10,7 @@ VERSION=${VERSION:-"1.16.0"} export VERSION # Build Windows Envoy Version 1.23.1 / 1.21.5 / 1.20.7 -ENVOY_VERSION=${ENVOY_VERSION:-"1.23.1"} +ENVOY_VERSION=${ENVOY_VERSION:-"1.27.0"} export ENVOY_VERSION echo "Building Images" diff --git a/test/integration/connect/envoy/README.md b/test/integration/connect/envoy/README.md index a97acc710a95..ef358e7a2b44 100644 --- a/test/integration/connect/envoy/README.md +++ b/test/integration/connect/envoy/README.md @@ -52,6 +52,7 @@ Where `case-basic` can be replaced by any directory name from this directory. * When tests fail in CI, logs and additional debugging data are available in the artifacts of the test run. * You can re-run the tests locally by running `make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/"` where `` is replaced with the name of the directory, e.g. `case-basic`. +* You can override the envoy version by specifying `ENVOY_VERSION=` eg. `ENVOY_VERSION=1.27.0 make test-envoy-integ`. * Locally, all the logs of the failed test will be available in `workdir` in this directory. * You can run with `DEBUG=1` to print out all the commands being run, e.g. `DEBUG=1 make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"`. * If you want to prevent the Docker containers from being spun down after test failure, add a `sleep 9999` to the `verify.bats` test case that's failing. 
diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index fa1d64312dba..900d3cbfd33b 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -15,7 +15,7 @@ DEBUG=${DEBUG:-} XDS_TARGET=${XDS_TARGET:-server} # ENVOY_VERSION to run each test against -ENVOY_VERSION=${ENVOY_VERSION:-"1.23.1"} +ENVOY_VERSION=${ENVOY_VERSION:-"1.27.0"} export ENVOY_VERSION export DOCKER_BUILDKIT=1 diff --git a/test/integration/connect/envoy/run-tests.windows.sh b/test/integration/connect/envoy/run-tests.windows.sh index b8ba4a1ee64d..c4dcff6ce486 100644 --- a/test/integration/connect/envoy/run-tests.windows.sh +++ b/test/integration/connect/envoy/run-tests.windows.sh @@ -20,7 +20,7 @@ DEBUG=${DEBUG:-} XDS_TARGET=${XDS_TARGET:-server} # ENVOY_VERSION to run each test against -ENVOY_VERSION=${ENVOY_VERSION:-"1.23.1"} +ENVOY_VERSION=${ENVOY_VERSION:-"1.27.0"} export ENVOY_VERSION export DOCKER_BUILDKIT=0 From f8d77f027af25dd03ab534a98d1056ec6999768b Mon Sep 17 00:00:00 2001 From: Dhia Ayachi Date: Thu, 31 Aug 2023 10:18:25 -0400 Subject: [PATCH 3/7] delete all v2 resources type when deleting a namespace (CE) (#18621) * add namespace scope to ServiceV1Alpha1Type * add CE portion of namespace deletion --- agent/consul/client_test.go | 9 ++++++++- agent/consul/server.go | 3 +++ internal/catalog/internal/types/service.go | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 9683d313041f..1d6fa28b526c 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -7,6 +7,9 @@ import ( "bytes" "context" "fmt" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh" + "github.com/hashicorp/consul/internal/resource/demo" "net" "os" "strings" @@ -559,6 +562,10 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { RPCHoldTimeout: c.RPCHoldTimeout, } 
connPool.SetRPCClientTimeout(c.RPCClientTimeout) + registry := resource.NewRegistry() + demo.RegisterTypes(registry) + mesh.RegisterTypes(registry) + catalog.RegisterTypes(registry) return Deps{ EventPublisher: stream.NewEventPublisher(10 * time.Second), Logger: logger, @@ -578,7 +585,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), XDSStreamLimiter: limiter.NewSessionLimiter(), - Registry: resource.NewRegistry(), + Registry: registry, } } diff --git a/agent/consul/server.go b/agent/consul/server.go index ad6e5b23f8b8..8a7ec654400f 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -461,6 +461,8 @@ type Server struct { // handles metrics reporting to HashiCorp reportingManager *reporting.ReportingManager + + registry resource.Registry } func (s *Server) DecrementBlockingQueries() uint64 { @@ -547,6 +549,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server, publisher: flat.EventPublisher, incomingRPCLimiter: incomingRPCLimiter, routineManager: routine.NewManager(logger.Named(logging.ConsulServer)), + registry: flat.Registry, } incomingRPCLimiter.Register(s) diff --git a/internal/catalog/internal/types/service.go b/internal/catalog/internal/types/service.go index 91c0c732e2e8..b79788ba6370 100644 --- a/internal/catalog/internal/types/service.go +++ b/internal/catalog/internal/types/service.go @@ -32,6 +32,7 @@ func RegisterService(r resource.Registry) { Type: ServiceV1Alpha1Type, Proto: &pbcatalog.Service{}, Validate: ValidateService, + Scope: resource.ScopeNamespace, }) } From 7b9e243297ddd4f6dc91d30b1c66aa2de413f86c Mon Sep 17 00:00:00 2001 From: Semir Patel Date: Thu, 31 Aug 2023 09:24:09 -0500 Subject: [PATCH 4/7] resource: Allow nil tenancy (#18618) --- .../services/resource/delete_test.go | 48 +++++++++++-------- .../services/resource/list_by_owner_test.go | 4 -- .../services/resource/read_test.go 
| 4 -- .../grpc-external/services/resource/server.go | 14 +++++- .../services/resource/server_test.go | 10 ++++ .../services/resource/write_status.go | 13 ++++- .../services/resource/write_status_test.go | 8 ++-- .../services/resource/write_test.go | 20 ++++++-- 8 files changed, 80 insertions(+), 41 deletions(-) diff --git a/agent/grpc-external/services/resource/delete_test.go b/agent/grpc-external/services/resource/delete_test.go index 3b8a8099021d..5f5d7d7e2192 100644 --- a/agent/grpc-external/services/resource/delete_test.go +++ b/agent/grpc-external/services/resource/delete_test.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/consul/acl/resolver" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource/demo" - "github.com/hashicorp/consul/internal/storage" "github.com/hashicorp/consul/proto-public/pbresource" ) @@ -34,10 +33,6 @@ func TestDelete_InputValidation(t *testing.T) { artistId.Type = nil return artistId }, - "no tenancy": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy = nil - return artistId - }, "no name": func(artistId, _ *pbresource.ID) *pbresource.ID { artistId.Name = "" return artistId @@ -102,21 +97,23 @@ func TestDelete_ACLs(t *testing.T) { t.Run(desc, func(t *testing.T) { server := testServer(t) client := testClient(t, server) - - mockACLResolver := &MockACLResolver{} - mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). - Return(tc.authz, nil) - server.ACLResolver = mockACLResolver demo.RegisterTypes(server.Registry) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - artist, err = server.Backend.WriteCAS(context.Background(), artist) + // Write test resource to delete. 
+ rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist}) require.NoError(t, err) - // exercise ACL - _, err = client.Delete(testContext(t), &pbresource.DeleteRequest{Id: artist.Id}) + // Mock is put in place after the above "write" since the "write" must also pass the ACL check. + mockACLResolver := &MockACLResolver{} + mockACLResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(tc.authz, nil) + server.ACLResolver = mockACLResolver + + // Exercise ACL. + _, err = client.Delete(testContext(t), &pbresource.DeleteRequest{Id: rsp.Resource.Id}) tc.assertErrFn(err) }) } @@ -134,15 +131,20 @@ func TestDelete_Success(t *testing.T) { recordLabel, err := demo.GenerateV1RecordLabel("LoonyTunes") require.NoError(t, err) - recordLabel, err = server.Backend.WriteCAS(ctx, recordLabel) + writeRsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: recordLabel}) require.NoError(t, err) + recordLabel = writeRsp.Resource + originalRecordLabelId := clone(recordLabel.Id) artist, err := demo.GenerateV2Artist() require.NoError(t, err) - artist, err = server.Backend.WriteCAS(ctx, artist) + writeRsp, err = client.Write(ctx, &pbresource.WriteRequest{Resource: artist}) require.NoError(t, err) + artist = writeRsp.Resource + originalArtistId := clone(artist.Id) - // Pick the resource to be deleted based on type's scope + // Pick the resource to be deleted based on type's scope and mod tenancy + // based on the tenancy test case. 
deleteId := modFn(artist.Id, recordLabel.Id) deleteReq := tc.deleteReqFn(recordLabel) if proto.Equal(deleteId.Type, demo.TypeV2Artist) { @@ -154,19 +156,25 @@ func TestDelete_Success(t *testing.T) { require.NoError(t, err) // Verify deleted - _, err = server.Backend.Read(ctx, storage.StrongConsistency, deleteId) + _, err = client.Read(ctx, &pbresource.ReadRequest{Id: deleteId}) require.Error(t, err) - require.ErrorIs(t, err, storage.ErrNotFound) + require.Equal(t, codes.NotFound.String(), status.Code(err).String()) + + // Derive tombstone name from resource that was deleted. + tname := tombstoneName(originalRecordLabelId) + if proto.Equal(deleteId.Type, demo.TypeV2Artist) { + tname = tombstoneName(originalArtistId) + } // Verify tombstone created _, err = client.Read(ctx, &pbresource.ReadRequest{ Id: &pbresource.ID{ - Name: tombstoneName(deleteReq.Id), + Name: tname, Type: resource.TypeV1Tombstone, Tenancy: deleteReq.Id.Tenancy, }, }) - require.NoError(t, err, "expected tombstome to be found") + require.NoError(t, err, "expected tombstone to be found") }) } }) diff --git a/agent/grpc-external/services/resource/list_by_owner_test.go b/agent/grpc-external/services/resource/list_by_owner_test.go index 4a60e3ac9460..11c6027c0b64 100644 --- a/agent/grpc-external/services/resource/list_by_owner_test.go +++ b/agent/grpc-external/services/resource/list_by_owner_test.go @@ -34,10 +34,6 @@ func TestListByOwner_InputValidation(t *testing.T) { artistId.Type = nil return artistId }, - "no tenancy": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy = nil - return artistId - }, "no name": func(artistId, _ *pbresource.ID) *pbresource.ID { artistId.Name = "" return artistId diff --git a/agent/grpc-external/services/resource/read_test.go b/agent/grpc-external/services/resource/read_test.go index ff312c82ca00..ff027fa726de 100644 --- a/agent/grpc-external/services/resource/read_test.go +++ b/agent/grpc-external/services/resource/read_test.go @@ -33,10 +33,6 @@ func 
TestRead_InputValidation(t *testing.T) { artistId.Type = nil return artistId }, - "no tenancy": func(artistId, _ *pbresource.ID) *pbresource.ID { - artistId.Tenancy = nil - return artistId - }, "no name": func(artistId, _ *pbresource.ID) *pbresource.ID { artistId.Name = "" return artistId diff --git a/agent/grpc-external/services/resource/server.go b/agent/grpc-external/services/resource/server.go index 29b18bd964c8..52d993ad4991 100644 --- a/agent/grpc-external/services/resource/server.go +++ b/agent/grpc-external/services/resource/server.go @@ -133,8 +133,6 @@ func validateId(id *pbresource.ID, errorPrefix string) error { switch { case id.Type == nil: field = "type" - case id.Tenancy == nil: - field = "tenancy" case id.Name == "": field = "name" } @@ -142,6 +140,18 @@ func validateId(id *pbresource.ID, errorPrefix string) error { if field != "" { return status.Errorf(codes.InvalidArgument, "%s.%s is required", errorPrefix, field) } + + // Better UX: Allow callers to pass in nil tenancy. Defaulting and inheritance of tenancy + // from the request token will take place further down in the call flow. + if id.Tenancy == nil { + id.Tenancy = &pbresource.Tenancy{ + Partition: "", + Namespace: "", + // TODO(spatel): Remove when peerTenancy introduced. 
+ PeerName: "local", + } + } + resource.Normalize(id.Tenancy) return nil diff --git a/agent/grpc-external/services/resource/server_test.go b/agent/grpc-external/services/resource/server_test.go index 7f745952e5e5..8f3967259a51 100644 --- a/agent/grpc-external/services/resource/server_test.go +++ b/agent/grpc-external/services/resource/server_test.go @@ -246,6 +246,11 @@ func tenancyCases() map[string]func(artistId, recordlabelId *pbresource.ID) *pbr id.Tenancy.Namespace = "" return id }, + "namespaced resource inherits tokens partition and namespace when tenancy nil": func(artistId, _ *pbresource.ID) *pbresource.ID { + id := clone(artistId) + id.Tenancy = nil + return id + }, "partitioned resource provides nonempty partition": func(_, recordLabelId *pbresource.ID) *pbresource.ID { return recordLabelId }, @@ -259,6 +264,11 @@ func tenancyCases() map[string]func(artistId, recordlabelId *pbresource.ID) *pbr id.Tenancy.Partition = "" return id }, + "partitioned resource inherits tokens partition when tenancy nil": func(_, recordLabelId *pbresource.ID) *pbresource.ID { + id := clone(recordLabelId) + id.Tenancy = nil + return id + }, } return tenancyCases } diff --git a/agent/grpc-external/services/resource/write_status.go b/agent/grpc-external/services/resource/write_status.go index 263814237eb3..0c0395a267ba 100644 --- a/agent/grpc-external/services/resource/write_status.go +++ b/agent/grpc-external/services/resource/write_status.go @@ -120,8 +120,6 @@ func (s *Server) validateWriteStatusRequest(req *pbresource.WriteStatusRequest) field = "id" case req.Id.Type == nil: field = "id.type" - case req.Id.Tenancy == nil: - field = "id.tenancy" case req.Id.Name == "": field = "id.name" case req.Id.Uid == "": @@ -169,6 +167,17 @@ func (s *Server) validateWriteStatusRequest(req *pbresource.WriteStatusRequest) return nil, status.Error(codes.InvalidArgument, "status.observed_generation is not valid") } + // Better UX: Allow callers to pass in nil tenancy. 
Defaulting and inheritance of tenancy + // from the request token will take place further down in the call flow. + if req.Id.Tenancy == nil { + req.Id.Tenancy = &pbresource.Tenancy{ + Partition: "", + Namespace: "", + // TODO(spatel): Remove when peerTenancy introduced. + PeerName: "local", + } + } + // Lowercase resource.Normalize(req.Id.Tenancy) diff --git a/agent/grpc-external/services/resource/write_status_test.go b/agent/grpc-external/services/resource/write_status_test.go index 622cbcccc746..5b71983475d9 100644 --- a/agent/grpc-external/services/resource/write_status_test.go +++ b/agent/grpc-external/services/resource/write_status_test.go @@ -85,10 +85,6 @@ func TestWriteStatus_InputValidation(t *testing.T) { typ: demo.TypeV2Artist, modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Type = nil }, }, - "no tenancy": { - typ: demo.TypeV2Artist, - modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy = nil }, - }, "no name": { typ: demo.TypeV2Artist, modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Name = "" }, @@ -236,6 +232,10 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) { req.Id.Tenancy.Namespace = "" }, }, + "namespaced resource inherits tokens partition and namespace when tenancy nil": { + scope: resource.ScopeNamespace, + modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy = nil }, + }, "partitioned resource provides nonempty partition": { scope: resource.ScopePartition, modFn: func(req *pbresource.WriteStatusRequest) {}, diff --git a/agent/grpc-external/services/resource/write_test.go b/agent/grpc-external/services/resource/write_test.go index d472a6ec7414..755f8ef2ccbc 100644 --- a/agent/grpc-external/services/resource/write_test.go +++ b/agent/grpc-external/services/resource/write_test.go @@ -42,10 +42,6 @@ func TestWrite_InputValidation(t *testing.T) { artist.Id.Type = nil return artist }, - "no tenancy": func(artist, _ *pbresource.Resource) *pbresource.Resource { - artist.Id.Tenancy = nil - return artist 
- }, "no name": func(artist, _ *pbresource.Resource) *pbresource.Resource { artist.Id.Name = "" return artist @@ -106,7 +102,7 @@ func TestWrite_OwnerValidation(t *testing.T) { }, "no owner tenancy": { modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Tenancy = nil }, - errorContains: "resource.owner.tenancy", + errorContains: "resource.owner", }, "no owner name": { modReqFn: func(req *pbresource.WriteRequest) { req.Resource.Owner.Name = "" }, @@ -253,6 +249,13 @@ func TestWrite_Create_Success(t *testing.T) { }, expectedTenancy: resource.DefaultNamespacedTenancy(), }, + "namespaced resource inherits tokens partition and namespace when tenancy nil": { + modFn: func(artist, _ *pbresource.Resource) *pbresource.Resource { + artist.Id.Tenancy = nil + return artist + }, + expectedTenancy: resource.DefaultNamespacedTenancy(), + }, "partitioned resource provides nonempty partition": { modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { return recordLabel @@ -273,6 +276,13 @@ func TestWrite_Create_Success(t *testing.T) { }, expectedTenancy: resource.DefaultPartitionedTenancy(), }, + "partitioned resource inherits tokens partition when tenancy nil": { + modFn: func(_, recordLabel *pbresource.Resource) *pbresource.Resource { + recordLabel.Id.Tenancy = nil + return recordLabel + }, + expectedTenancy: resource.DefaultPartitionedTenancy(), + }, // TODO(spatel): Add cluster scope tests when we have an actual cluster scoped resource (e.g. 
partition) } for desc, tc := range testCases { From f2ce472ae1e43a579b21996bac4b0ae7b7b01c70 Mon Sep 17 00:00:00 2001 From: Curt Bushko Date: Thu, 31 Aug 2023 10:56:59 -0400 Subject: [PATCH 5/7] PLAT-1192 - Run CI on smaller instances (#18624) Use smaller runners --- .github/scripts/get_runner_classes.sh | 30 +++--- .github/scripts/get_runner_classes_windows.sh | 30 +++--- .github/workflows/frontend.yml | 2 +- .github/workflows/go-tests.yml | 32 +++--- .../nightly-test-integrations-1.15.x.yml | 4 +- .../nightly-test-integrations-1.16.x.yml | 4 +- .../workflows/nightly-test-integrations.yml | 6 +- .../workflows/test-integrations-windows.yml | 5 +- .github/workflows/test-integrations.yml | 98 +------------------ 9 files changed, 60 insertions(+), 151 deletions(-) diff --git a/.github/scripts/get_runner_classes.sh b/.github/scripts/get_runner_classes.sh index abd3f1bce2d0..603ed20ec725 100755 --- a/.github/scripts/get_runner_classes.sh +++ b/.github/scripts/get_runner_classes.sh @@ -8,19 +8,19 @@ set -euo pipefail case "$GITHUB_REPOSITORY" in - *-enterprise) - # shellcheck disable=SC2129 - echo "compute-small=['self-hosted', 'linux', 'small']" >> "$GITHUB_OUTPUT" - echo "compute-medium=['self-hosted', 'linux', 'medium']" >> "$GITHUB_OUTPUT" - echo "compute-large=['self-hosted', 'linux', 'large']" >> "$GITHUB_OUTPUT" - # m5d.8xlarge is equivalent to our xl custom runner in CE - echo "compute-xl=['self-hosted', 'ondemand', 'linux', 'type=m5d.8xlarge']" >> "$GITHUB_OUTPUT" - ;; - *) - # shellcheck disable=SC2129 - echo "compute-small=['custom-linux-s-consul-latest']" >> "$GITHUB_OUTPUT" - echo "compute-medium=['custom-linux-m-consul-latest']" >> "$GITHUB_OUTPUT" - echo "compute-large=['custom-linux-l-consul-latest']" >> "$GITHUB_OUTPUT" - echo "compute-xl=['custom-linux-xl-consul-latest']" >> "$GITHUB_OUTPUT" - ;; +*-enterprise) + # shellcheck disable=SC2129 + echo "compute-small=['self-hosted', 'linux', 'small']" >>"$GITHUB_OUTPUT" + echo "compute-medium=['self-hosted', 
'linux', 'medium']" >>"$GITHUB_OUTPUT" + echo "compute-large=['self-hosted', 'linux', 'large']" >>"$GITHUB_OUTPUT" + # m5d.8xlarge is equivalent to our xl custom runner in CE + echo "compute-xl=['self-hosted', 'ondemand', 'linux', 'type=m6a.2xlarge']" >>"$GITHUB_OUTPUT" + ;; +*) + # shellcheck disable=SC2129 + echo "compute-small=['custom-linux-s-consul-latest']" >>"$GITHUB_OUTPUT" + echo "compute-medium=['custom-linux-m-consul-latest']" >>"$GITHUB_OUTPUT" + echo "compute-large=['custom-linux-l-consul-latest']" >>"$GITHUB_OUTPUT" + echo "compute-xl=['custom-linux-xl-consul-latest']" >>"$GITHUB_OUTPUT" + ;; esac diff --git a/.github/scripts/get_runner_classes_windows.sh b/.github/scripts/get_runner_classes_windows.sh index d9f46488798f..a29ea5debf75 100755 --- a/.github/scripts/get_runner_classes_windows.sh +++ b/.github/scripts/get_runner_classes_windows.sh @@ -8,19 +8,19 @@ set -euo pipefail case "$GITHUB_REPOSITORY" in - *-enterprise) - # shellcheck disable=SC2129 - echo "compute-small=['self-hosted', 'windows', 'small']" >> "$GITHUB_OUTPUT" - echo "compute-medium=['self-hosted', 'windows', 'medium']" >> "$GITHUB_OUTPUT" - echo "compute-large=['self-hosted', 'windows', 'large']" >> "$GITHUB_OUTPUT" - # m5d.8xlarge is equivalent to our xl custom runner in CE - echo "compute-xl=['self-hosted', 'ondemand', 'windows', 'type=m5d.8xlarge']" >> "$GITHUB_OUTPUT" - ;; - *) - # shellcheck disable=SC2129 - echo "compute-small=['windows-2019']" >> "$GITHUB_OUTPUT" - echo "compute-medium=['windows-2019']" >> "$GITHUB_OUTPUT" - echo "compute-large=['windows-2019']" >> "$GITHUB_OUTPUT" - echo "compute-xl=['windows-2019']" >> "$GITHUB_OUTPUT" - ;; +*-enterprise) + # shellcheck disable=SC2129 + echo "compute-small=['self-hosted', 'windows', 'small']" >>"$GITHUB_OUTPUT" + echo "compute-medium=['self-hosted', 'windows', 'medium']" >>"$GITHUB_OUTPUT" + echo "compute-large=['self-hosted', 'windows', 'large']" >>"$GITHUB_OUTPUT" + # m5d.8xlarge is equivalent to our xl custom runner in 
CE + echo "compute-xl=['self-hosted', 'ondemand', 'windows', 'type=m6a.2xlarge']" >>"$GITHUB_OUTPUT" + ;; +*) + # shellcheck disable=SC2129 + echo "compute-small=['windows-2019']" >>"$GITHUB_OUTPUT" + echo "compute-medium=['windows-2019']" >>"$GITHUB_OUTPUT" + echo "compute-large=['windows-2019']" >>"$GITHUB_OUTPUT" + echo "compute-xl=['windows-2019']" >>"$GITHUB_OUTPUT" + ;; esac diff --git a/.github/workflows/frontend.yml b/.github/workflows/frontend.yml index 63d02962fb01..28a2baebea9f 100644 --- a/.github/workflows/frontend.yml +++ b/.github/workflows/frontend.yml @@ -74,7 +74,7 @@ jobs: ember-build-test: needs: setup - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} strategy: matrix: partition: [1, 2, 3, 4] diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index db69f2dbbd98..afa0c1ba8084 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -178,7 +178,7 @@ jobs: - setup uses: ./.github/workflows/reusable-lint.yml with: - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} @@ -189,7 +189,7 @@ jobs: uses: ./.github/workflows/reusable-lint.yml with: go-arch: "386" - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} @@ -200,7 +200,7 @@ jobs: - setup uses: ./.github/workflows/reusable-dev-build.yml with: - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} secrets: elevated-github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} @@ -212,7 +212,7 @@ jobs: # uses: ./.github/workflows/reusable-dev-build.yml # with: # uploaded-binary-name: 
'consul-bin-s390x' - # runs-on: ${{ needs.setup.outputs.compute-xl }} + # runs-on: ${{ needs.setup.outputs.compute-large }} # go-arch: "s390x" # repository-name: ${{ github.repository }} # secrets: @@ -226,7 +226,7 @@ jobs: # uses: ./.github/workflows/reusable-dev-build.yml # with: # uploaded-binary-name: 'consul-bin-arm64' - # runs-on: ${{ needs.setup.outputs.compute-xl }} + # runs-on: ${{ needs.setup.outputs.compute-large }} # go-arch: "arm64" # repository-name: ${{ github.repository }} # secrets: @@ -259,7 +259,7 @@ jobs: with: directory: . runner-count: 12 - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "" permissions: @@ -279,7 +279,7 @@ jobs: with: directory: . runner-count: 12 - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: @@ -299,7 +299,7 @@ jobs: directory: . go-test-flags: 'GO_TEST_FLAGS="-race -gcflags=all=-d=checkptr=0"' package-names-command: "go list ./... | grep -E -v '^github.com/hashicorp/consul/agent(/consul|/local|/routine-leak-checker)?$' | grep -E -v '^github.com/hashicorp/consul(/command|/connect|/snapshot)'" - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: @@ -319,7 +319,7 @@ jobs: directory: . 
go-arch: "386" go-test-flags: 'export GO_TEST_FLAGS="-short"' - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: @@ -340,7 +340,7 @@ jobs: # uploaded-binary-name: 'consul-bin-s390x' # directory: . # go-test-flags: 'export GO_TEST_FLAGS="-short"' - # runs-on: ${{ needs.setup.outputs.compute-xl }} + # runs-on: ${{ needs.setup.outputs.compute-large }} # repository-name: ${{ github.repository }} # go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" # permissions: @@ -358,7 +358,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: envoyextensions - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: @@ -376,7 +376,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: troubleshoot - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" permissions: @@ -394,7 +394,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: api - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: "1.19" @@ -413,7 +413,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: api - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ 
needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: "1.20" @@ -432,7 +432,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: sdk - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: "1.19" @@ -451,7 +451,7 @@ jobs: uses: ./.github/workflows/reusable-unit.yml with: directory: sdk - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} go-tags: "${{ github.event.repository.name == 'consul-enterprise' && 'consulent consulprem consuldev' || '' }}" go-version: "1.20" diff --git a/.github/workflows/nightly-test-integrations-1.15.x.yml b/.github/workflows/nightly-test-integrations-1.15.x.yml index 587d5a62f23c..80597f64e2fa 100644 --- a/.github/workflows/nightly-test-integrations-1.15.x.yml +++ b/.github/workflows/nightly-test-integrations-1.15.x.yml @@ -91,7 +91,7 @@ jobs: } >> "$GITHUB_OUTPUT" envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - generate-envoy-job-matrices @@ -183,7 +183,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml upgrade-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - dev-build diff --git a/.github/workflows/nightly-test-integrations-1.16.x.yml b/.github/workflows/nightly-test-integrations-1.16.x.yml index d4e955ec4985..c4f16d9be618 100644 --- a/.github/workflows/nightly-test-integrations-1.16.x.yml +++ 
b/.github/workflows/nightly-test-integrations-1.16.x.yml @@ -91,7 +91,7 @@ jobs: } >> "$GITHUB_OUTPUT" envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - generate-envoy-job-matrices @@ -186,7 +186,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml upgrade-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - dev-build diff --git a/.github/workflows/nightly-test-integrations.yml b/.github/workflows/nightly-test-integrations.yml index cfe3041511cc..b98a8d78a294 100644 --- a/.github/workflows/nightly-test-integrations.yml +++ b/.github/workflows/nightly-test-integrations.yml @@ -42,7 +42,7 @@ jobs: needs: [setup] uses: ./.github/workflows/reusable-dev-build.yml with: - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} uploaded-binary-name: 'consul-bin' secrets: @@ -88,7 +88,7 @@ jobs: } >> "$GITHUB_OUTPUT" envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} needs: - setup - generate-envoy-job-matrices @@ -183,7 +183,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml upgrade-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large ) }} needs: - setup - dev-build diff --git a/.github/workflows/test-integrations-windows.yml b/.github/workflows/test-integrations-windows.yml index c3e977e97d58..95a30ce6c4c9 100644 --- a/.github/workflows/test-integrations-windows.yml +++ b/.github/workflows/test-integrations-windows.yml @@ -8,6 +8,7 @@ on: # * is a special character in YAML so you have to quote this string # 
Run nightly at 12AM UTC/8PM EST/5PM PST. - cron: '0 0 * * *' + workflow_dispatch: env: TEST_RESULTS_DIR: /tmp/test-results @@ -38,7 +39,7 @@ jobs: dev-build: uses: ./.github/workflows/reusable-dev-build-windows.yml with: - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} uploaded-binary-name: 'consul.exe' secrets: @@ -1206,4 +1207,4 @@ jobs: if printf '${{ toJSON(needs) }}' | grep -E -i '\"result\": \"(failure|cancelled)\"'; then printf "Tests failed or workflow cancelled:\n\n${{ toJSON(needs) }}" exit 1 - fi \ No newline at end of file + fi diff --git a/.github/workflows/test-integrations.yml b/.github/workflows/test-integrations.yml index 6e7bf3e7f03b..1c487c99614f 100644 --- a/.github/workflows/test-integrations.yml +++ b/.github/workflows/test-integrations.yml @@ -63,7 +63,7 @@ jobs: needs: [setup] uses: ./.github/workflows/reusable-dev-build.yml with: - runs-on: ${{ needs.setup.outputs.compute-xl }} + runs-on: ${{ needs.setup.outputs.compute-large }} repository-name: ${{ github.repository }} uploaded-binary-name: 'consul-bin' secrets: @@ -282,7 +282,7 @@ jobs: } >> "$GITHUB_OUTPUT" envoy-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - generate-envoy-job-matrices @@ -374,7 +374,7 @@ jobs: run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml compatibility-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }} needs: - setup - dev-build @@ -483,97 +483,6 @@ jobs: DD_ENV: ci run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml - peering_commontopo-integration-test: - runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }} - needs: - - setup - - dev-build - permissions: - id-token: write # NOTE: this permission 
is explicitly required for Vault auth. - contents: read - strategy: - fail-fast: false - env: - ENVOY_VERSION: "1.24.6" - steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos. - - name: Setup Git - if: ${{ endsWith(github.repository, '-enterprise') }} - run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com" - - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 - with: - go-version-file: 'go.mod' - - run: go env - - # Get go binary from workspace - - name: fetch binary - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 - with: - name: '${{ env.CONSUL_BINARY_UPLOAD_NAME }}' - path: . - - name: restore mode+x - run: chmod +x consul - - name: Build consul:local image - run: docker build -t ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local -f ./build-support/docker/Consul-Dev.dockerfile . - - name: Peering commonTopo Integration Tests - run: | - mkdir -p "${{ env.TEST_RESULTS_DIR }}" - cd ./test-integ/peering_commontopo - docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version - go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \ - --raw-command \ - --format=short-verbose \ - --debug \ - --packages="./..." \ - -- \ - go test \ - -tags "${{ env.GOTAGS }}" \ - -timeout=30m \ - -json . \ - --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --target-version local \ - --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \ - --latest-version latest - ls -lrt - env: - # this is needed because of incompatibility between RYUK container and GHA - GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml - GOTESTSUM_FORMAT: standard-verbose - COMPOSE_INTERACTIVE_NO_CLI: 1 - # tput complains if this isn't set to something. 
- TERM: ansi - # NOTE: ENT specific step as we store secrets in Vault. - - name: Authenticate to Vault - if: ${{ endsWith(github.repository, '-enterprise') }} - id: vault-auth - run: vault-auth - - # NOTE: ENT specific step as we store secrets in Vault. - - name: Fetch Secrets - if: ${{ endsWith(github.repository, '-enterprise') }} - id: secrets - uses: hashicorp/vault-action@v2.5.0 - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY; - - - name: prepare datadog-ci - if: ${{ !endsWith(github.repository, '-enterprise') }} - run: | - curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" - chmod +x /usr/local/bin/datadog-ci - - - name: upload coverage - # do not run on forks - if: github.event.pull_request.head.repo.full_name == github.repository - env: - DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}" - DD_ENV: ci - run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml test-integrations-success: needs: @@ -585,7 +494,6 @@ jobs: - generate-envoy-job-matrices - envoy-integration-test - compatibility-integration-test - - peering_commontopo-integration-test runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} if: always() && needs.conditional-skip.outputs.trigger-ci == 'true' steps: From d45c3c275574315c60183c6bd5f4ed482cd286e4 Mon Sep 17 00:00:00 2001 From: Ashesh Vidyut <134911583+absolutelightning@users.noreply.github.com> Date: Thu, 31 Aug 2023 21:51:09 +0530 Subject: [PATCH 6/7] NET-3181 - Allow log file naming like Nomad (#18617) * fixes file name for consul * added log file * added tests for rename method --- .changelog/18617.txt | 4 ++++ logging/logfile.go | 20 
+++++++++++++++++--- logging/logfile_test.go | 16 ++++++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 .changelog/18617.txt diff --git a/.changelog/18617.txt b/.changelog/18617.txt new file mode 100644 index 000000000000..1f840d836dee --- /dev/null +++ b/.changelog/18617.txt @@ -0,0 +1,4 @@ +```release-note:improvement +log: Currently consul logs files like this consul-{timestamp}.log. This change makes sure that there is always +consul.log file with the latest logs in it. +``` \ No newline at end of file diff --git a/logging/logfile.go b/logging/logfile.go index bc7b38d91c26..7f8e9db6d82f 100644 --- a/logging/logfile.go +++ b/logging/logfile.go @@ -60,10 +60,8 @@ func (l *LogFile) fileNamePattern() string { } func (l *LogFile) openNew() error { - fileNamePattern := l.fileNamePattern() - createTime := now() - newfileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + newfileName := l.fileName newfilePath := filepath.Join(l.logPath, newfileName) // Try creating a file. 
We truncate the file because we are the only authority to write the logs @@ -79,12 +77,28 @@ func (l *LogFile) openNew() error { return nil } +func (l *LogFile) renameCurrentFile() error { + fileNamePattern := l.fileNamePattern() + + createTime := now() + // Current file is consul.log always + currentFilePath := filepath.Join(l.logPath, l.fileName) + + oldFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + oldFilePath := filepath.Join(l.logPath, oldFileName) + + return os.Rename(currentFilePath, oldFilePath) +} + func (l *LogFile) rotate() error { // Get the time from the last point of contact timeElapsed := time.Since(l.LastCreated) // Rotate if we hit the byte file limit or the time limit if (l.BytesWritten >= int64(l.MaxBytes) && (l.MaxBytes > 0)) || timeElapsed >= l.duration { l.FileInfo.Close() + if err := l.renameCurrentFile(); err != nil { + return err + } if err := l.pruneFiles(); err != nil { return err } diff --git a/logging/logfile_test.go b/logging/logfile_test.go index ae9d8fb1b0cd..26c23439f20b 100644 --- a/logging/logfile_test.go +++ b/logging/logfile_test.go @@ -51,6 +51,22 @@ func TestLogFile_openNew(t *testing.T) { require.Contains(t, string(content), msg) } +func TestLogFile_renameCurrentFile(t *testing.T) { + logFile := LogFile{ + fileName: "consul.log", + logPath: testutil.TempDir(t, ""), + duration: defaultRotateDuration, + } + err := logFile.openNew() + require.NoError(t, err) + + err = logFile.renameCurrentFile() + require.NoError(t, err) + + _, err = os.ReadFile(logFile.FileInfo.Name()) + require.Contains(t, err.Error(), "no such file or directory") +} + func TestLogFile_Rotation_MaxBytes(t *testing.T) { tempDir := testutil.TempDir(t, "LogWriterBytes") logFile := LogFile{ From 9876923e230647a9b0ae636466988dad152509dd Mon Sep 17 00:00:00 2001 From: John Maguire Date: Thu, 31 Aug 2023 12:23:59 -0400 Subject: [PATCH 7/7] Add the plumbing for APIGW JWT work (#18609) * Add the plumbing for APIGW JWT work * 
Remove unneeded import * Add deep equal function for HTTPMatch * Added plumbing for status conditions * Remove unneeded comment * Fix comments * Add calls in xds listener for apigateway to setup listener jwt auth --- agent/consul/gateways/controller_gateways.go | 65 ++- .../consul/gateways/controller_gateways_ce.go | 27 + agent/proxycfg/api_gateway.go | 21 +- agent/structs/config_entry_routes.go | 32 ++ agent/structs/config_entry_routes_test.go | 473 ++++++++++++++++++ agent/xds/gw_per_route_filters_ce.go | 2 +- agent/xds/listeners_apigateway.go | 51 +- agent/xds/routes.go | 24 +- api/config_entry_status.go | 19 + 9 files changed, 695 insertions(+), 19 deletions(-) create mode 100644 agent/consul/gateways/controller_gateways_ce.go diff --git a/agent/consul/gateways/controller_gateways.go b/agent/consul/gateways/controller_gateways.go index fe8ddbcedc6f..f5adc58ae25d 100644 --- a/agent/consul/gateways/controller_gateways.go +++ b/agent/consul/gateways/controller_gateways.go @@ -63,6 +63,8 @@ func (r *apiGatewayReconciler) Reconcile(ctx context.Context, req controller.Req return reconcileEntry(r.fsm.State(), r.logger, ctx, req, r.reconcileTCPRoute, r.cleanupRoute) case structs.InlineCertificate: return r.enqueueCertificateReferencedGateways(r.fsm.State(), ctx, req) + case structs.JWTProvider: + return r.enqueueJWTProviderReferencedGatewaysAndHTTPRoutes(r.fsm.State(), ctx, req) default: return nil } @@ -233,6 +235,7 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle logger.Warn("error retrieving bound api gateway", "error", err) return err } + meta := newGatewayMeta(gateway, bound) certificateErrors, err := meta.checkCertificates(store) @@ -241,6 +244,12 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle return err } + jwtErrors, err := meta.checkJWTProviders(store) + if err != nil { + logger.Warn("error checking gateway JWT Providers", "error", err) + return err + } + // set each listener as having 
valid certs, then overwrite that status condition // if there are any certificate errors meta.eachListener(func(listener *structs.APIGatewayListener, bound *structs.BoundAPIGatewayListener) error { @@ -260,7 +269,12 @@ func (r *apiGatewayReconciler) reconcileGateway(_ context.Context, req controlle if len(certificateErrors) > 0 { updater.SetCondition(invalidCertificates()) - } else { + } + if len(jwtErrors) > 0 { + updater.SetCondition(invalidJWTProviders()) + } + + if len(certificateErrors) == 0 && len(jwtErrors) == 0 { updater.SetCondition(gatewayAccepted()) } @@ -463,6 +477,13 @@ func (r *apiGatewayReconciler) reconcileRoute(_ context.Context, req controller. updater.SetCondition(routeNoUpstreams()) } + if httpRoute, ok := route.(*structs.HTTPRouteConfigEntry); ok { + err := validateJWTForRoute(store, updater, httpRoute) + if err != nil { + return err + } + } + // the route is valid, attempt to bind it to all gateways r.logger.Trace("binding routes to gateway") modifiedGateways, boundRefs, bindErrors := bindRoutesToGateways(route, meta...) @@ -536,6 +557,11 @@ func NewAPIGatewayController(fsm *fsm.FSM, publisher state.EventPublisher, updat &stream.SubscribeRequest{ Topic: state.EventTopicInlineCertificate, Subject: stream.SubjectWildcard, + }, + ).Subscribe( + &stream.SubscribeRequest{ + Topic: state.EventTopicJWTProvider, + Subject: stream.SubjectWildcard, }) } @@ -897,6 +923,31 @@ func invalidCertificates() structs.Condition { ) } +// invalidJWTProvider returns a condition used when a gateway listener references +// a JWTProvider that does not exist. It takes a ref used to scope the condition +// to a given APIGateway listener. 
+func invalidJWTProvider(ref structs.ResourceReference, err error) structs.Condition { + return structs.NewGatewayCondition( + api.GatewayConditionResolvedRefs, + api.ConditionStatusFalse, + api.GatewayListenerReasonInvalidJWTProviderRef, + err.Error(), + ref, + ) +} + +// invalidJWTProviders is used to set the overall condition of the APIGateway +// to invalid due to missing JWT providers that it references. +func invalidJWTProviders() structs.Condition { + return structs.NewGatewayCondition( + api.GatewayConditionAccepted, + api.ConditionStatusFalse, + api.GatewayReasonInvalidJWTProviders, + "gateway references invalid JWT Providers", + structs.ResourceReference{}, + ) +} + // gatewayListenerNoConflicts marks an APIGateway listener as having no conflicts within its // bound routes func gatewayListenerNoConflicts(ref structs.ResourceReference) structs.Condition { @@ -944,6 +995,18 @@ func gatewayNotFound(ref structs.ResourceReference) structs.Condition { ) } +// jwtProviderNotFound marks a Route as having failed to bind to a referenced APIGateway due to +// one or more of the referenced JWT providers not existing (or having not been reconciled yet) +func jwtProviderNotFound(ref structs.ResourceReference, err error) structs.Condition { + return structs.NewRouteCondition( + api.RouteConditionBound, + api.ConditionStatusFalse, + api.RouteReasonGatewayNotFound, + err.Error(), + ref, + ) +} + // routeUnbound marks the route as having failed to bind to the referenced APIGateway func routeUnbound(ref structs.ResourceReference, err error) structs.Condition { return structs.NewRouteCondition( diff --git a/agent/consul/gateways/controller_gateways_ce.go b/agent/consul/gateways/controller_gateways_ce.go new file mode 100644 index 000000000000..d5c83bff80fd --- /dev/null +++ b/agent/consul/gateways/controller_gateways_ce.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !consulent +// +build !consulent + +package gateways + +import ( + "context" + + "github.com/hashicorp/consul/agent/consul/controller" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" +) + +func (r *apiGatewayReconciler) enqueueJWTProviderReferencedGatewaysAndHTTPRoutes(_ *state.Store, _ context.Context, _ controller.Request) error { + return nil +} + +func (m *gatewayMeta) checkJWTProviders(_ *state.Store) (map[structs.ResourceReference]error, error) { + return nil, nil +} + +func validateJWTForRoute(_ *state.Store, _ *structs.StatusUpdater, _ *structs.HTTPRouteConfigEntry) error { + return nil +} diff --git a/agent/proxycfg/api_gateway.go b/agent/proxycfg/api_gateway.go index 3ed39481204c..43798239a353 100644 --- a/agent/proxycfg/api_gateway.go +++ b/agent/proxycfg/api_gateway.go @@ -54,6 +54,11 @@ func (h *handlerAPIGateway) initialize(ctx context.Context) (ConfigSnapshot, err return snap, err } + err = watchJWTProviders(ctx, h) + if err != nil { + return snap, err + } + snap.APIGateway.Listeners = make(map[string]structs.APIGatewayListener) snap.APIGateway.BoundListeners = make(map[string]structs.BoundAPIGatewayListener) snap.APIGateway.HTTPRoutes = watch.NewMap[structs.ResourceReference, *structs.HTTPRouteConfigEntry]() @@ -97,27 +102,33 @@ func (h *handlerAPIGateway) handleUpdate(ctx context.Context, u UpdateEvent, sna return fmt.Errorf("error filling agent cache: %v", u.Err) } - switch { - case u.CorrelationID == rootsWatchID: + switch u.CorrelationID { + case rootsWatchID: // Handle change in the CA roots if err := h.handleRootCAUpdate(u, snap); err != nil { return err } - case u.CorrelationID == apiGatewayConfigWatchID || u.CorrelationID == boundGatewayConfigWatchID: + case apiGatewayConfigWatchID, boundGatewayConfigWatchID: // Handle change in the api-gateway or bound-api-gateway config entry if err := h.handleGatewayConfigUpdate(ctx, u, snap, 
u.CorrelationID); err != nil { return err } - case u.CorrelationID == inlineCertificateConfigWatchID: + case inlineCertificateConfigWatchID: // Handle change in an attached inline-certificate config entry if err := h.handleInlineCertConfigUpdate(ctx, u, snap); err != nil { return err } - case u.CorrelationID == routeConfigWatchID: + case routeConfigWatchID: // Handle change in an attached http-route or tcp-route config entry if err := h.handleRouteConfigUpdate(ctx, u, snap); err != nil { return err } + case jwtProviderID: + err := setJWTProvider(u, snap) + if err != nil { + return err + } + default: if err := (*handlerUpstreams)(h).handleUpdateUpstreams(ctx, u, snap); err != nil { return err diff --git a/agent/structs/config_entry_routes.go b/agent/structs/config_entry_routes.go index d285a4062443..d02d92398873 100644 --- a/agent/structs/config_entry_routes.go +++ b/agent/structs/config_entry_routes.go @@ -343,6 +343,38 @@ type HTTPMatch struct { Query []HTTPQueryMatch } +func (m HTTPMatch) DeepEqual(other HTTPMatch) bool { + if m.Method != other.Method { + return false + } + + if m.Path != other.Path { + return false + } + + if len(m.Headers) != len(other.Headers) { + return false + } + + if len(m.Query) != len(other.Query) { + return false + } + + for i := 0; i < len(m.Headers); i++ { + if m.Headers[i] != other.Headers[i] { + return false + } + } + + for i := 0; i < len(m.Query); i++ { + if m.Query[i] != other.Query[i] { + return false + } + } + + return true +} + // HTTPMatchMethod specifies which type of HTTP verb should // be used for matching a given request. 
type HTTPMatchMethod string diff --git a/agent/structs/config_entry_routes_test.go b/agent/structs/config_entry_routes_test.go index 5ab85e5977e9..2aac0051a326 100644 --- a/agent/structs/config_entry_routes_test.go +++ b/agent/structs/config_entry_routes_test.go @@ -437,3 +437,476 @@ func TestHTTPRoute(t *testing.T) { } testConfigEntryNormalizeAndValidate(t, cases) } + +func TestHTTPMatch_DeepEqual(t *testing.T) { + type fields struct { + Headers []HTTPHeaderMatch + Method HTTPMatchMethod + Path HTTPPathMatch + Query []HTTPQueryMatch + } + type args struct { + other HTTPMatch + } + tests := map[string]struct { + match HTTPMatch + other HTTPMatch + want bool + }{ + "all fields equal": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: true, + }, + "differing number of header matches": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: 
HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: false, + }, + "differing header matches": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h4", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: false, + }, + "different path matching": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: 
"a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/zoidberg", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: false, + }, + "differing methods": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodConnect, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, 
+ want: false, + }, + "differing number of query matches": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: false, + }, + "different query matches": { + match: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "another", + }, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + other: HTTPMatch{ + Headers: []HTTPHeaderMatch{ + { + Match: HTTPHeaderMatchExact, + Name: "h1", + Value: "a", + }, + { + Match: HTTPHeaderMatchPrefix, + Name: "h2", + Value: "b", + }, + }, + Method: HTTPMatchMethodGet, + Path: HTTPPathMatch{ + Match: HTTPPathMatchType(HTTPHeaderMatchPrefix), + Value: "/bender", + }, + Query: []HTTPQueryMatch{ + { + Match: HTTPQueryMatchExact, + Name: "q", + Value: "nibbler", + 
}, + { + Match: HTTPQueryMatchPresent, + Name: "ship", + Value: "planet express", + }, + }, + }, + want: false, + }, + } + for name, tt := range tests { + name := name + tt := tt + t.Run(name, func(t *testing.T) { + t.Parallel() + if got := tt.match.DeepEqual(tt.other); got != tt.want { + t.Errorf("HTTPMatch.DeepEqual() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/agent/xds/gw_per_route_filters_ce.go b/agent/xds/gw_per_route_filters_ce.go index cbf406cd07a1..ba14359b792a 100644 --- a/agent/xds/gw_per_route_filters_ce.go +++ b/agent/xds/gw_per_route_filters_ce.go @@ -19,6 +19,6 @@ type perRouteFilterBuilder struct { route *structs.HTTPRouteConfigEntry } -func (p perRouteFilterBuilder) buildFilter(match *envoy_route_v3.RouteMatch) (map[string]*anypb.Any, error) { +func (p perRouteFilterBuilder) buildTypedPerFilterConfig(match *envoy_route_v3.RouteMatch, routeAction *envoy_route_v3.Route_Route) (map[string]*anypb.Any, error) { return nil, nil } diff --git a/agent/xds/listeners_apigateway.go b/agent/xds/listeners_apigateway.go index 07566017cea9..496d3d369718 100644 --- a/agent/xds/listeners_apigateway.go +++ b/agent/xds/listeners_apigateway.go @@ -8,7 +8,10 @@ import ( envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + envoy_http_jwt_authn_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3" + envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/hashicorp/consul/agent/xds/naming" "google.golang.org/protobuf/proto" @@ -137,6 +140,45 @@ func (s *ResourceGenerator) makeAPIGatewayListeners(address string, cfgSnap *pro logger: s.Logger, } listener := makeListener(listenerOpts) + + route, _ := 
cfgSnap.APIGateway.HTTPRoutes.Get(readyListener.routeReference) + foundJWT := false + if listenerCfg.Override != nil && listenerCfg.Override.JWT != nil { + foundJWT = true + } + + if !foundJWT && listenerCfg.Default != nil && listenerCfg.Default.JWT != nil { + foundJWT = true + } + + if !foundJWT { + for _, rule := range route.Rules { + if rule.Filters.JWT != nil { + foundJWT = true + break + } + for _, svc := range rule.Services { + if svc.Filters.JWT != nil { + foundJWT = true + break + } + } + } + } + + var authFilters []*envoy_http_v3.HttpFilter + if foundJWT { + builder := &GatewayAuthFilterBuilder{ + listener: listenerCfg, + route: route, + providers: cfgSnap.JWTProviders, + envoyProviders: make(map[string]*envoy_http_jwt_authn_v3.JwtProvider, len(cfgSnap.JWTProviders)), + } + authFilters, err = builder.makeGatewayAuthFilters() + if err != nil { + return nil, err + } + } filterOpts := listenerFilterOpts{ useRDS: true, protocol: listenerKey.Protocol, @@ -145,7 +187,7 @@ func (s *ResourceGenerator) makeAPIGatewayListeners(address string, cfgSnap *pro cluster: "", statPrefix: "ingress_upstream_", routePath: "", - httpAuthzFilters: nil, + httpAuthzFilters: authFilters, accessLogs: &cfgSnap.Proxy.AccessLogs, logger: s.Logger, } @@ -210,7 +252,6 @@ type readyListener struct { // getReadyListeners returns a map containing the list of upstreams for each listener that is ready func getReadyListeners(cfgSnap *proxycfg.ConfigSnapshot) map[string]readyListener { - ready := map[string]readyListener{} for _, l := range cfgSnap.APIGateway.Listeners { // Only include upstreams for listeners that are ready @@ -278,7 +319,7 @@ func makeDownstreamTLSContextFromSnapshotAPIListenerConfig(cfgSnap *proxycfg.Con func makeCommonTLSContextFromSnapshotAPIGatewayListenerConfig(cfgSnap *proxycfg.ConfigSnapshot, listenerCfg structs.APIGatewayListener) (*envoy_tls_v3.CommonTlsContext, error) { var tlsContext *envoy_tls_v3.CommonTlsContext - //API Gateway TLS config is per listener + // API 
Gateway TLS config is per listener tlsCfg, err := resolveAPIListenerTLSConfig(listenerCfg.TLS) if err != nil { return nil, err @@ -321,8 +362,8 @@ func makeInlineOverrideFilterChains(cfgSnap *proxycfg.ConfigSnapshot, tlsCfg structs.GatewayTLSConfig, protocol string, filterOpts listenerFilterOpts, - certs []structs.InlineCertificateConfigEntry) ([]*envoy_listener_v3.FilterChain, error) { - + certs []structs.InlineCertificateConfigEntry, +) ([]*envoy_listener_v3.FilterChain, error) { var chains []*envoy_listener_v3.FilterChain constructChain := func(name string, hosts []string, tlsContext *envoy_tls_v3.CommonTlsContext) error { diff --git a/agent/xds/routes.go b/agent/xds/routes.go index 6f0d18b05dcb..18e642cf160a 100644 --- a/agent/xds/routes.go +++ b/agent/xds/routes.go @@ -58,7 +58,7 @@ func (s *ResourceGenerator) routesForConnectProxy(cfgSnap *proxycfg.ConfigSnapsh continue } - virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, []string{"*"}, false) + virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, []string{"*"}, false, perRouteFilterBuilder{}) if err != nil { return nil, err } @@ -94,12 +94,12 @@ func (s *ResourceGenerator) routesForConnectProxy(cfgSnap *proxycfg.ConfigSnapsh addressesMap[routeName] = make(map[string]string) } // cluster name is unique per address/port so we should not be doing any override here + clusterName := clusterNameForDestination(cfgSnap, svcConfig.Name, address, svcConfig.NamespaceOrDefault(), svcConfig.PartitionOrDefault()) addressesMap[routeName][clusterName] = address } return nil }) - if err != nil { return nil, err } @@ -119,7 +119,6 @@ func (s *ResourceGenerator) routesForConnectProxy(cfgSnap *proxycfg.ConfigSnapsh } func (s *ResourceGenerator) makeRoutesForAddresses(routeName string, addresses map[string]string) ([]proto.Message, error) { - var resources []proto.Message route, err := makeNamedAddressesRoute(routeName, addresses) @@ -201,7 +200,8 @@ func (s 
*ResourceGenerator) makeRoutes( cfgSnap *proxycfg.ConfigSnapshot, svc structs.ServiceName, clusterName string, - autoHostRewrite bool) ([]proto.Message, error) { + autoHostRewrite bool, +) ([]proto.Message, error) { resolver, hasResolver := cfgSnap.TerminatingGateway.ServiceResolvers[svc] if !hasResolver { @@ -255,6 +255,7 @@ func (s *ResourceGenerator) routesForMeshGateway(cfgSnap *proxycfg.ConfigSnapsho chain, []string{"*"}, true, + perRouteFilterBuilder{}, ) if err != nil { return nil, err @@ -378,7 +379,7 @@ func (s *ResourceGenerator) routesForIngressGateway(cfgSnap *proxycfg.ConfigSnap } domains := generateUpstreamIngressDomains(listenerKey, u) - virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, domains, false) + virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, domains, false, perRouteFilterBuilder{}) if err != nil { return nil, err } @@ -435,6 +436,7 @@ func (s *ResourceGenerator) routesForAPIGateway(cfgSnap *proxycfg.ConfigSnapshot readyUpstreamsList := getReadyListeners(cfgSnap) for _, readyUpstreams := range readyUpstreamsList { + readyUpstreams := readyUpstreams listenerCfg := readyUpstreams.listenerCfg // Do not create any route configuration for TCP listeners if listenerCfg.Protocol != structs.ListenerProtocolHTTP { @@ -461,12 +463,13 @@ func (s *ResourceGenerator) routesForAPIGateway(cfgSnap *proxycfg.ConfigSnapshot // specific naming convention in discoverychain.consolidateHTTPRoutes. If we don't // convert our route to use the same naming convention, we won't find any chains below. 
reformatedRoutes := discoverychain.ReformatHTTPRoute(route, &listenerCfg, cfgSnap.APIGateway.GatewayConfig) - + filterBuilder := perRouteFilterBuilder{providerMap: cfgSnap.JWTProviders, listener: &listenerCfg, route: route} for _, reformatedRoute := range reformatedRoutes { reformatedRoute := reformatedRoute upstream := buildHTTPRouteUpstream(reformatedRoute, listenerCfg) uid := proxycfg.NewUpstreamID(&upstream) + chain := cfgSnap.APIGateway.DiscoveryChain[uid] if chain == nil { // Note that if we continue here we must also do this in the cluster generation @@ -476,7 +479,7 @@ func (s *ResourceGenerator) routesForAPIGateway(cfgSnap *proxycfg.ConfigSnapshot domains := generateUpstreamAPIsDomains(listenerKey, upstream, reformatedRoute.Hostnames) - virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, domains, false) + virtualHost, err := s.makeUpstreamRouteForDiscoveryChain(cfgSnap, uid, chain, domains, false, filterBuilder) if err != nil { return nil, err } @@ -605,6 +608,7 @@ func (s *ResourceGenerator) makeUpstreamRouteForDiscoveryChain( chain *structs.CompiledDiscoveryChain, serviceDomains []string, forMeshGateway bool, + filterBuilder perRouteFilterBuilder, ) (*envoy_route_v3.VirtualHost, error) { routeName := uid.EnvoyID() var routes []*envoy_route_v3.Route @@ -624,6 +628,7 @@ func (s *ResourceGenerator) makeUpstreamRouteForDiscoveryChain( routes = make([]*envoy_route_v3.Route, 0, len(startNode.Routes)) for _, discoveryRoute := range startNode.Routes { + discoveryRoute := discoveryRoute routeMatch := makeRouteMatchForDiscoveryRoute(discoveryRoute) var ( @@ -688,8 +693,13 @@ func (s *ResourceGenerator) makeUpstreamRouteForDiscoveryChain( } } + filter, err := filterBuilder.buildTypedPerFilterConfig(routeMatch, routeAction) + if err != nil { + return nil, err + } route.Match = routeMatch route.Action = routeAction + route.TypedPerFilterConfig = filter routes = append(routes, route) } diff --git a/api/config_entry_status.go 
b/api/config_entry_status.go index 2d16ea0fc4e5..997066f24fe9 100644 --- a/api/config_entry_status.go +++ b/api/config_entry_status.go @@ -106,6 +106,10 @@ const ( // certificates and cannot bind to any routes GatewayReasonInvalidCertificates GatewayConditionReason = "InvalidCertificates" + // This reason is used with the "Accepted" condition when the gateway has multiple invalid + // JWT providers and cannot bind to any routes + GatewayReasonInvalidJWTProviders GatewayConditionReason = "InvalidJWTProviders" + // This condition indicates that the gateway was unable to resolve // conflicting specification requirements for this Listener. If a // Listener is conflicted, its network port should not be configured @@ -163,6 +167,14 @@ const ( // If the reference is not allowed, the reason RefNotPermitted must be used // instead. GatewayListenerReasonInvalidCertificateRef GatewayConditionReason = "InvalidCertificateRef" + + // This reason is used with the "ResolvedRefs" condition when a + // Listener has a JWT configuration with at least one JWTProvider + // that is invalid or does not exist. + // A JWTProvider is considered invalid when it refers to a nonexistent + // or unsupported resource or kind, or when the data within that resource + // is malformed. 
+ GatewayListenerReasonInvalidJWTProviderRef GatewayConditionReason = "InvalidJWTProviderRef" ) var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[ConditionStatus][]GatewayConditionReason{ @@ -172,6 +184,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayReasonInvalidCertificates, + GatewayReasonInvalidJWTProviders, }, ConditionStatusUnknown: {}, }, @@ -190,6 +203,7 @@ var validGatewayConditionReasonsMapping = map[GatewayConditionType]map[Condition }, ConditionStatusFalse: { GatewayListenerReasonInvalidCertificateRef, + GatewayListenerReasonInvalidJWTProviderRef, }, ConditionStatusUnknown: {}, }, @@ -282,6 +296,10 @@ const ( // This reason is used with the "Bound" condition when the route fails // to find the gateway RouteReasonGatewayNotFound RouteConditionReason = "GatewayNotFound" + + // This reason is used with the "Accepted" condition when the route references non-existent + // JWTProviders + RouteReasonJWTProvidersNotFound RouteConditionReason = "JWTProvidersNotFound" ) var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStatus][]RouteConditionReason{ @@ -302,6 +320,7 @@ var validRouteConditionReasonsMapping = map[RouteConditionType]map[ConditionStat ConditionStatusFalse: { RouteReasonGatewayNotFound, RouteReasonFailedToBind, + RouteReasonJWTProvidersNotFound, }, ConditionStatusUnknown: {}, },