From 18cef217fc4accce30d18df5fb51e354b961dcf3 Mon Sep 17 00:00:00 2001
From: Paul Rogers <129207811+paul1r@users.noreply.github.com>
Date: Fri, 6 Sep 2024 13:12:14 -0400
Subject: [PATCH] chore: Linting updates in prep for Go 1.23 (#14071)

---
 .gitignore | 3 +
 clients/cmd/docker-driver/http.go | 2 +-
 clients/pkg/logentry/stages/match.go | 4 +-
 clients/pkg/promtail/client/client_test.go | 2 +-
 .../discovery/consulagent/consul_test.go | 4 +-
 .../targets/cloudflare/target_test.go | 10 +--
 .../promtail/targets/gcplog/pull_target.go | 2 +-
 .../targets/journal/journaltarget_test.go | 2 +-
 .../promtail/targets/kafka/consumer_test.go | 2 +-
 .../targets/kafka/target_syncer_test.go | 2 +-
 .../syslog/syslogparser/syslogparser_test.go | 4 +-
 cmd/logcli/main.go | 10 +--
 cmd/lokitool/main.go | 2 +-
 integration/loki_rule_eval_test.go | 2 +-
 pkg/bloombuild/builder/batch.go | 2 +-
 pkg/bloombuild/builder/batch_test.go | 2 +-
 pkg/canary/comparator/comparator.go | 4 +-
 pkg/chunkenc/memchunk_test.go | 6 +-
 pkg/compactor/deletion/delete_request_test.go | 10 +--
 .../deletion/delete_requests_manager_test.go | 28 ++++----
 .../deletion/delete_requests_store.go | 6 +-
 .../deletion/tenant_request_handler_test.go | 2 +-
 pkg/compactor/retention/marker_test.go | 2 +-
 pkg/compactor/retention/retention_test.go | 14 ++--
 pkg/compactor/table.go | 2 +-
 pkg/compactor/table_test.go | 8 +--
 pkg/configs/client/client.go | 4 +-
 pkg/configs/client/configs_test.go | 2 +-
 pkg/distributor/distributor.go | 6 +-
 pkg/distributor/distributor_test.go | 20 +++---
 pkg/distributor/writefailures/manager_test.go | 12 ++--
 pkg/indexgateway/client.go | 2 +-
 pkg/indexgateway/client_test.go | 2 +-
 pkg/indexgateway/gateway_test.go | 4 +-
 pkg/ingester-rf1/flush.go | 2 +-
 pkg/ingester-rf1/instance.go | 4 +-
 pkg/ingester-rf1/objstore/storage.go | 2 +-
 pkg/ingester-rf1/stream.go | 2 +-
 pkg/ingester/ingester_test.go | 2 +-
 pkg/ingester/instance.go | 2 +-
 pkg/ingester/stream.go | 2 +-
 pkg/ingester/stream_test.go | 2 +-
 pkg/iter/v2/ordering_test.go | 2 +-
 pkg/logcli/client/client.go | 2 +-
 pkg/loghttp/query.go | 2 +-
 pkg/loghttp/tail.go | 2 +-
 pkg/logql/accumulator_test.go | 2 +-
 pkg/logql/log/jsonexpr/lexer.go | 4 +-
 pkg/logql/log/logfmt/lexer.go | 2 +-
 pkg/logql/log/metrics_extraction.go | 2 +-
 pkg/logql/syntax/ast.go | 2 +-
 pkg/logql/syntax/visit_test.go | 8 +--
 pkg/loki/modules.go | 2 +-
 pkg/loki/version_handler.go | 2 +-
 .../frontend/transport/handler.go | 4 +-
 pkg/lokifrontend/frontend/v1/frontend_test.go | 8 +--
 .../frontend/v2/frontend_scheduler_worker.go | 2 +-
 pkg/lokifrontend/frontend/v2/frontend_test.go | 2 +-
 pkg/pattern/drain/drain.go | 6 +-
 pkg/pattern/ingester_querier.go | 2 +-
 pkg/pattern/instance.go | 6 +-
 pkg/querier-rf1/http.go | 4 +-
 pkg/querier-rf1/querier.go | 2 +-
 pkg/querier/http.go | 10 +--
 pkg/querier/querier_mock_test.go | 2 +-
 pkg/querier/querier_test.go | 10 +--
 pkg/querier/queryrange/codec.go | 68 +++++++++----------
 .../queryrange/index_stats_cache_test.go | 2 +-
 pkg/querier/queryrange/limits.go | 10 +--
 pkg/querier/queryrange/limits_test.go | 6 +-
 pkg/querier/queryrange/log_result_cache.go | 2 +-
 pkg/querier/queryrange/marshal.go | 4 +-
 .../queryrangebase/middleware_test.go | 2 +-
 .../queryrange/queryrangebase/promql_test.go | 2 +-
 .../queryrange/queryrangebase/query_range.go | 2 +-
 .../queryrangebase/results_cache_test.go | 6 +-
 .../queryrange/queryrangebase/retry_test.go | 16 ++---
 pkg/querier/queryrange/querysharding.go | 6 +-
 pkg/querier/queryrange/querysharding_test.go | 18 ++---
 pkg/querier/queryrange/roundtrip.go | 6 +-
 pkg/querier/queryrange/roundtrip_test.go | 12 ++--
 pkg/querier/queryrange/serialize_test.go | 2 +-
 pkg/querier/queryrange/split_by_interval.go | 2 +-
 .../queryrange/split_by_interval_test.go | 8 +--
 pkg/querier/queryrange/split_by_range.go | 2 +-
 pkg/querier/queryrange/split_by_range_test.go | 6 +-
 pkg/querier/queryrange/stats_test.go | 16 ++---
 pkg/querier/queryrange/views.go | 2 +-
 pkg/querier/queryrange/views_test.go | 2 +-
 pkg/querier/queryrange/volume_test.go | 2 +-
 pkg/querier/tail_test.go | 4 +-
 .../worker/scheduler_processor_test.go | 2 +-
 pkg/queue/queue_test.go | 4 +-
 pkg/ruler/base/compat_test.go | 4 +-
 pkg/ruler/base/ruler.go | 2 +-
 pkg/ruler/base/ruler_test.go | 14 ++--
 pkg/ruler/compat.go | 4 +-
 pkg/ruler/evaluator_remote_test.go | 20 +++---
 pkg/ruler/memstore_test.go | 6 +-
 pkg/ruler/rulestore/config_test.go | 2 +-
 pkg/ruler/storage/cleaner/cleaner_test.go | 6 +-
 pkg/ruler/storage/instance/instance.go | 2 +-
 pkg/ruler/storage/instance/manager_test.go | 12 ++--
 pkg/storage/async_store.go | 6 +-
 pkg/storage/bloom/v1/bloom_tester.go | 6 +-
 pkg/storage/bloom/v1/builder_test.go | 6 +-
 pkg/storage/chunk/cache/embeddedcache_test.go | 4 +-
 pkg/storage/chunk/cache/redis_client_test.go | 8 +--
 pkg/storage/chunk/cache/resultscache/cache.go | 4 +-
 .../chunk/cache/resultscache/cache_test.go | 10 +--
 .../chunk/client/alibaba/oss_object_client.go | 10 +--
 .../chunk/client/aws/dynamodb_index_reader.go | 2 +-
 .../client/aws/dynamodb_storage_client.go | 4 +-
 .../chunk/client/aws/s3_storage_client.go | 2 +-
 .../client/aws/s3_storage_client_test.go | 10 +--
 .../chunk/client/azure/blob_storage_client.go | 6 +-
 .../client/azure/blob_storage_client_test.go | 2 +-
 .../client/baidubce/bos_storage_client.go | 12 ++--
 pkg/storage/chunk/client/gcp/fixtures.go | 2 +-
 .../client/gcp/gcs_object_client_test.go | 6 +-
 .../chunk/client/grpc/grpc_client_test.go | 2 +-
 .../chunk/client/hedging/hedging_test.go | 4 +-
 .../client/ibmcloud/cos_object_client.go | 2 +-
 .../client/ibmcloud/cos_object_client_test.go | 2 +-
 .../chunk/client/local/boltdb_table_client.go | 2 +-
 .../chunk/client/local/fs_object_client.go | 2 +-
 .../client/util/parallel_chunk_fetch_test.go | 2 +-
 .../stores/series/series_index_store.go | 4 +-
 .../shipper/bloomshipper/fetcher_test.go | 2 +-
 .../stores/shipper/bloomshipper/store.go | 2 +-
 .../stores/shipper/bloomshipper/store_test.go | 2 +-
 .../boltdb/compactor/table_compactor.go | 20 +++---
 .../shipper/indexshipper/boltdb/table.go | 2 +-
 .../indexshipper/boltdb/table_manager_test.go | 4 +-
 .../shipper/indexshipper/testutil/testutil.go | 2 +-
 .../shipper/indexshipper/tsdb/compactor.go | 2 +-
 .../indexshipper/tsdb/compactor_test.go | 6 +-
 .../shipper/indexshipper/tsdb/manager.go | 2 +-
 .../indexshipper/tsdb/single_file_index.go | 2 +-
 .../shipper/indexshipper/util/queries_test.go | 2 +-
 pkg/tool/commands/rules.go | 2 +-
 pkg/tool/rules/rules.go | 2 +-
 pkg/tool/rules/rules_test.go | 2 +-
 pkg/util/cfg/dynamic_test.go | 4 +-
 pkg/util/cfg/flag.go | 2 +-
 pkg/util/fakeauth/fake_auth.go | 2 +-
 pkg/util/httpreq/tags_test.go | 4 +-
 .../memcached_client_selector_test.go | 2 +-
 pkg/util/limiter/query_limiter.go | 2 +-
 pkg/util/marshal/marshal_test.go | 2 +-
 pkg/util/querylimits/middleware_test.go | 6 +-
 pkg/util/ring/ringmanager.go | 2 +-
 pkg/util/server/error_test.go | 2 +-
 pkg/util/server/middleware.go | 2 +-
 pkg/util/server/middleware_test.go | 2 +-
 pkg/util/server/recovery_test.go | 6 +-
 .../checker/checker.go | 2 +-
 tools/querytee/proxy_endpoint_test.go | 8 +--
 tools/tsdb/index-analyzer/analytics.go | 2 +-
 tools/tsdb/tsdb-map/main.go | 2 +-
 160 files changed, 406 insertions(+), 403 deletions(-)

diff --git a/.gitignore b/.gitignore
index a68b1396d1dba..dfb63bf94da69 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,3 +60,6 @@ nix/result
 
 # helm test
 production/helm/loki/src/helm-test/helm-test
+
+# Go telemetry
+.config
diff --git a/clients/cmd/docker-driver/http.go b/clients/cmd/docker-driver/http.go
index 5045e2c9d1c41..a120c71220b8a 100644
--- a/clients/cmd/docker-driver/http.go
+++ b/clients/cmd/docker-driver/http.go
@@ -56,7 +56,7 @@ func handlers(h *sdk.Handler, d *driver) {
 		respond(nil, w)
 	})
 
-	h.HandleFunc("/LogDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) {
+	h.HandleFunc("/LogDriver.Capabilities", func(w http.ResponseWriter, _ *http.Request) {
 		_ = json.NewEncoder(w).Encode(&CapabilitiesResponse{
 			Cap: logger.Capability{ReadLogs: true},
 		})
diff --git a/clients/pkg/logentry/stages/match.go b/clients/pkg/logentry/stages/match.go
index 176f383c4178f..0cf4d5da2d893 100644
--- a/clients/pkg/logentry/stages/match.go
+++ b/clients/pkg/logentry/stages/match.go
@@ -51,10 +51,10 @@ func validateMatcherConfig(cfg *MatcherConfig) (logql.Expr, error) {
 		return nil, errors.New(ErrUnknownMatchAction)
 	}
 
-	if cfg.Action == MatchActionKeep && (cfg.Stages == nil || len(cfg.Stages) == 0) {
+	if cfg.Action == MatchActionKeep && (len(cfg.Stages) == 0) {
 		return nil, errors.New(ErrMatchRequiresStages)
 	}
-	if cfg.Action == MatchActionDrop && (cfg.Stages != nil && len(cfg.Stages) != 0) {
+	if cfg.Action == MatchActionDrop && (len(cfg.Stages) != 0) {
 		return nil, errors.New(ErrStagesWithDropLine)
 	}
 
diff --git a/clients/pkg/promtail/client/client_test.go b/clients/pkg/promtail/client/client_test.go
index ea3039879605b..f5871790e12dc 100644
--- a/clients/pkg/promtail/client/client_test.go
+++ b/clients/pkg/promtail/client/client_test.go
@@ -727,7 +727,7 @@ func Test_Tripperware(t *testing.T) {
 	var called bool
 	c, err := NewWithTripperware(metrics, Config{
 		URL: flagext.URLValue{URL: url},
-	}, 0, 0, false, log.NewNopLogger(), func(rt http.RoundTripper) http.RoundTripper {
+	}, 0, 0, false, log.NewNopLogger(), func(_ http.RoundTripper) http.RoundTripper {
 		return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
 			require.Equal(t, r.URL.String(), "http://foo.com")
 			called = true
diff --git a/clients/pkg/promtail/discovery/consulagent/consul_test.go b/clients/pkg/promtail/discovery/consulagent/consul_test.go
index 1ede3e4524d64..1edf706497610 100644
--- a/clients/pkg/promtail/discovery/consulagent/consul_test.go
+++ b/clients/pkg/promtail/discovery/consulagent/consul_test.go
@@ -464,14 +464,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
 	}{
 		{
 			// Define a handler that will return status 500.
-			handler: func(w http.ResponseWriter, r *http.Request) {
+			handler: func(w http.ResponseWriter, _ *http.Request) {
 				w.WriteHeader(500)
 			},
 			errMessage: "Unexpected response code: 500 ()",
 		},
 		{
 			// Define a handler that will return incorrect response.
- handler: func(w http.ResponseWriter, r *http.Request) { + handler: func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte(`{"Config": {"Not-Datacenter": "test-dc"}}`)) require.NoError(t, err) }, diff --git a/clients/pkg/promtail/targets/cloudflare/target_test.go b/clients/pkg/promtail/targets/cloudflare/target_test.go index 64cb6c4492e5e..eee359cb3daa3 100644 --- a/clients/pkg/promtail/targets/cloudflare/target_test.go +++ b/clients/pkg/promtail/targets/cloudflare/target_test.go @@ -65,7 +65,7 @@ func Test_CloudflareTarget(t *testing.T) { logs: []string{}, }, nil) // replace the client. - getClient = func(apiKey, zoneID string, fields []string) (Client, error) { + getClient = func(_, _ string, _ []string) (Client, error) { return cfClient, nil } @@ -114,7 +114,7 @@ func Test_RetryErrorLogpullReceived(t *testing.T) { err: ErrorLogpullReceived, }, nil).Times(2) // just retry once // replace the client - getClient = func(apiKey, zoneID string, fields []string) (Client, error) { + getClient = func(_, _ string, _ []string) (Client, error) { return cfClient, nil } defaultBackoff.MinBackoff = 0 @@ -159,7 +159,7 @@ func Test_RetryErrorIterating(t *testing.T) { err: ErrorLogpullReceived, }, nil).Once() // replace the client. - getClient = func(apiKey, zoneID string, fields []string) (Client, error) { + getClient = func(_, _ string, _ []string) (Client, error) { return cfClient, nil } // retries as fast as possible. @@ -210,7 +210,7 @@ func Test_CloudflareTargetError(t *testing.T) { // setup errors for all retries cfClient.On("LogpullReceived", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("no logs")) // replace the client. - getClient = func(apiKey, zoneID string, fields []string) (Client, error) { + getClient = func(_, _ string, _ []string) (Client, error) { return cfClient, nil } @@ -263,7 +263,7 @@ func Test_CloudflareTargetError168h(t *testing.T) { // setup errors for all retries cfClient.On("LogpullReceived", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("HTTP status 400: bad query: error parsing time: invalid time range: too early: logs older than 168h0m0s are not available")) // replace the client. 
-	getClient = func(apiKey, zoneID string, fields []string) (Client, error) {
+	getClient = func(_, _ string, _ []string) (Client, error) {
 		return cfClient, nil
 	}
 
diff --git a/clients/pkg/promtail/targets/gcplog/pull_target.go b/clients/pkg/promtail/targets/gcplog/pull_target.go
index 671b160c6f4ca..33df80a4c8bfc 100644
--- a/clients/pkg/promtail/targets/gcplog/pull_target.go
+++ b/clients/pkg/promtail/targets/gcplog/pull_target.go
@@ -127,7 +127,7 @@ func (t *pullTarget) consumeSubscription() {
 	defer t.cancel()
 
 	for t.backoff.Ongoing() {
-		err := t.sub.Receive(t.ctx, func(ctx context.Context, m *pubsub.Message) {
+		err := t.sub.Receive(t.ctx, func(_ context.Context, m *pubsub.Message) {
 			t.msgs <- m
 			t.backoff.Reset()
 		})
diff --git a/clients/pkg/promtail/targets/journal/journaltarget_test.go b/clients/pkg/promtail/targets/journal/journaltarget_test.go
index 768cb7f5c1510..2aa141be610e3 100644
--- a/clients/pkg/promtail/targets/journal/journaltarget_test.go
+++ b/clients/pkg/promtail/targets/journal/journaltarget_test.go
@@ -45,7 +45,7 @@ func (r *mockJournalReader) Follow(until <-chan time.Time, _ io.Writer) error {
 }
 
 func newMockJournalEntry(entry *sdjournal.JournalEntry) journalEntryFunc {
-	return func(c sdjournal.JournalReaderConfig, cursor string) (*sdjournal.JournalEntry, error) {
+	return func(_ sdjournal.JournalReaderConfig, _ string) (*sdjournal.JournalEntry, error) {
 		return entry, nil
 	}
 }
diff --git a/clients/pkg/promtail/targets/kafka/consumer_test.go b/clients/pkg/promtail/targets/kafka/consumer_test.go
index a4d87e7c3c71e..d6ef82ba9addf 100644
--- a/clients/pkg/promtail/targets/kafka/consumer_test.go
+++ b/clients/pkg/promtail/targets/kafka/consumer_test.go
@@ -43,7 +43,7 @@ func Test_ComsumerConsume(t *testing.T) {
 			ctx: context.Background(),
 			cancel: func() {},
 			ConsumerGroup: group,
-			discoverer: DiscovererFn(func(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) (RunnableTarget, error) {
+			discoverer: DiscovererFn(func(_ sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) (RunnableTarget, error) {
 				if claim.Topic() != "dropped" {
 					return &fakeTarget{
 						ctx: ctx,
diff --git a/clients/pkg/promtail/targets/kafka/target_syncer_test.go b/clients/pkg/promtail/targets/kafka/target_syncer_test.go
index 6514afeefcb01..f450a10d67f36 100644
--- a/clients/pkg/promtail/targets/kafka/target_syncer_test.go
+++ b/clients/pkg/promtail/targets/kafka/target_syncer_test.go
@@ -46,7 +46,7 @@ func Test_TopicDiscovery(t *testing.T) {
 			cancel: func() {},
 			ConsumerGroup: group,
 			logger: log.NewNopLogger(),
-			discoverer: DiscovererFn(func(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) (RunnableTarget, error) {
+			discoverer: DiscovererFn(func(_ sarama.ConsumerGroupSession, _ sarama.ConsumerGroupClaim) (RunnableTarget, error) {
 				return nil, nil
 			}),
 		},
diff --git a/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser_test.go b/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser_test.go
index f58181ba7ad75..bec94a0e559a4 100644
--- a/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser_test.go
+++ b/clients/pkg/promtail/targets/syslog/syslogparser/syslogparser_test.go
@@ -87,13 +87,13 @@ func TestParseStream_NewlineSeparated(t *testing.T) {
 
 func TestParseStream_InvalidStream(t *testing.T) {
 	r := strings.NewReader("invalid")
-	err := syslogparser.ParseStream(false, r, func(res *syslog.Result) {}, defaultMaxMessageLength)
+	err := syslogparser.ParseStream(false, r, func(_ *syslog.Result) {}, defaultMaxMessageLength)
 	require.EqualError(t, err,
 		"invalid or unsupported framing. first byte: 'i'")
 }
 
 func TestParseStream_EmptyStream(t *testing.T) {
 	r := strings.NewReader("")
-	err := syslogparser.ParseStream(false, r, func(res *syslog.Result) {}, defaultMaxMessageLength)
+	err := syslogparser.ParseStream(false, r, func(_ *syslog.Result) {}, defaultMaxMessageLength)
 	require.Equal(t, err, io.EOF)
 }
diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go
index 08fbcc195ba64..32539e3c83355 100644
--- a/cmd/logcli/main.go
+++ b/cmd/logcli/main.go
@@ -450,7 +450,7 @@ func newQueryClient(app *kingpin.Application) client.Client {
 	}
 
 	// extract host
-	addressAction := func(c *kingpin.ParseContext) error {
+	addressAction := func(_ *kingpin.ParseContext) error {
 		// If a proxy is to be used do not set TLS ServerName. In the case of HTTPS proxy this ensures
 		// the http client validates both the proxy's cert and the cert used by loki behind the proxy
 		// using the ServerName's from the provided --addr and --proxy-url flags.
@@ -494,7 +494,7 @@ func newLabelQuery(cmd *kingpin.CmdClause) *labelquery.LabelQuery {
 	q := &labelquery.LabelQuery{}
 
 	// executed after all command flags are parsed
-	cmd.Action(func(c *kingpin.ParseContext) error {
+	cmd.Action(func(_ *kingpin.ParseContext) error {
 		defaultEnd := time.Now()
 		defaultStart := defaultEnd.Add(-since)
 
@@ -522,7 +522,7 @@ func newSeriesQuery(cmd *kingpin.CmdClause) *seriesquery.SeriesQuery {
 	q := &seriesquery.SeriesQuery{}
 
 	// executed after all command flags are parsed
-	cmd.Action(func(c *kingpin.ParseContext) error {
+	cmd.Action(func(_ *kingpin.ParseContext) error {
 		defaultEnd := time.Now()
 		defaultStart := defaultEnd.Add(-since)
 
@@ -550,7 +550,7 @@ func newQuery(instant bool, cmd *kingpin.CmdClause) *query.Query {
 	q := &query.Query{}
 
 	// executed after all command flags are parsed
-	cmd.Action(func(c *kingpin.ParseContext) error {
+	cmd.Action(func(_ *kingpin.ParseContext) error {
 
 		if instant {
 			q.SetInstant(mustParse(now, time.Now()))
@@ -698,7 +698,7 @@ func newDetectedFieldsQuery(cmd *kingpin.CmdClause) *detected.FieldsQuery {
 	q := &detected.FieldsQuery{}
 
 	// executed after all command flags are parsed
-	cmd.Action(func(c *kingpin.ParseContext) error {
+	cmd.Action(func(_ *kingpin.ParseContext) error {
 		defaultEnd := time.Now()
 		defaultStart := defaultEnd.Add(-since)
diff --git a/cmd/lokitool/main.go b/cmd/lokitool/main.go
index 0bdff6614f5ff..9989a1f03a18d 100644
--- a/cmd/lokitool/main.go
+++ b/cmd/lokitool/main.go
@@ -21,7 +21,7 @@ func main() {
 	ruleCommand.Register(app)
 	auditCommand.Register(app)
 
-	app.Command("version", "Get the version of the lokitool CLI").Action(func(k *kingpin.ParseContext) error {
+	app.Command("version", "Get the version of the lokitool CLI").Action(func(_ *kingpin.ParseContext) error {
 		fmt.Println(version.Print("loki"))
 		return nil
 	})
diff --git a/integration/loki_rule_eval_test.go b/integration/loki_rule_eval_test.go
index e0898c50829a3..b3811ac624252 100644
--- a/integration/loki_rule_eval_test.go
+++ b/integration/loki_rule_eval_test.go
@@ -103,7 +103,7 @@ func testRuleEval(t *testing.T, mode string) {
 
 	// this is the function that will be called when the remote-write receiver receives a request.
 	// it tests that the expected payload is received.
-	expectedResults := func(w http.ResponseWriter, r *http.Request) {
+	expectedResults := func(_ http.ResponseWriter, r *http.Request) {
 		wr, err := remote.DecodeWriteRequest(r.Body)
 		require.NoError(t, err)
 
diff --git a/pkg/bloombuild/builder/batch.go b/pkg/bloombuild/builder/batch.go
index 9961aa23d7c74..d86111d2924a7 100644
--- a/pkg/bloombuild/builder/batch.go
+++ b/pkg/bloombuild/builder/batch.go
@@ -234,7 +234,7 @@ func (i *blockLoadingIter) init() {
 
 	// set "match all" filter function if not present
 	if i.filter == nil {
-		i.filter = func(cbq *bloomshipper.CloseableBlockQuerier) bool { return true }
+		i.filter = func(_ *bloomshipper.CloseableBlockQuerier) bool { return true }
 	}
 
 	// done
diff --git a/pkg/bloombuild/builder/batch_test.go b/pkg/bloombuild/builder/batch_test.go
index 37109d0196af6..e0fe37a0e448f 100644
--- a/pkg/bloombuild/builder/batch_test.go
+++ b/pkg/bloombuild/builder/batch_test.go
@@ -16,7 +16,7 @@ import (
 func TestBatchedLoader(t *testing.T) {
 	t.Parallel()
 
-	errMapper := func(i int) (int, error) {
+	errMapper := func(_ int) (int, error) {
 		return 0, errors.New("bzzt")
 	}
 	successMapper := func(i int) (int, error) {
diff --git a/pkg/canary/comparator/comparator.go b/pkg/canary/comparator/comparator.go
index a575c74eccd81..8d72fac6260f9 100644
--- a/pkg/canary/comparator/comparator.go
+++ b/pkg/canary/comparator/comparator.go
@@ -427,7 +427,7 @@ func (c *Comparator) spotCheckEntries(currTime time.Time) {
 		func(_ int, t *time.Time) bool {
 			return t.Before(currTime.Add(-c.spotCheckMax))
 		},
-		func(_ int, t *time.Time) {
+		func(_ int, _ *time.Time) {
 
 		})
 
@@ -513,7 +513,7 @@ func (c *Comparator) pruneEntries(currentTime time.Time) {
 		func(_ int, t *time.Time) bool {
 			return t.Before(currentTime.Add(-c.wait))
 		},
-		func(_ int, t *time.Time) {
+		func(_ int, _ *time.Time) {
 
 		})
 }
diff --git a/pkg/chunkenc/memchunk_test.go b/pkg/chunkenc/memchunk_test.go
index daa97a2616917..85cccd743cfbb 100644
--- a/pkg/chunkenc/memchunk_test.go
+++ b/pkg/chunkenc/memchunk_test.go
@@ -1539,7 +1539,7 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
 		{
 			name: "no matches - chunk without structured metadata",
 			testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkThroughPlus1, false),
-			filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
+			filterFunc: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 				return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 			},
 			nrMatching: 0,
@@ -1548,7 +1548,7 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
 		{
 			name: "structured metadata not matching",
 			testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkThroughPlus1, true),
-			filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
+			filterFunc: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 				return labels.Labels(structuredMetadata).Get("ding") == "dong"
 			},
 			nrMatching: 0,
@@ -1557,7 +1557,7 @@ func TestMemChunk_ReboundAndFilter_with_filter(t *testing.T) {
 		{
 			name: "some lines removed - with structured metadata",
 			testMemChunk: buildFilterableTestMemChunk(t, chkFrom, chkThrough, &chkFrom, &chkFromPlus5, true),
-			filterFunc: func(_ time.Time, in string, structuredMetadata ...labels.Label) bool {
+			filterFunc: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 				return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 			},
 			nrMatching: 5,
diff --git a/pkg/compactor/deletion/delete_request_test.go b/pkg/compactor/deletion/delete_request_test.go
index f67a06dc483fb..899e83f802e37 100644
--- a/pkg/compactor/deletion/delete_request_test.go
+++ b/pkg/compactor/deletion/delete_request_test.go
@@ -93,7 +93,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
 			},
 			expectedResp: resp{
 				isDeleted: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-time.Hour).UnixNano() {
 						return true
@@ -131,7 +131,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
 			},
 			expectedResp: resp{
 				isDeleted: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if now.Add(-3*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-2*time.Hour).UnixNano() {
 						return true
@@ -150,7 +150,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
 			},
 			expectedResp: resp{
 				isDeleted: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
 						return true
@@ -188,7 +188,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
 			},
 			expectedResp: resp{
 				isDeleted: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if labels.Labels(structuredMetadata).Get(lblPing) == lblPong && now.Add(-2*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.UnixNano() {
 						return true
@@ -226,7 +226,7 @@ func TestDeleteRequest_IsDeleted(t *testing.T) {
 			},
 			expectedResp: resp{
 				isDeleted: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if now.Add(-(2*time.Hour+30*time.Minute)).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-(time.Hour+30*time.Minute)).UnixNano() {
 						return true
diff --git a/pkg/compactor/deletion/delete_requests_manager_test.go b/pkg/compactor/deletion/delete_requests_manager_test.go
index 04aa986ac492d..6eabf2de38799 100644
--- a/pkg/compactor/deletion/delete_requests_manager_test.go
+++ b/pkg/compactor/deletion/delete_requests_manager_test.go
@@ -168,7 +168,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, s string, _ ...labels.Label) bool {
 					return strings.Contains(s, "fizz")
 				},
 			},
@@ -195,7 +195,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 				},
 			},
@@ -222,7 +222,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, s string, structuredMetadata ...labels.Label) bool {
 					return labels.Labels(structuredMetadata).Get(lblPing) == lblPong && strings.Contains(s, "fizz")
 				},
 			},
@@ -346,7 +346,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, s string, _ ...labels.Label) bool {
 					return strings.Contains(s, "fizz")
 				},
 			},
@@ -380,7 +380,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 				},
 			},
@@ -428,7 +428,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if (now.Add(-13*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-11*time.Hour).UnixNano()) ||
 						(now.Add(-10*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-8*time.Hour).UnixNano()) ||
@@ -469,7 +469,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, _ ...labels.Label) bool {
 					return true
 				},
 			},
@@ -503,7 +503,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, s string, _ ...labels.Label) bool {
 					return strings.Contains(s, "fizz")
 				},
 			},
@@ -537,7 +537,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 				},
 			},
@@ -578,7 +578,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, _ ...labels.Label) bool {
 					return true
 				},
 			},
@@ -619,7 +619,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, s string, _ ...labels.Label) bool {
 					return strings.Contains(s, "fizz")
 				},
 			},
@@ -660,7 +660,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, structuredMetadata ...labels.Label) bool {
+				expectedFilter: func(_ time.Time, _ string, structuredMetadata ...labels.Label) bool {
 					return labels.Labels(structuredMetadata).Get(lblPing) == lblPong
 				},
 			},
@@ -784,7 +784,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if (now.Add(-13*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-11*time.Hour).UnixNano()) ||
 						(now.Add(-10*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-8*time.Hour).UnixNano()) {
@@ -852,7 +852,7 @@ func TestDeleteRequestsManager_Expired(t *testing.T) {
 			},
 			expectedResp: resp{
 				isExpired: true,
-				expectedFilter: func(ts time.Time, s string, _ ...labels.Label) bool {
+				expectedFilter: func(ts time.Time, _ string, _ ...labels.Label) bool {
 					tsUnixNano := ts.UnixNano()
 					if (now.Add(-13*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-11*time.Hour).UnixNano()) ||
 						(now.Add(-10*time.Hour).UnixNano() <= tsUnixNano && tsUnixNano <= now.Add(-8*time.Hour).UnixNano()) {
diff --git a/pkg/compactor/deletion/delete_requests_store.go b/pkg/compactor/deletion/delete_requests_store.go
index ee8f324d6b0be..b7ddfe13a6182 100644
--- a/pkg/compactor/deletion/delete_requests_store.go
+++ b/pkg/compactor/deletion/delete_requests_store.go
@@ -225,7 +225,7 @@ func (ds *deleteRequestsStore) GetCacheGenerationNumber(ctx context.Context, use
 	ctx = user.InjectOrgID(ctx, userID)
 
 	genNumber := ""
-	err := ds.indexClient.QueryPages(ctx, []index.Query{query}, func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
+	err := ds.indexClient.QueryPages(ctx, []index.Query{query}, func(_ index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
 		itr := batch.Iterator()
 		for itr.Next() {
 			genNumber = string(itr.Value())
@@ -244,7 +244,7 @@ func (ds *deleteRequestsStore) GetCacheGenerationNumber(ctx context.Context, use
 func (ds *deleteRequestsStore) queryDeleteRequests(ctx context.Context, deleteQuery index.Query) ([]DeleteRequest, error) {
 	var deleteRequests []DeleteRequest
 	var err error
-	err = ds.indexClient.QueryPages(ctx, []index.Query{deleteQuery}, func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
+	err = ds.indexClient.QueryPages(ctx, []index.Query{deleteQuery}, func(_ index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
 		// No need to lock inside the callback since we run a single index query.
 		itr := batch.Iterator()
 		for itr.Next() {
@@ -297,7 +297,7 @@ func (ds *deleteRequestsStore) queryDeleteRequestDetails(ctx context.Context, de
 
 	var marshalError error
 	var requestWithDetails DeleteRequest
-	err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
+	err := ds.indexClient.QueryPages(ctx, deleteRequestQuery, func(_ index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
 		if requestWithDetails, marshalError = unmarshalDeleteRequestDetails(batch.Iterator(), deleteRequest); marshalError != nil {
 			return false
 		}
diff --git a/pkg/compactor/deletion/tenant_request_handler_test.go b/pkg/compactor/deletion/tenant_request_handler_test.go
index c57dc84ba4caf..cca06f4c18cfe 100644
--- a/pkg/compactor/deletion/tenant_request_handler_test.go
+++ b/pkg/compactor/deletion/tenant_request_handler_test.go
@@ -21,7 +21,7 @@ func TestDeleteRequestHandlerDeletionMiddleware(t *testing.T) {
 	}
 
 	// Setup handler
-	middle := TenantMiddleware(fl, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
+	middle := TenantMiddleware(fl, http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {}))
 
 	// User that has deletion enabled
 	req := httptest.NewRequest(http.MethodGet, "http://www.your-domain.com", nil)
diff --git a/pkg/compactor/retention/marker_test.go b/pkg/compactor/retention/marker_test.go
index 48aab32b73f4a..e90ac7dc4aaf9 100644
--- a/pkg/compactor/retention/marker_test.go
+++ b/pkg/compactor/retention/marker_test.go
@@ -50,7 +50,7 @@ func Test_marlkerProcessor_Deadlock(t *testing.T) {
 	paths, _, err := p.availablePath()
 	require.NoError(t, err)
 	for _, path := range paths {
-		require.NoError(t, p.processPath(path, func(ctx context.Context, chunkId []byte) error { return nil }))
+		require.NoError(t, p.processPath(path, func(_ context.Context, _ []byte) error { return nil }))
 		require.NoError(t, p.deleteEmptyMarks(path))
 	}
 	paths, _, err = p.availablePath()
diff --git a/pkg/compactor/retention/retention_test.go b/pkg/compactor/retention/retention_test.go
index b140b3661f4d4..4885c835003c2 100644
--- a/pkg/compactor/retention/retention_test.go
+++ b/pkg/compactor/retention/retention_test.go
@@ -47,7 +47,7 @@ func (m *mockChunkClient) IsChunkNotFoundErr(_ error) bool {
 	return false
 }
 
-func (m *mockChunkClient) getDeletedChunkIds() []string {
+func (m *mockChunkClient) getDeletedChunkIDs() []string {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 
@@ -166,7 +166,7 @@ func Test_Retention(t *testing.T) {
 	store.Stop()
 	if len(expectDeleted) != 0 {
 		require.Eventually(t, func() bool {
-			actual := chunkClient.getDeletedChunkIds()
+			actual := chunkClient.getDeletedChunkIDs()
 			sort.Strings(actual)
 			return assert.ObjectsAreEqual(expectDeleted, actual)
 		}, 10*time.Second, 1*time.Second)
@@ -301,7 +301,7 @@ func TestChunkRewriter(t *testing.T) {
 		{
 			name: "no rewrites",
 			chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, todaysTableInterval.Start, todaysTableInterval.Start.Add(time.Hour)),
-			filterFunc: func(ts time.Time, s string, _ ...labels.Label) bool {
+			filterFunc: func(_ time.Time, _ string, _ ...labels.Label) bool {
 				return false
 			},
 			expectedRespByTables: map[string]tableResp{
@@ -311,7 +311,7 @@ func TestChunkRewriter(t *testing.T) {
 		{
 			name: "no rewrites with chunk spanning multiple tables",
 			chunk: createChunk(t, "1", labels.Labels{labels.Label{Name: "foo", Value: "bar"}}, todaysTableInterval.End.Add(-48*time.Hour), todaysTableInterval.End),
-			filterFunc: func(ts time.Time, s string, _ ...labels.Label) bool {
+			filterFunc: func(_ time.Time, _ string, _ ...labels.Label) bool {
 				return false
 			},
 			expectedRespByTables: map[string]tableResp{
@@ -672,7 +672,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) {
 			expiry: []chunkExpiry{
 				{
 					isExpired: true,
-					filterFunc: func(ts time.Time, s string, _ ...labels.Label) bool {
+					filterFunc: func(_ time.Time, _ string, _ ...labels.Label) bool {
 						return false
 					},
 				},
@@ -814,7 +814,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) {
 			expiry: []chunkExpiry{
 				{
 					isExpired: true,
-					filterFunc: func(ts time.Time, s string, _ ...labels.Label) bool {
+					filterFunc: func(ts time.Time, _ string, _ ...labels.Label) bool {
 						return ts.UnixNano() < todaysTableInterval.Start.UnixNano()
 					},
 				},
@@ -840,7 +840,7 @@ func TestMarkForDelete_SeriesCleanup(t *testing.T) {
 			expiry: []chunkExpiry{
 				{
 					isExpired: true,
-					filterFunc: func(ts time.Time, s string, _ ...labels.Label) bool {
+					filterFunc: func(ts time.Time, _ string, _ ...labels.Label) bool {
 						return ts.UnixNano() < todaysTableInterval.Start.Add(-30*time.Minute).UnixNano()
 					},
 				},
diff --git a/pkg/compactor/table.go b/pkg/compactor/table.go
index c371a5db88f59..8be8190c0ac06 100644
--- a/pkg/compactor/table.go
+++ b/pkg/compactor/table.go
@@ -198,7 +198,7 @@ func (t *table) done() error {
 		userIDs = append(userIDs, userID)
 	}
 
-	err := concurrency.ForEachJob(t.ctx, len(userIDs), t.uploadConcurrency, func(ctx context.Context, idx int) error {
+	err := concurrency.ForEachJob(t.ctx, len(userIDs), t.uploadConcurrency, func(_ context.Context, idx int) error {
 		return t.indexSets[userIDs[idx]].done()
 	})
 	if err != nil {
diff --git a/pkg/compactor/table_test.go b/pkg/compactor/table_test.go
index 462511eca4782..b4f71bbc93956 100644
--- a/pkg/compactor/table_test.go
+++ b/pkg/compactor/table_test.go
@@ -305,7 +305,7 @@ func TestTable_CompactionRetention(t *testing.T) {
 				_, err := os.ReadDir(filepath.Join(storagePath, tableName))
 				require.True(t, os.IsNotExist(err))
 			},
-			tableMarker: TableMarkerFunc(func(ctx context.Context, tableName, userID string, indexFile retention.IndexProcessor, logger log.Logger) (bool, bool, error) {
+			tableMarker: TableMarkerFunc(func(_ context.Context, _, _ string, _ retention.IndexProcessor, _ log.Logger) (bool, bool, error) {
 				return true, true, nil
 			}),
 		},
@@ -325,7 +325,7 @@ func TestTable_CompactionRetention(t *testing.T) {
 					require.True(t, strings.HasSuffix(filename, ".gz"))
 				})
 			},
-			tableMarker: TableMarkerFunc(func(ctx context.Context, tableName, userID string, indexFile retention.IndexProcessor, logger log.Logger) (bool, bool, error) {
+			tableMarker: TableMarkerFunc(func(_ context.Context, _, _ string, _ retention.IndexProcessor, _ log.Logger) (bool, bool, error) {
 				return false, true, nil
 			}),
 		},
@@ -345,7 +345,7 @@ func TestTable_CompactionRetention(t *testing.T) {
 					require.True(t, strings.HasSuffix(filename, ".gz"))
 				})
 			},
-			tableMarker: TableMarkerFunc(func(ctx context.Context, tableName, userID string, indexFile retention.IndexProcessor, logger log.Logger) (bool, bool, error) {
+			tableMarker: TableMarkerFunc(func(_ context.Context, _, _ string, _ retention.IndexProcessor, _ log.Logger) (bool, bool, error) {
 				return false, false, nil
 			}),
 		},
@@ -377,7 +377,7 @@ func TestTable_CompactionRetention(t *testing.T) {
 
 			table, err := newTable(context.Background(), tableWorkingDirectory, storage.NewIndexStorageClient(objectClient, ""),
 				newTestIndexCompactor(), config.PeriodConfig{},
-				tt.tableMarker, IntervalMayHaveExpiredChunksFunc(func(interval model.Interval, userID string) bool {
+				tt.tableMarker, IntervalMayHaveExpiredChunksFunc(func(_ model.Interval, _ string) bool {
 					return true
 				}), 10)
 			require.NoError(t, err)
diff --git a/pkg/configs/client/client.go b/pkg/configs/client/client.go
index 5592fbe1b83dc..44af1bda4f504 100644
--- a/pkg/configs/client/client.go
+++ b/pkg/configs/client/client.go
@@ -96,7 +96,7 @@ func (c ConfigDBClient) GetRules(ctx context.Context, since userconfig.ID) (map[
 	}
 	endpoint := fmt.Sprintf("%s/private/api/prom/configs/rules%s", c.URL.String(), suffix)
 	var response *ConfigsResponse
-	err := instrument.CollectedRequest(ctx, "GetRules", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "GetRules", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var err error
 		response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
 		return err
@@ -122,7 +122,7 @@ func (c ConfigDBClient) GetAlerts(ctx context.Context, since userconfig.ID) (*Co
 	}
 	endpoint := fmt.Sprintf("%s/private/api/prom/configs/alertmanager%s", c.URL.String(), suffix)
 	var response *ConfigsResponse
-	err := instrument.CollectedRequest(ctx, "GetAlerts", configsRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "GetAlerts", configsRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var err error
 		response, err = doRequest(endpoint, c.Timeout, c.TLSConfig, since)
 		return err
diff --git a/pkg/configs/client/configs_test.go b/pkg/configs/client/configs_test.go
index 311c33ca91ad9..64f4b98d202e0 100644
--- a/pkg/configs/client/configs_test.go
+++ b/pkg/configs/client/configs_test.go
@@ -28,7 +28,7 @@ var response = `{
 `
 
 func TestDoRequest(t *testing.T) {
-	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		_, err := w.Write([]byte(response))
 		require.NoError(t, err)
 	}))
diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go
index 0a4dfe6d146a2..080db0d87221c 100644
--- a/pkg/distributor/distributor.go
+++ b/pkg/distributor/distributor.go
@@ -445,7 +445,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
 
 	var validationErr error
 	if validationErrors.Err() != nil {
-		validationErr = httpgrpc.Errorf(http.StatusBadRequest, validationErrors.Error())
+		validationErr = httpgrpc.Errorf(http.StatusBadRequest, "%s", validationErrors.Error())
 	}
 
 	// Return early if none of the streams contained entries
@@ -468,7 +468,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
 			return &logproto.PushResponse{}, nil
 		}
 
-		return nil, httpgrpc.Errorf(retStatusCode, err.Error())
+		return nil, httpgrpc.Errorf(retStatusCode, "%s", err.Error())
 	}
 
 	if !d.ingestionRateLimiter.AllowN(now, tenantID, validatedLineSize) {
@@ -496,7 +496,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log
 
 		err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validatedLineCount, validatedLineSize)
 		d.writeFailuresManager.Log(tenantID, err)
-		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, err.Error())
+		return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s", err.Error())
 	}
 
 	// Nil check for performance reasons, to avoid dynamic lookup and/or no-op
diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go
index 3335f64b523b7..e9528c361bda0 100644
--- a/pkg/distributor/distributor_test.go
+++ b/pkg/distributor/distributor_test.go
@@ -404,7 +404,7 @@ func Test_IncrementTimestamp(t *testing.T) {
 
 		t.Run(testName, func(t *testing.T) {
 			ing := &mockIngester{}
-			distributors, _ := prepare(t, 1, 3, testData.limits, func(addr string) (ring_client.PoolClient, error) { return ing, nil })
+			distributors, _ := prepare(t, 1, 3, testData.limits, func(_ string) (ring_client.PoolClient, error) { return ing, nil })
 			_, err := distributors[0].Push(ctx, testData.push)
 			assert.NoError(t, err)
 			topVal := ing.Peek()
@@ -510,7 +510,7 @@ func Test_SortLabelsOnPush(t *testing.T) {
 		limits := &validation.Limits{}
 		flagext.DefaultValues(limits)
 		ingester := &mockIngester{}
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		request := makeWriteRequest(10, 10)
 		request.Streams[0].Labels = `{buzz="f", service_name="foo", a="b"}`
@@ -533,7 +533,7 @@ func Test_TruncateLogLines(t *testing.T) {
 
 	t.Run("it truncates lines to MaxLineSize when MaxLineSizeTruncate is true", func(t *testing.T) {
 		limits, ingester := setup()
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		_, err := distributors[0].Push(ctx, makeWriteRequest(1, 10))
 		require.NoError(t, err)
@@ -553,10 +553,10 @@ func Test_DiscardEmptyStreamsAfterValidation(t *testing.T) {
 
 	t.Run("it discards invalid entries and discards resulting empty streams completely", func(t *testing.T) {
 		limits, ingester := setup()
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		_, err := distributors[0].Push(ctx, makeWriteRequest(1, 10))
-		require.Equal(t, err, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf(validation.LineTooLongErrorMsg, 5, "{foo=\"bar\"}", 10)))
+		require.Equal(t, err, httpgrpc.Errorf(http.StatusBadRequest, "%s", fmt.Sprintf(validation.LineTooLongErrorMsg, 5, "{foo=\"bar\"}", 10)))
 		topVal := ingester.Peek()
 		require.Nil(t, topVal)
 	})
@@ -1506,7 +1506,7 @@ func Test_DetectLogLevels(t *testing.T) {
 
 	t.Run("log level detection disabled", func(t *testing.T) {
 		limits, ingester := setup(false)
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		writeReq := makeWriteRequestWithLabels(1, 10, []string{`{foo="bar"}`})
 		_, err := distributors[0].Push(ctx, writeReq)
@@ -1518,7 +1518,7 @@ func Test_DetectLogLevels(t *testing.T) {
 
 	t.Run("log level detection enabled but level cannot be detected", func(t *testing.T) {
 		limits, ingester := setup(true)
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		writeReq := makeWriteRequestWithLabels(1, 10, []string{`{foo="bar"}`})
 		_, err := distributors[0].Push(ctx, writeReq)
@@ -1530,7 +1530,7 @@ func Test_DetectLogLevels(t *testing.T) {
 
 	t.Run("log level detection enabled and warn logs", func(t *testing.T) {
 		limits, ingester := setup(true)
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		writeReq := makeWriteRequestWithLabelsWithLevel(1, 10, []string{`{foo="bar"}`}, "warn")
 		_, err := distributors[0].Push(ctx, writeReq)
@@ -1547,7 +1547,7 @@ func Test_DetectLogLevels(t *testing.T) {
 
 	t.Run("log level detection enabled but log level already present in stream", func(t *testing.T) {
 		limits, ingester := setup(true)
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		writeReq := makeWriteRequestWithLabels(1, 10, []string{`{foo="bar", level="debug"}`})
 		_, err := distributors[0].Push(ctx, writeReq)
@@ -1562,7 +1562,7 @@ func Test_DetectLogLevels(t *testing.T) {
 
 	t.Run("log level detection enabled but log level already present as structured metadata", func(t *testing.T) {
 		limits, ingester := setup(true)
-		distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil })
+		distributors, _ := prepare(t, 1, 5, limits, func(_ string) (ring_client.PoolClient, error) { return ingester, nil })
 
 		writeReq := makeWriteRequestWithLabels(1, 10, []string{`{foo="bar"}`})
 		writeReq.Streams[0].Entries[0].StructuredMetadata = push.LabelsAdapter{
diff --git a/pkg/distributor/writefailures/manager_test.go b/pkg/distributor/writefailures/manager_test.go
index fb3d7577953a7..1618e3f048e9c 100644
--- a/pkg/distributor/writefailures/manager_test.go
+++ b/pkg/distributor/writefailures/manager_test.go
@@ -58,7 +58,7 @@ func TestWriteFailuresRateLimiting(t *testing.T) {
 	logger := log.NewLogfmtLogger(buf)
 
 	provider := &providerMock{
-		tenantConfig: func(tenantID string) *runtime.Config {
+		tenantConfig: func(_ string) *runtime.Config {
			return &runtime.Config{
				LimitedLogPushErrors: true,
			}
@@ -84,7 +84,7 @@ func TestWriteFailuresRateLimiting(t *testing.T) {
 			errorStr.WriteRune('z')
 		}
 
-		manager.Log("known-tenant", fmt.Errorf(errorStr.String()))
+		manager.Log("known-tenant", fmt.Errorf("%s", errorStr.String()))
 
 		content := buf.String()
 		require.Empty(t, content)
@@ -98,7 +98,7 @@ func TestWriteFailuresRateLimiting(t *testing.T) {
 			errorStr.WriteRune('z')
 		}
 
-		manager.Log("known-tenant", fmt.Errorf(errorStr.String()))
+		manager.Log("known-tenant", fmt.Errorf("%s", errorStr.String()))
 
 		content := buf.String()
 		require.NotEmpty(t, content)
@@ -117,10 +117,10 @@ func TestWriteFailuresRateLimiting(t *testing.T) {
 			errorStr2.WriteRune('y')
 		}
 
-		manager.Log("known-tenant", fmt.Errorf(errorStr1.String()))
-		manager.Log("known-tenant", fmt.Errorf(errorStr2.String())) // more than 1KB/s
+		manager.Log("known-tenant", fmt.Errorf("%s", errorStr1.String()))
+		manager.Log("known-tenant", fmt.Errorf("%s", errorStr2.String())) // more than 1KB/s
 		time.Sleep(time.Second)
-		manager.Log("known-tenant", fmt.Errorf(errorStr3.String()))
+		manager.Log("known-tenant", fmt.Errorf("%s", errorStr3.String()))
 
 		content := buf.String()
 		require.NotEmpty(t, content)
diff --git a/pkg/indexgateway/client.go b/pkg/indexgateway/client.go
index b5e05b7e26ecd..e8c4c23c243c2 100644
--- a/pkg/indexgateway/client.go
+++ b/pkg/indexgateway/client.go
@@ -349,7 +349,7 @@ func (s *GatewayClient) GetShards(
 
 			return nil
 		},
-		func(err error) bool {
+		func(_ error) bool {
 			errCt++
 			return errCt <= maxErrs
 		},
diff --git a/pkg/indexgateway/client_test.go b/pkg/indexgateway/client_test.go
index 03fdfbcbc1a3c..91005a591eb15 100644
--- a/pkg/indexgateway/client_test.go
+++ b/pkg/indexgateway/client_test.go
@@ -259,7 +259,7 @@ func TestGatewayClient(t *testing.T) {
 	}
 
 	numCallbacks := 0
-	err = gatewayClient.QueryPages(ctx, queries, func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
+	err = gatewayClient.QueryPages(ctx, queries, func(_ index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
 		itr := batch.Iterator()
 
 		for j := 0; j <= numCallbacks; j++ {
diff --git a/pkg/indexgateway/gateway_test.go b/pkg/indexgateway/gateway_test.go
index cf5cd7256486e..9396e865da71b 100644
--- a/pkg/indexgateway/gateway_test.go
+++ b/pkg/indexgateway/gateway_test.go
@@ -446,7 +446,7 @@ func TestAccumulateChunksToShards(t *testing.T) {
 	fsImpl := func(series [][]refWithSizingInfo) sharding.ForSeriesFunc {
 		return sharding.ForSeriesFunc(
 			func(
-				ctx context.Context,
+				_ context.Context,
 				_ string,
 				_ tsdb_index.FingerprintFilter,
 				_, _ model.Time,
@@ -454,7 +454,7 @@ func TestAccumulateChunksToShards(t *testing.T) {
 					_ labels.Labels,
 					fp model.Fingerprint,
 					chks []tsdb_index.ChunkMeta,
-				) (stop bool), matchers ...*labels.Matcher) error {
+				) (stop bool), _ ...*labels.Matcher) error {
 
 				for _, s := range series {
 					chks := []tsdb_index.ChunkMeta{}
diff --git a/pkg/ingester-rf1/flush.go b/pkg/ingester-rf1/flush.go
index 2d194a12f5574..f2710695d7045 100644
--- a/pkg/ingester-rf1/flush.go
+++ b/pkg/ingester-rf1/flush.go
@@ -114,7 +114,7 @@ func (i *Ingester) flushSegment(ctx context.Context, j int, w *wal.SegmentWriter
 	wal.ReportSegmentStats(stats, i.metrics.segmentMetrics)
 
 	id := ulid.MustNew(ulid.Timestamp(time.Now()), rand.Reader).String()
-	if err := i.store.PutObject(ctx, fmt.Sprintf(wal.Dir+id), buf); err != nil {
+	if err := i.store.PutObject(ctx, wal.Dir+id, buf); err != nil {
 		i.metrics.flushFailuresTotal.Inc()
 		return fmt.Errorf("failed to put object: %w", err)
 	}
diff --git a/pkg/ingester-rf1/instance.go b/pkg/ingester-rf1/instance.go
index e05e99ba8b2f6..0444475f7a6bf 100644
--- a/pkg/ingester-rf1/instance.go
+++ b/pkg/ingester-rf1/instance.go
@@ -98,7 +98,7 @@ func (i *instance) Push(ctx context.Context, w *wal.Manager, req *logproto.PushR
 			s, err := i.createStream(ctx, reqStream)
 			return s, err
 		},
-		func(s *stream) error {
+		func(_ *stream) error {
 			return nil
 		},
 	)
@@ -185,7 +185,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
 				"stream", pushReqStream.Labels,
 			)
 		}
-		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 	}
 
 	if err != nil {
diff --git a/pkg/ingester-rf1/objstore/storage.go b/pkg/ingester-rf1/objstore/storage.go
index 889c7e0bb87a4..ec0d734b316b7 100644
--- a/pkg/ingester-rf1/objstore/storage.go
+++ b/pkg/ingester-rf1/objstore/storage.go
@@ -37,7 +37,7 @@ func New(
 	}
 	// sort by From time
 	sort.Slice(periodicConfigs, func(i, j int) bool {
-		return periodicConfigs[i].From.Time.Before(periodicConfigs[i].From.Time)
+		return periodicConfigs[i].From.Time.Before(periodicConfigs[j].From.Time)
 	})
 	for _, periodicConfig := range periodicConfigs {
 		objectClient, err := storage.NewObjectClient(periodicConfig.ObjectType, storageConfig, clientMetrics)
diff --git a/pkg/ingester-rf1/stream.go b/pkg/ingester-rf1/stream.go
index 32ccf454c41c4..8913e206a7c2a 100644
--- a/pkg/ingester-rf1/stream.go
+++ b/pkg/ingester-rf1/stream.go
@@ -176,7 +176,7 @@ func errorForFailedEntries(s *stream, failedEntriesWithError []entryWithError, t
 
 	fmt.Fprintf(&buf, "user '%s', total ignored: %d out of %d for stream: %s", s.tenant, len(failedEntriesWithError), totalEntries, streamName)
 
-	return httpgrpc.Errorf(statusCode, buf.String())
+	return httpgrpc.Errorf(statusCode, "%s", buf.String())
 }
 
 func hasRateLimitErr(errs []entryWithError) bool {
diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go
index f201da437e4ea..17d34b57dc549 100644
--- a/pkg/ingester/ingester_test.go
+++ b/pkg/ingester/ingester_test.go
@@ -1434,7 +1434,7 @@ func createIngesterServer(t *testing.T, ingesterConfig Config) (ingesterClient,
 	}()
 
 	// nolint:staticcheck // grpc.DialContext() has been deprecated; we'll address it before upgrading to gRPC 2.
-	conn, err := grpc.DialContext(context.Background(), "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(ctx context.Context, s string) (net.Conn, error) {
+	conn, err := grpc.DialContext(context.Background(), "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) {
 		return listener.Dial()
 	}))
 	require.NoError(t, err)
diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go
index aeddd5e9f0f13..e2fd472656a9f 100644
--- a/pkg/ingester/instance.go
+++ b/pkg/ingester/instance.go
@@ -289,7 +289,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre
 				"stream", pushReqStream.Labels,
 			)
 		}
-		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 	}
 
 	if record != nil {
diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go
index 0754de9caf0fa..fe4a644c71109 100644
--- a/pkg/ingester/stream.go
+++ b/pkg/ingester/stream.go
@@ -268,7 +268,7 @@ func errorForFailedEntries(s *stream, failedEntriesWithError []entryWithError, t
 
 	fmt.Fprintf(&buf, "user '%s', total ignored: %d out of %d for stream: %s", s.tenant, len(failedEntriesWithError), totalEntries, streamName)
 
-	return httpgrpc.Errorf(statusCode, buf.String())
+	return httpgrpc.Errorf(statusCode, "%s", buf.String())
 }
 
 func hasRateLimitErr(errs []entryWithError) bool {
diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go
index 3bbd091b25c5c..6dbd521f1abc7 100644
--- a/pkg/ingester/stream_test.go
+++ b/pkg/ingester/stream_test.go
@@ -101,7 +101,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
 			}
 			fmt.Fprintf(&expected, "user 'fake', total ignored: %d out of %d for stream: {foo=\"bar\"}", numLogs, numLogs)
-			expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String())
+			expectErr := httpgrpc.Errorf(http.StatusBadRequest, "%s", expected.String())
 
 			_, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false, nil)
 			require.Error(t, err)
diff --git a/pkg/iter/v2/ordering_test.go b/pkg/iter/v2/ordering_test.go
index 6a2e81abae014..fb29cf888a383 100644
--- a/pkg/iter/v2/ordering_test.go
+++ b/pkg/iter/v2/ordering_test.go
@@ -84,7 +84,7 @@ func TestOrdering(t *testing.T) {
 				return o.Unwrap()
 			})
 
-			EqualIterators[int](t, func(a, b int) {}, NewSliceIter(tc.expected), unmap)
+			EqualIterators[int](t, func(_, _ int) {}, NewSliceIter(tc.expected), unmap)
 		})
 	}
 }
diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go
index 0c7880d62257c..531ef15f61ad5 100644
--- a/pkg/logcli/client/client.go
+++ b/pkg/logcli/client/client.go
@@ -449,7 +449,7 @@ func (c *DefaultClient) wsConnect(path, query string, quiet bool) (*websocket.Co
 	}
 
 	if c.ProxyURL != "" {
-		ws.Proxy = func(req *http.Request) (*url.URL, error) {
+		ws.Proxy = func(_ *http.Request) (*url.URL, error) {
 			return url.Parse(c.ProxyURL)
 		}
 	}
diff --git a/pkg/loghttp/query.go b/pkg/loghttp/query.go
index 89ad4e00a79c0..af67b9df2d0a3 100644
--- a/pkg/loghttp/query.go
+++ b/pkg/loghttp/query.go
@@ -506,7 +506,7 @@ func ParseRangeQuery(r *http.Request) (*RangeQuery, error) {
 	if GetVersion(r.URL.Path) == VersionLegacy {
 		result.Query, err = parseRegexQuery(r)
 		if err != nil {
-			return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+			return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 		}
 
 		expr, err := syntax.ParseExpr(result.Query)
diff --git a/pkg/loghttp/tail.go b/pkg/loghttp/tail.go
index 658ae112cce07..109a81b27e24e 100644
--- a/pkg/loghttp/tail.go
+++ b/pkg/loghttp/tail.go
@@ -83,7 +83,7 @@ func ParseTailQuery(r *http.Request) (*logproto.TailRequest, error) {
 
 	req.Query, err = parseRegexQuery(r)
 	if err != nil {
-		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 	}
 
 	req.Limit, err = limit(r)
diff --git a/pkg/logql/accumulator_test.go b/pkg/logql/accumulator_test.go
index d7d379cb87a4e..f9652402a5641 100644
--- a/pkg/logql/accumulator_test.go
+++ b/pkg/logql/accumulator_test.go
@@ -190,7 +190,7 @@ func BenchmarkAccumulator(b *testing.B) {
 		},
 		"quantile sketches": {
 			newQuantileSketchResults(),
-			func(p Params, _ []logqlmodel.Result) Accumulator {
+			func(_ Params, _ []logqlmodel.Result) Accumulator {
 				return newQuantileSketchAccumulator()
 			},
 			params,
diff --git a/pkg/logql/log/jsonexpr/lexer.go b/pkg/logql/log/jsonexpr/lexer.go
index f3ba6dcd9536b..2e0241cc18b37 100644
--- a/pkg/logql/log/jsonexpr/lexer.go
+++ b/pkg/logql/log/jsonexpr/lexer.go
@@ -23,7 +23,7 @@ func NewScanner(r io.Reader, debug bool) *Scanner {
 }
 
 func (sc *Scanner) Error(s string) {
-	sc.err = fmt.Errorf(s)
+	sc.err = fmt.Errorf("%s", s)
 	fmt.Printf("syntax error: %s\n", s)
 }
 
@@ -53,7 +53,7 @@ func (sc *Scanner) lex(lval *JSONExprSymType) int {
 			sc.unread()
 			val, err := sc.scanInt()
 			if err != nil {
-				sc.err = fmt.Errorf(err.Error())
+				sc.err = fmt.Errorf("%s", err.Error())
 				return 0
 			}
 
diff --git a/pkg/logql/log/logfmt/lexer.go b/pkg/logql/log/logfmt/lexer.go
index 06756c0bfea6c..a14bbb55ae434 100644
--- a/pkg/logql/log/logfmt/lexer.go
+++ b/pkg/logql/log/logfmt/lexer.go
@@ -22,7 +22,7 @@ func NewScanner(r io.Reader, debug bool) *Scanner {
 }
 
 func (sc *Scanner) Error(s string) {
-	sc.err = fmt.Errorf(s)
+	sc.err = fmt.Errorf("%s", s)
 	fmt.Printf("syntax error: %s\n", s)
 }
 
diff --git a/pkg/logql/log/metrics_extraction.go b/pkg/logql/log/metrics_extraction.go
index e8605f6b293a7..a7e37cfcc042f 100644
--- a/pkg/logql/log/metrics_extraction.go
+++ b/pkg/logql/log/metrics_extraction.go
@@ -22,7 +22,7 @@ const (
 type LineExtractor func([]byte) float64
 
 var (
-	CountExtractor LineExtractor = func(line []byte) float64 { return 1. }
+	CountExtractor LineExtractor = func(_ []byte) float64 { return 1. }
 	BytesExtractor LineExtractor = func(line []byte) float64 { return float64(len(line)) }
 )
 
diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go
index 38231a936a02b..0ecab6313a40f 100644
--- a/pkg/logql/syntax/ast.go
+++ b/pkg/logql/syntax/ast.go
@@ -60,7 +60,7 @@ func ExtractLineFilters(e Expr) []LineFilterExpr {
 	}
 	var filters []LineFilterExpr
 	visitor := &DepthFirstTraversal{
-		VisitLineFilterFn: func(v RootVisitor, e *LineFilterExpr) {
+		VisitLineFilterFn: func(_ RootVisitor, e *LineFilterExpr) {
 			if e != nil {
 				filters = append(filters, *e)
 			}
diff --git a/pkg/logql/syntax/visit_test.go b/pkg/logql/syntax/visit_test.go
index eeb040ce83a1a..445f165d9057a 100644
--- a/pkg/logql/syntax/visit_test.go
+++ b/pkg/logql/syntax/visit_test.go
@@ -12,16 +12,16 @@ func TestDepthFirstTraversalVisitor(t *testing.T) {
 
 	visited := [][2]string{}
 	visitor := &DepthFirstTraversal{
-		VisitLabelParserFn: func(v RootVisitor, e *LabelParserExpr) {
+		VisitLabelParserFn: func(_ RootVisitor, e *LabelParserExpr) {
 			visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
 		},
-		VisitLineFilterFn: func(v RootVisitor, e *LineFilterExpr) {
+		VisitLineFilterFn: func(_ RootVisitor, e *LineFilterExpr) {
 			visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
 		},
-		VisitLogfmtParserFn: func(v RootVisitor, e *LogfmtParserExpr) {
+		VisitLogfmtParserFn: func(_ RootVisitor, e *LogfmtParserExpr) {
 			visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
 		},
-		VisitMatchersFn: func(v RootVisitor, e *MatchersExpr) {
+		VisitMatchersFn: func(_ RootVisitor, e *MatchersExpr) {
 			visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()})
 		},
 	}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index bbdaf85e7b7d0..b08f829bb9159 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -1166,7 +1166,7 @@ func (t *Loki) initCacheGenerationLoader() (_ services.Service, err error) {
 	}
 
 	t.cacheGenerationLoader = generationnumber.NewGenNumberLoader(client, prometheus.DefaultRegisterer)
-	return services.NewIdleService(nil, func(failureCase error) error {
+	return services.NewIdleService(nil, func(_ error) error {
 		t.cacheGenerationLoader.Stop()
 		return nil
 	}), nil
diff --git a/pkg/loki/version_handler.go b/pkg/loki/version_handler.go
index ef49d1b0f7de7..bf4e28027508d 100644
--- a/pkg/loki/version_handler.go
+++ b/pkg/loki/version_handler.go
@@ -10,7 +10,7 @@ import (
 )
 
 func versionHandler() http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
+	return func(w http.ResponseWriter, _ *http.Request) {
 		info := prom.PrometheusVersion{
 			Version:  build.Version,
 			Revision: build.Revision,
diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go
index 7c9e50daf8b59..4163800f4bca2 100644
--- a/pkg/lokifrontend/frontend/transport/handler.go
+++ b/pkg/lokifrontend/frontend/transport/handler.go
@@ -37,8 +37,8 @@ const (
 )
 
 var (
-	errCanceled              = httpgrpc.Errorf(StatusClientClosedRequest, context.Canceled.Error())
-	errDeadlineExceeded      = httpgrpc.Errorf(http.StatusGatewayTimeout, context.DeadlineExceeded.Error())
+	errCanceled              = httpgrpc.Errorf(StatusClientClosedRequest, "%s", context.Canceled.Error())
+	errDeadlineExceeded      = httpgrpc.Errorf(http.StatusGatewayTimeout, "%s", context.DeadlineExceeded.Error())
 	errRequestEntityTooLarge = httpgrpc.Errorf(http.StatusRequestEntityTooLarge, "http: request body too large")
 )
 
diff --git a/pkg/lokifrontend/frontend/v1/frontend_test.go b/pkg/lokifrontend/frontend/v1/frontend_test.go
index
2d26e9f188a3b..b7e4061d804a0 100644 --- a/pkg/lokifrontend/frontend/v1/frontend_test.go +++ b/pkg/lokifrontend/frontend/v1/frontend_test.go @@ -46,7 +46,7 @@ const ( ) func TestFrontend(t *testing.T) { - handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil }) test := func(addr string, _ *Frontend) { @@ -77,7 +77,7 @@ func TestFrontendPropagateTrace(t *testing.T) { observedTraceID := make(chan string, 2) - handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(ctx context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { sp := opentracing.SpanFromContext(ctx) defer sp.Finish() @@ -157,7 +157,7 @@ func TestFrontendCheckReady(t *testing.T) { // the underlying query is correctly cancelled _and not retried_. func TestFrontendCancel(t *testing.T) { var tries atomic.Int32 - handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(ctx context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { <-ctx.Done() tries.Inc() return nil, ctx.Err() @@ -188,7 +188,7 @@ func TestFrontendCancel(t *testing.T) { } func TestFrontendMetricsCleanup(t *testing.T) { - handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &queryrange.LokiLabelNamesResponse{Data: []string{"Hello", "world"}, Version: uint32(loghttp.VersionV1)}, nil }) diff --git a/pkg/lokifrontend/frontend/v2/frontend_scheduler_worker.go b/pkg/lokifrontend/frontend/v2/frontend_scheduler_worker.go index 1fe304f490ff4..d818d90c23454 100644 --- a/pkg/lokifrontend/frontend/v2/frontend_scheduler_worker.go +++ b/pkg/lokifrontend/frontend/v2/frontend_scheduler_worker.go @@ -325,7 +325,7 @@ func (w *frontendSchedulerWorker) schedulerLoop(loop schedulerpb.SchedulerForFro case schedulerpb.ERROR: req.enqueue <- enqueueResult{status: waitForResponse} - req.response <- ResponseTuple{nil, httpgrpc.Errorf(http.StatusInternalServerError, resp.Error)} + req.response <- ResponseTuple{nil, httpgrpc.Errorf(http.StatusInternalServerError, "%s", resp.Error)} case schedulerpb.TOO_MANY_REQUESTS_PER_TENANT: req.enqueue <- enqueueResult{status: waitForResponse} req.response <- ResponseTuple{nil, httpgrpc.Errorf(http.StatusTooManyRequests, "too many outstanding requests")} diff --git a/pkg/lokifrontend/frontend/v2/frontend_test.go b/pkg/lokifrontend/frontend/v2/frontend_test.go index 41fa9653f6949..baf62348216f5 100644 --- a/pkg/lokifrontend/frontend/v2/frontend_test.go +++ b/pkg/lokifrontend/frontend/v2/frontend_test.go @@ -186,7 +186,7 @@ func TestFrontendRetryEnqueue(t *testing.T) { func TestFrontendEnqueueFailure(t *testing.T) { cfg := Config{} flagext.DefaultValues(&cfg) - f, _ := setupFrontend(t, cfg, func(f *Frontend, msg *schedulerpb.FrontendToScheduler) *schedulerpb.SchedulerToFrontend { + f, _ := setupFrontend(t, cfg, func(_ *Frontend, _ *schedulerpb.FrontendToScheduler) 
*schedulerpb.SchedulerToFrontend { return &schedulerpb.SchedulerToFrontend{Status: schedulerpb.SHUTTING_DOWN} }) diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go index 5c48d2980e022..64cb4c4103e83 100644 --- a/pkg/pattern/drain/drain.go +++ b/pkg/pattern/drain/drain.go @@ -312,14 +312,14 @@ func (d *Drain) pruneTree(node *Node) int { } } - validClusterIds := 0 + validClusterIDs := 0 for _, clusterID := range node.clusterIDs { cluster := d.idToCluster.Get(clusterID) if cluster != nil { - validClusterIds++ + validClusterIDs++ } } - return len(node.keyToChildNode) + validClusterIds + return len(node.keyToChildNode) + validClusterIDs } func (d *Drain) Delete(cluster *LogCluster) { diff --git a/pkg/pattern/ingester_querier.go b/pkg/pattern/ingester_querier.go index a77dd47b31137..3a275ffd46445 100644 --- a/pkg/pattern/ingester_querier.go +++ b/pkg/pattern/ingester_querier.go @@ -52,7 +52,7 @@ func NewIngesterQuerier( func (q *IngesterQuerier) Patterns(ctx context.Context, req *logproto.QueryPatternsRequest) (*logproto.QueryPatternsResponse, error) { _, err := syntax.ParseMatchers(req.Query, true) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } resps, err := q.forAllIngesters(ctx, func(_ context.Context, client logproto.PatternClient) (interface{}, error) { return client.Query(ctx, req) diff --git a/pkg/pattern/instance.go b/pkg/pattern/instance.go index 719f90d69075c..24f2814e467f5 100644 --- a/pkg/pattern/instance.go +++ b/pkg/pattern/instance.go @@ -102,7 +102,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { } if ownedStream { - if reqStream.Entries == nil || len(reqStream.Entries) == 0 { + if len(reqStream.Entries) == 0 { continue } s, _, err := i.streams.LoadOrStoreNew(reqStream.Labels, @@ -158,7 +158,7 @@ func (i *instance) isOwnedStream(ingesterID string, stream string) (bool, error) func (i *instance) Iterator(ctx context.Context, req *logproto.QueryPatternsRequest) (iter.Iterator, error) { matchers, err := syntax.ParseMatchers(req.Query, true) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) step := model.Time(req.Step) @@ -216,7 +216,7 @@ outer: func (i *instance) createStream(_ context.Context, pushReqStream logproto.Stream) (*stream, error) { labels, err := syntax.ParseLabels(pushReqStream.Labels) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } fp := i.getHashForLabels(labels) sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp) diff --git a/pkg/querier-rf1/http.go b/pkg/querier-rf1/http.go index 279d52bf9ccc9..baa820a99460f 100644 --- a/pkg/querier-rf1/http.go +++ b/pkg/querier-rf1/http.go @@ -300,7 +300,7 @@ func (q *QuerierAPI) PatternsHandler(ctx context.Context, req *logproto.QueryPat func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } // entry limit does not apply to metric queries. 
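A note on the recurring rewrite above: passing a dynamic string as the format argument of a printf-style function (httpgrpc.Errorf, fmt.Errorf) is what the updated govet/printf check flags, because any '%' in the message would be interpreted as a formatting verb. Moving the message behind a constant "%s" format makes the call safe and silences the check. A minimal, self-contained sketch of the failure mode (illustrative only, not code from this patch):

package main

import "fmt"

func main() {
	// Dynamic text that happens to contain '%', e.g. a fragment of a user query.
	msg := `unexpected token "%" at line 3`

	bad := fmt.Errorf(msg)        // flagged: non-constant format string
	good := fmt.Errorf("%s", msg) // constant format string; msg becomes an operand

	fmt.Println(bad)  // mangled: the stray '%' is parsed as a verb with a missing operand
	fmt.Println(good) // prints the message verbatim
}

Where the message needs no formatting at all, errors.New(msg) would be the idiomatic alternative; keeping the "%s" form preserves each call site's shape and keeps this lint-driven diff mechanical.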
@@ -341,7 +341,7 @@ func WrapQuerySpanAndTimeout(call string, limits Limits) middleware.Interface { tenants, err := tenant.TenantIDs(ctx) if err != nil { level.Error(log).Log("msg", "couldn't fetch tenantID", "err", err) - serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w) + serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), w) return } diff --git a/pkg/querier-rf1/querier.go b/pkg/querier-rf1/querier.go index ead5f744dae88..a2ff7100376ad 100644 --- a/pkg/querier-rf1/querier.go +++ b/pkg/querier-rf1/querier.go @@ -924,7 +924,7 @@ func (q *Rf1Querier) Patterns(ctx context.Context, req *logproto.QueryPatternsRe } res, err := q.patternQuerier.Patterns(ctx, req) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return res, err diff --git a/pkg/querier/http.go b/pkg/querier/http.go index 9a12b9b96271c..5f0e928b6a1c0 100644 --- a/pkg/querier/http.go +++ b/pkg/querier/http.go @@ -135,20 +135,20 @@ func (q *QuerierAPI) LabelHandler(ctx context.Context, req *logproto.LabelReques // TailHandler is a http.HandlerFunc for handling tail queries. func (q *QuerierAPI) TailHandler(w http.ResponseWriter, r *http.Request) { upgrader := websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { return true }, + CheckOrigin: func(_ *http.Request) bool { return true }, } logger := util_log.WithContext(r.Context(), util_log.Logger) req, err := loghttp.ParseTailQuery(r) if err != nil { - serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w) + serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), w) return } tenantID, err := tenant.TenantID(r.Context()) if err != nil { level.Warn(logger).Log("msg", "error getting tenant id", "err", err) - serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w) + serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), w) return } @@ -420,7 +420,7 @@ func (q *QuerierAPI) PatternsHandler(ctx context.Context, req *logproto.QueryPat func (q *QuerierAPI) validateMaxEntriesLimits(ctx context.Context, expr syntax.Expr, limit uint32) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } // entry limit does not apply to metric queries. 
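The other change repeated throughout this patch renames parameters that a callback must declare but never reads to the blank identifier, as in CheckOrigin above. This satisfies an unused-parameter lint (presumably revive's unused-parameter rule, given the golangci-lint context) without changing the function type. A small sketch with hypothetical names (visitor and walk are illustrative, not from this repo):

package main

import "fmt"

// visitor is a callback type; implementations must accept both
// parameters even if they only need one of them.
type visitor func(depth int, node string)

func walk(v visitor) {
	v(0, "root")
}

func main() {
	// Only node is used, so depth is blanked with '_': the literal
	// still matches visitor, and the linter no longer reports it.
	walk(func(_ int, node string) {
		fmt.Println("visited", node)
	})
}

The rename is purely cosmetic at runtime; it only documents that the argument is intentionally ignored.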
@@ -461,7 +461,7 @@ func WrapQuerySpanAndTimeout(call string, limits Limits) middleware.Interface { tenants, err := tenant.TenantIDs(ctx) if err != nil { level.Error(log).Log("msg", "couldn't fetch tenantID", "err", err) - serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w) + serverutil.WriteError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), w) return } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index 7f5cc24ff276c..2ff95b1dff1a8 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -142,7 +142,7 @@ func (c *querierClientMock) Close() error { // newIngesterClientMockFactory creates a factory function always returning // the input querierClientMock func newIngesterClientMockFactory(c *querierClientMock) ring_client.PoolFactory { - return ring_client.PoolAddrFunc(func(addr string) (ring_client.PoolClient, error) { + return ring_client.PoolAddrFunc(func(_ string) (ring_client.PoolClient, error) { return c, nil }) } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index e542b28247cd1..afe9a77a95ca9 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -254,7 +254,7 @@ func TestQuerier_SeriesAPI(t *testing.T) { { "ingester error", mkReq([]string{`{a="1"}`}), - func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) { + func(store *storeMock, _ *queryClientMock, ingester *querierClientMock, _ validation.Limits, req *logproto.SeriesRequest) { ingester.On("Series", mock.Anything, req, mock.Anything).Return(nil, errors.New("tst-err")) store.On("SelectSeries", mock.Anything, mock.Anything).Return(nil, nil) @@ -268,7 +268,7 @@ func TestQuerier_SeriesAPI(t *testing.T) { { "store error", mkReq([]string{`{a="1"}`}), - func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) { + func(store *storeMock, _ *queryClientMock, ingester *querierClientMock, _ validation.Limits, req *logproto.SeriesRequest) { ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{ {"a": "1"}, }), nil) @@ -284,7 +284,7 @@ func TestQuerier_SeriesAPI(t *testing.T) { { "no matches", mkReq([]string{`{a="1"}`}), - func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) { + func(store *storeMock, _ *queryClientMock, ingester *querierClientMock, _ validation.Limits, req *logproto.SeriesRequest) { ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse(nil), nil) store.On("SelectSeries", mock.Anything, mock.Anything).Return(nil, nil) }, @@ -298,7 +298,7 @@ func TestQuerier_SeriesAPI(t *testing.T) { { "returns series", mkReq([]string{`{a="1"}`}), - func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) { + func(store *storeMock, _ *queryClientMock, ingester *querierClientMock, _ validation.Limits, req *logproto.SeriesRequest) { ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{ {"a": "1", "b": "2"}, {"a": "1", "b": "3"}, @@ -344,7 +344,7 @@ func TestQuerier_SeriesAPI(t *testing.T) { { "dedupes", mkReq([]string{`{a="1"}`}), - func(store *storeMock, querier *queryClientMock, ingester *querierClientMock, limits validation.Limits, req *logproto.SeriesRequest) { 
+ func(store *storeMock, _ *queryClientMock, ingester *querierClientMock, _ validation.Limits, req *logproto.SeriesRequest) { ingester.On("Series", mock.Anything, req, mock.Anything).Return(mockSeriesResponse([]map[string]string{ {"a": "1", "b": "2"}, }), nil) diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 97cc813637a7d..2c4ff98c92c89 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -327,7 +327,7 @@ func (*DetectedLabelsRequest) GetCachingOptions() (res queryrangebase.CachingOpt func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (queryrangebase.Request, error) { if err := r.ParseForm(); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } disableCacheReq := false @@ -340,13 +340,13 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case QueryRangeOp: req, err := parseRangeQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return req, nil case InstantQueryOp: req, err := parseInstantQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } req.CachingOptions = queryrangebase.CachingOptions{ @@ -357,7 +357,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case SeriesOp: req, err := loghttp.ParseAndValidateSeriesQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &LokiSeriesRequest{ Match: req.Groups, @@ -369,7 +369,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case LabelNamesOp: req, err := loghttp.ParseLabelQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &LabelRequest{ @@ -379,7 +379,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case IndexStatsOp: req, err := loghttp.ParseIndexStatsQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.IndexStatsRequest{ @@ -390,7 +390,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case IndexShardsOp: req, targetBytes, err := loghttp.ParseIndexShardsQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.ShardsRequest{ @@ -402,7 +402,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case VolumeOp: req, err := loghttp.ParseVolumeInstantQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.VolumeRequest{ @@ -420,7 +420,7 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case VolumeRangeOp: req, err 
:= loghttp.ParseVolumeRangeQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.VolumeRequest{ @@ -438,12 +438,12 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case DetectedFieldsOp: req, err := loghttp.ParseDetectedFieldsQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } _, err = syntax.ParseExpr(req.Query) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &DetectedFieldsRequest{ @@ -453,20 +453,20 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer case PatternsQueryOp: req, err := loghttp.ParsePatternsQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return req, nil case DetectedLabelsOp: req, err := loghttp.ParseDetectedLabelsQuery(r) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &DetectedLabelsRequest{ DetectedLabelsRequest: *req, path: r.URL.Path, }, nil default: - return nil, httpgrpc.Errorf(http.StatusNotFound, fmt.Sprintf("unknown request path: %s", r.URL.Path)) + return nil, httpgrpc.Errorf(http.StatusNotFound, "%s", fmt.Sprintf("unknown request path: %s", r.URL.Path)) } } @@ -477,7 +477,7 @@ var labelNamesRoutes = regexp.MustCompile(`/loki/api/v1/label/(?P<name>[^/]+)/va func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) (queryrangebase.Request, context.Context, error) { httpReq, err := http.NewRequest(r.Method, r.Url, io.NopCloser(bytes.NewBuffer(r.Body))) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusInternalServerError, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusInternalServerError, "%s", err.Error()) } httpReq = httpReq.WithContext(ctx) httpReq.RequestURI = r.Url @@ -524,28 +524,28 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) } if err := httpReq.ParseForm(); err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } switch op := getOperation(httpReq.URL.Path); op { case QueryRangeOp: req, err := parseRangeQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return req, ctx, nil case InstantQueryOp: req, err := parseInstantQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return req, ctx, nil case SeriesOp: req, err := loghttp.ParseAndValidateSeriesQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &LokiSeriesRequest{ Match: req.Groups, @@ -557,7 +557,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case LabelNamesOp: req, err :=
loghttp.ParseLabelQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } if req.Name == "" { @@ -574,7 +574,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case IndexStatsOp: req, err := loghttp.ParseIndexStatsQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.IndexStatsRequest{ @@ -585,7 +585,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case IndexShardsOp: req, targetBytes, err := loghttp.ParseIndexShardsQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.ShardsRequest{ @@ -598,7 +598,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case VolumeOp: req, err := loghttp.ParseVolumeInstantQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.VolumeRequest{ @@ -613,7 +613,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case VolumeRangeOp: req, err := loghttp.ParseVolumeRangeQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } from, through := util.RoundToMilliseconds(req.Start, req.End) return &logproto.VolumeRequest{ @@ -628,7 +628,7 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case DetectedFieldsOp: req, err := loghttp.ParseDetectedFieldsQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &DetectedFieldsRequest{ @@ -638,27 +638,27 @@ func (Codec) DecodeHTTPGrpcRequest(ctx context.Context, r *httpgrpc.HTTPRequest) case PatternsQueryOp: req, err := loghttp.ParsePatternsQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return req, ctx, nil case DetectedLabelsOp: req, err := loghttp.ParseDetectedLabelsQuery(httpReq) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } return &DetectedLabelsRequest{ DetectedLabelsRequest: *req, path: httpReq.URL.Path, }, ctx, err default: - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, fmt.Sprintf("unknown request path in HTTP gRPC decode: %s", r.Url)) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", fmt.Sprintf("unknown request path in HTTP gRPC decode: %s", r.Url)) } } // DecodeHTTPGrpcResponse decodes an httpgrp.HTTPResponse to queryrangebase.Response. 
func (Codec) DecodeHTTPGrpcResponse(r *httpgrpc.HTTPResponse, req queryrangebase.Request) (queryrangebase.Response, error) { if r.Code/100 != 2 { - return nil, httpgrpc.Errorf(int(r.Code), string(r.Body)) + return nil, httpgrpc.Errorf(int(r.Code), "%s", string(r.Body)) } headers := make(http.Header) @@ -989,7 +989,7 @@ func (c Codec) EncodeRequest(ctx context.Context, r queryrangebase.Request) (*ht return req.WithContext(ctx), nil default: - return nil, httpgrpc.Errorf(http.StatusInternalServerError, fmt.Sprintf("invalid request format, got (%T)", r)) + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "%s", fmt.Sprintf("invalid request format, got (%T)", r)) } } @@ -1041,7 +1041,7 @@ type Buffer interface { func (Codec) DecodeResponse(_ context.Context, r *http.Response, req queryrangebase.Request) (queryrangebase.Response, error) { if r.StatusCode/100 != 2 { body, _ := io.ReadAll(r.Body) - return nil, httpgrpc.Errorf(r.StatusCode, string(body)) + return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } if r.Header.Get("Content-Type") == ProtobufType { @@ -1377,7 +1377,7 @@ func encodeResponseJSONTo(version loghttp.Version, res queryrangebase.Response, return err } default: - return httpgrpc.Errorf(http.StatusInternalServerError, fmt.Sprintf("invalid response format, got (%T)", res)) + return httpgrpc.Errorf(http.StatusInternalServerError, "%s", fmt.Sprintf("invalid response format, got (%T)", res)) } return nil @@ -1389,7 +1389,7 @@ func encodeResponseProtobuf(ctx context.Context, res queryrangebase.Response) (* p, err := QueryResponseWrap(res) if err != nil { - return nil, httpgrpc.Errorf(http.StatusInternalServerError, err.Error()) + return nil, httpgrpc.Errorf(http.StatusInternalServerError, "%s", err.Error()) } buf, err := p.Marshal() @@ -2130,7 +2130,7 @@ func NewEmptyResponse(r queryrangebase.Request) (queryrangebase.Response, error) // range query can either be metrics or logs expr, err := syntax.ParseExpr(req.Query) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } if _, ok := expr.(syntax.SampleExpr); ok { return &LokiPromResponse{ diff --git a/pkg/querier/queryrange/index_stats_cache_test.go b/pkg/querier/queryrange/index_stats_cache_test.go index 4d0f4124788a4..cd4a3cc1139c7 100644 --- a/pkg/querier/queryrange/index_stats_cache_test.go +++ b/pkg/querier/queryrange/index_stats_cache_test.go @@ -212,7 +212,7 @@ func TestIndexStatsCache_RecentData(t *testing.T) { func indexStatsResultHandler(v *IndexStatsResponse) (*int, queryrangebase.Handler) { calls := 0 - return &calls, queryrangebase.HandlerFunc(func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + return &calls, queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { calls++ return v, nil }) diff --git a/pkg/querier/queryrange/limits.go b/pkg/querier/queryrange/limits.go index 695c0d5346fa4..0e47e6b762bff 100644 --- a/pkg/querier/queryrange/limits.go +++ b/pkg/querier/queryrange/limits.go @@ -156,7 +156,7 @@ func (l limitsMiddleware) Do(ctx context.Context, r queryrangebase.Request) (que tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } // Clamp the time range based on the max query lookback. 
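One observation on the call sites above that already built their message with fmt.Sprintf: the mechanical fix keeps the Sprintf and hides it behind "%s", but since httpgrpc.Errorf is itself printf-style, the format and arguments could be passed to it directly. A sketch of the two equivalent forms (the httpgrpc import path matches the one used elsewhere in this repository; this is commentary, not a change the patch makes):

package main

import (
	"fmt"
	"net/http"

	"github.com/grafana/dskit/httpgrpc"
)

// viaSprintf mirrors the patched call sites: a constant "%s" format
// wrapping a pre-rendered message.
func viaSprintf(r interface{}) error {
	return httpgrpc.Errorf(http.StatusInternalServerError, "%s", fmt.Sprintf("invalid request format, got (%T)", r))
}

// direct produces the same error text without the intermediate Sprintf.
func direct(r interface{}) error {
	return httpgrpc.Errorf(http.StatusInternalServerError, "invalid request format, got (%T)", r)
}

func main() {
	fmt.Println(viaSprintf(42))
	fmt.Println(direct(42))
}

Keeping the uniform "%s" rewrite arguably makes a large lint-driven diff easier to review; folding the Sprintf away would be a follow-up-sized cleanup.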
@@ -352,7 +352,7 @@ func (q *querySizeLimiter) Do(ctx context.Context, r queryrangebase.Request) (qu tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } limitFuncCapture := func(id string) int { return q.limitFunc(ctx, id) } @@ -495,7 +495,7 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } parallelism := MinWeightedParallelism( @@ -508,7 +508,7 @@ func (rt limitedRoundTripper) Do(c context.Context, request queryrangebase.Reque ) if parallelism < 1 { - return nil, httpgrpc.Errorf(http.StatusTooManyRequests, ErrMaxQueryParalellism.Error()) + return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s", ErrMaxQueryParalellism.Error()) } semWithTiming := NewSemaphoreWithTiming(int64(parallelism)) @@ -678,7 +678,7 @@ func MinWeightedParallelism(ctx context.Context, tenantIDs []string, configs []c func validateMaxEntriesLimits(ctx context.Context, reqLimit uint32, limits Limits) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } maxEntriesCapture := func(id string) int { return limits.MaxEntriesLimitPerQuery(ctx, id) } diff --git a/pkg/querier/queryrange/limits_test.go b/pkg/querier/queryrange/limits_test.go index 7cc2ad951b262..d2ad3385b26ac 100644 --- a/pkg/querier/queryrange/limits_test.go +++ b/pkg/querier/queryrange/limits_test.go @@ -248,7 +248,7 @@ func Test_MaxQueryParallelism(t *testing.T) { _, _ = NewLimitedRoundTripper(h, fakeLimits{maxQueryParallelism: maxQueryParallelism}, testSchemas, base.MiddlewareFunc(func(next base.Handler) base.Handler { - return base.HandlerFunc(func(c context.Context, r base.Request) (base.Response, error) { + return base.HandlerFunc(func(c context.Context, _ base.Request) (base.Response, error) { var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) @@ -306,7 +306,7 @@ func Test_MaxQueryParallelismDisable(t *testing.T) { _, err := NewLimitedRoundTripper(h, fakeLimits{maxQueryParallelism: maxQueryParallelism}, testSchemas, base.MiddlewareFunc(func(next base.Handler) base.Handler { - return base.HandlerFunc(func(c context.Context, r base.Request) (base.Response, error) { + return base.HandlerFunc(func(c context.Context, _ base.Request) (base.Response, error) { for i := 0; i < 10; i++ { go func() { _, _ = next.Do(c, &LokiRequest{}) @@ -759,7 +759,7 @@ func Test_MaxQuerySize_MaxLookBackPeriod(t *testing.T) { } handler := tc.middleware.Wrap( - base.HandlerFunc(func(_ context.Context, req base.Request) (base.Response, error) { + base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { return &LokiResponse{}, nil }), ) diff --git a/pkg/querier/queryrange/log_result_cache.go b/pkg/querier/queryrange/log_result_cache.go index da3dc58896a4f..842004a34b7b1 100644 --- a/pkg/querier/queryrange/log_result_cache.go +++ b/pkg/querier/queryrange/log_result_cache.go @@ -86,7 +86,7 @@ func (l *logResultCache) Do(ctx context.Context, req queryrangebase.Request) (qu defer sp.Finish() tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, 
httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } if l.shouldCache != nil && !l.shouldCache(ctx, req) { diff --git a/pkg/querier/queryrange/marshal.go b/pkg/querier/queryrange/marshal.go index b3920e00a6668..ab7a483890e03 100644 --- a/pkg/querier/queryrange/marshal.go +++ b/pkg/querier/queryrange/marshal.go @@ -332,7 +332,7 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra if concrete.Instant.Plan == nil { parsed, err := syntax.ParseExpr(concrete.Instant.GetQuery()) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } concrete.Instant.Plan = &plan.QueryPlan{ AST: parsed, @@ -350,7 +350,7 @@ func (Codec) QueryRequestUnwrap(ctx context.Context, req *QueryRequest) (queryra if concrete.Streams.Plan == nil { parsed, err := syntax.ParseExpr(concrete.Streams.GetQuery()) if err != nil { - return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, ctx, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } concrete.Streams.Plan = &plan.QueryPlan{ AST: parsed, diff --git a/pkg/querier/queryrange/queryrangebase/middleware_test.go b/pkg/querier/queryrange/queryrangebase/middleware_test.go index b5517046308a8..90d0b401aa0ff 100644 --- a/pkg/querier/queryrange/queryrangebase/middleware_test.go +++ b/pkg/querier/queryrange/queryrangebase/middleware_test.go @@ -18,7 +18,7 @@ func TestCacheGenNumberHeaderSetterMiddleware(t *testing.T) { loader := &fakeGenNumberLoader{genNumber: "test-header-value"} mware := CacheGenNumberHeaderSetterMiddleware(loader). - Wrap(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {})) + Wrap(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) mware.ServeHTTP(w, req) assert.Equal(t, w.Header().Get(ResultsCacheGenNumberHeaderName), "test-header-value") diff --git a/pkg/querier/queryrange/queryrangebase/promql_test.go b/pkg/querier/queryrange/queryrangebase/promql_test.go index e5c9e119d68c6..edc7a0e5829a0 100644 --- a/pkg/querier/queryrange/queryrangebase/promql_test.go +++ b/pkg/querier/queryrange/queryrangebase/promql_test.go @@ -570,7 +570,7 @@ func Test_FunctionParallelism(t *testing.T) { } -var shardAwareQueryable = storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { +var shardAwareQueryable = storage.QueryableFunc(func(_, _ int64) (storage.Querier, error) { return &testMatrix{ series: []*promql.StorageSeries{ newSeries(labels.Labels{{Name: "__name__", Value: "bar1"}, {Name: "baz", Value: "blip"}, {Name: "bar", Value: "blop"}, {Name: "foo", Value: "barr"}}, factor(5)), diff --git a/pkg/querier/queryrange/queryrangebase/query_range.go b/pkg/querier/queryrange/queryrangebase/query_range.go index bb85f1a191247..24338d60f585a 100644 --- a/pkg/querier/queryrange/queryrangebase/query_range.go +++ b/pkg/querier/queryrange/queryrangebase/query_range.go @@ -204,7 +204,7 @@ func (p prometheusCodec) MergeResponse(responses ...Response) (Response, error) func (prometheusCodec) DecodeResponse(ctx context.Context, r *http.Response, _ Request) (Response, error) { if r.StatusCode/100 != 2 { body, _ := io.ReadAll(r.Body) - return nil, httpgrpc.Errorf(r.StatusCode, string(body)) + return nil, httpgrpc.Errorf(r.StatusCode, "%s", string(body)) } sp, ctx := opentracing.StartSpanFromContext(ctx, "ParseQueryRangeResponse") //nolint:ineffassign,staticcheck defer sp.Finish() diff --git a/pkg/querier/queryrange/queryrangebase/results_cache_test.go 
b/pkg/querier/queryrange/queryrangebase/results_cache_test.go index 1453808d14646..456bdf7704de5 100644 --- a/pkg/querier/queryrange/queryrangebase/results_cache_test.go +++ b/pkg/querier/queryrange/queryrangebase/results_cache_test.go @@ -419,7 +419,7 @@ func TestResultsCache(t *testing.T) { PrometheusResponseExtractor{}, nil, nil, - func(_ context.Context, tenantIDs []string, r Request) int { + func(_ context.Context, _ []string, _ Request) int { return mockLimits{}.MaxQueryParallelism(context.Background(), "fake") }, false, @@ -466,7 +466,7 @@ func TestResultsCacheRecent(t *testing.T) { PrometheusResponseExtractor{}, nil, nil, - func(_ context.Context, tenantIDs []string, r Request) int { + func(_ context.Context, _ []string, _ Request) int { return mockLimits{}.MaxQueryParallelism(context.Background(), "fake") }, false, @@ -577,7 +577,7 @@ func TestResultsCacheShouldCacheFunc(t *testing.T) { PrometheusResponseExtractor{}, nil, tc.shouldCache, - func(_ context.Context, tenantIDs []string, r Request) int { + func(_ context.Context, _ []string, _ Request) int { return mockLimits{}.MaxQueryParallelism(context.Background(), "fake") }, false, diff --git a/pkg/querier/queryrange/queryrangebase/retry_test.go b/pkg/querier/queryrange/queryrangebase/retry_test.go index f3a33b45c9d1e..dec1d82b5e9f6 100644 --- a/pkg/querier/queryrange/queryrangebase/retry_test.go +++ b/pkg/querier/queryrange/queryrangebase/retry_test.go @@ -29,7 +29,7 @@ func TestRetry(t *testing.T) { }{ { name: "retry failures", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { if try.Inc() == 5 { return &PrometheusResponse{Status: "Hello World"}, nil } @@ -40,7 +40,7 @@ func TestRetry(t *testing.T) { }, { name: "don't retry 400s", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() return nil, httpgrpc.Errorf(http.StatusBadRequest, "Bad Request") }), @@ -49,7 +49,7 @@ func TestRetry(t *testing.T) { }, { name: "retry 500s", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() return nil, httpgrpc.Errorf(http.StatusInternalServerError, "Internal Server Error") }), @@ -58,7 +58,7 @@ func TestRetry(t *testing.T) { }, { name: "last error", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { if try.Inc() == 5 { return nil, httpgrpc.Errorf(http.StatusBadRequest, "Bad Request") } @@ -71,7 +71,7 @@ func TestRetry(t *testing.T) { // Next set of tests validate the retry behavior when using protobuf encoding where the status does not include the details. 
{ name: "protobuf enc don't retry 400s", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() return nil, status.New(codes.Code(http.StatusBadRequest), "Bad Request").Err() }), @@ -80,7 +80,7 @@ func TestRetry(t *testing.T) { }, { name: "protobuf enc retry 500s", - handler: HandlerFunc(func(_ context.Context, req Request) (Response, error) { + handler: HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() return nil, status.New(codes.Code(http.StatusInternalServerError), "Internal Server Error").Err() }), @@ -111,7 +111,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() _, err := NewRetryMiddleware(log.NewNopLogger(), 5, nil, constants.Loki).Wrap( - HandlerFunc(func(c context.Context, r Request) (Response, error) { + HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() return nil, ctx.Err() }), @@ -121,7 +121,7 @@ func Test_RetryMiddlewareCancel(t *testing.T) { ctx, cancel = context.WithCancel(context.Background()) _, err = NewRetryMiddleware(log.NewNopLogger(), 5, nil, constants.Loki).Wrap( - HandlerFunc(func(c context.Context, r Request) (Response, error) { + HandlerFunc(func(_ context.Context, _ Request) (Response, error) { try.Inc() cancel() return nil, errors.New("failed") diff --git a/pkg/querier/queryrange/querysharding.go b/pkg/querier/queryrange/querysharding.go index bd5c26079636b..9fe578fad665a 100644 --- a/pkg/querier/queryrange/querysharding.go +++ b/pkg/querier/queryrange/querysharding.go @@ -124,7 +124,7 @@ type astMapperware struct { func (ast *astMapperware) checkQuerySizeLimit(ctx context.Context, bytesPerShard uint64, notShardable bool) error { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } maxQuerierBytesReadCapture := func(id string) int { return ast.limits.MaxQuerierBytesRead(ctx, id) } @@ -323,7 +323,7 @@ type shardSplitter struct { func (splitter *shardSplitter) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } minShardingLookback := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, splitter.limits.MinShardingLookback) if minShardingLookback == 0 { @@ -456,7 +456,7 @@ func (ss *seriesShardingHandler) Do(ctx context.Context, r queryrangebase.Reques tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } requestResponses, err := queryrangebase.DoRequests( ctx, diff --git a/pkg/querier/queryrange/querysharding_test.go b/pkg/querier/queryrange/querysharding_test.go index 809013ffb0021..49c915566a5b2 100644 --- a/pkg/querier/queryrange/querysharding_test.go +++ b/pkg/querier/queryrange/querysharding_test.go @@ -152,7 +152,7 @@ func Test_astMapper(t *testing.T) { var lock sync.Mutex called := 0 - handler := queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, 
error) { lock.Lock() defer lock.Unlock() resp := lokiResps[called] @@ -264,7 +264,7 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) { } { t.Run(tc.desc, func(t *testing.T) { statsCalled := 0 - handler := queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { if casted, ok := req.(*logproto.IndexStatsRequest); ok { statsCalled++ @@ -341,7 +341,7 @@ func Test_astMapper_QuerySizeLimits(t *testing.T) { func Test_ShardingByPass(t *testing.T) { called := 0 - handler := queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { called++ return nil, nil }) @@ -413,7 +413,7 @@ func Test_hasShards(t *testing.T) { // astmapper successful stream & prom conversion func mockHandler(resp queryrangebase.Response, err error) queryrangebase.Handler { - return queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + return queryrangebase.HandlerFunc(func(ctx context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { if expired := ctx.Err(); expired != nil { return nil, expired } @@ -445,7 +445,7 @@ func Test_InstantSharding(t *testing.T) { nil, []string{}, ) - response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { lock.Lock() defer lock.Unlock() called++ @@ -508,7 +508,7 @@ func Test_SeriesShardingHandler(t *testing.T) { ) ctx := user.InjectOrgID(context.Background(), "1") - response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + response, err := sharding.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { req, ok := r.(*LokiSeriesRequest) if !ok { return nil, errors.New("not a series call") @@ -711,7 +711,7 @@ func TestShardingAcrossConfigs_ASTMapper(t *testing.T) { var lock sync.Mutex called := 0 - handler := queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { lock.Lock() defer lock.Unlock() called++ @@ -814,7 +814,7 @@ func TestShardingAcrossConfigs_SeriesSharding(t *testing.T) { DefaultCodec, ) - _, err := mware.Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + _, err := mware.Wrap(queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { _, ok := r.(*LokiSeriesRequest) if !ok { return nil, errors.New("not a series call") @@ -839,7 +839,7 @@ func Test_ASTMapper_MaxLookBackPeriod(t *testing.T) { engineOpts := testEngineOpts engineOpts.MaxLookBackPeriod = 1 * time.Hour - queryHandler := queryrangebase.HandlerFunc(func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + queryHandler := queryrangebase.HandlerFunc(func(_ context.Context, _ 
queryrangebase.Request) (queryrangebase.Response, error) { return &LokiResponse{}, nil }) diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index f553c61dafb67..3b8031cb5e1ef 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -388,17 +388,17 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, for _, g := range groups { if err := validateMatchers(ctx, r.limits, g.Matchers); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } } return r.metric.Do(ctx, req) case syntax.LogSelectorExpr: if err := validateMaxEntriesLimits(ctx, op.Limit, r.limits); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } if err := validateMatchers(ctx, r.limits, e.Matchers()); err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } // Some queries we don't want to parallelize as aggressively, like limited queries and `datasample` queries diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 27d3ff781b0b5..2f3b5fcd92ee3 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -1086,7 +1086,7 @@ func TestTripperware_RequiredLabels(t *testing.T) { _, err = tpw.Wrap(h).Do(ctx, lreq) if test.expectedError != "" { - require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, test.expectedError), err) + require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "%s", test.expectedError), err) } else { require.NoError(t, err) } @@ -1194,7 +1194,7 @@ func TestTripperware_RequiredNumberLabels(t *testing.T) { _, err = tpw.Wrap(h).Do(ctx, lreq) if tc.expectedError != noErr { - require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, tc.expectedError), err) + require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, "%s", tc.expectedError), err) } else { require.NoError(t, err) } @@ -1543,7 +1543,7 @@ func (i ingesterQueryOpts) QueryIngestersWithin() time.Duration { func counter() (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + return &count, base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() count++ @@ -1554,7 +1554,7 @@ func counter() (*int, base.Handler) { func counterWithError(err error) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + return &count, base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() count++ @@ -1565,7 +1565,7 @@ func counterWithError(err error) (*int, base.Handler) { func promqlResult(v parser.Value) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + return &count, base.HandlerFunc(func(_ context.Context, r base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() count++ @@ -1581,7 +1581,7 @@ func promqlResult(v parser.Value) (*int, base.Handler) { func seriesResult(v logproto.SeriesResponse) (*int, base.Handler) { count := 0 var lock sync.Mutex - return &count, 
base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { + return &count, base.HandlerFunc(func(_ context.Context, _ base.Request) (base.Response, error) { lock.Lock() defer lock.Unlock() count++ diff --git a/pkg/querier/queryrange/serialize_test.go b/pkg/querier/queryrange/serialize_test.go index 0bd6c36aa4bd6..f37face6e9351 100644 --- a/pkg/querier/queryrange/serialize_test.go +++ b/pkg/querier/queryrange/serialize_test.go @@ -108,7 +108,7 @@ func TestResponseFormat(t *testing.T) { }, } { t.Run(fmt.Sprintf("%s returns the expected format", tc.url), func(t *testing.T) { - handler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + handler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return tc.response, nil }) httpHandler := NewSerializeHTTPHandler(handler, DefaultCodec) diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 040befd26de93..701a045270d0b 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -179,7 +179,7 @@ func (h *splitByInterval) loop(ctx context.Context, ch <-chan *lokiResult, next func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } var interval time.Duration diff --git a/pkg/querier/queryrange/split_by_interval_test.go b/pkg/querier/queryrange/split_by_interval_test.go index c74ec05c252c7..de1b19be10450 100644 --- a/pkg/querier/queryrange/split_by_interval_test.go +++ b/pkg/querier/queryrange/split_by_interval_test.go @@ -1550,7 +1550,7 @@ func Test_splitByInterval_Do(t *testing.T) { func Test_series_splitByInterval_Do(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "1") - next := queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + next := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &LokiSeriesResponse{ Status: "success", Version: uint32(loghttp.VersionV1), @@ -1653,7 +1653,7 @@ func Test_seriesvolume_splitByInterval_Do(t *testing.T) { from := model.TimeFromUnixNano(start.UnixNano()) through := model.TimeFromUnixNano(end.UnixNano()) - next := queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + next := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &VolumeResponse{ Response: &logproto.VolumeResponse{ Volumes: []logproto.Volume{ @@ -1691,7 +1691,7 @@ func Test_seriesvolume_splitByInterval_Do(t *testing.T) { t.Run("volumes with limits", func(t *testing.T) { from := model.TimeFromUnixNano(start.UnixNano()) through := model.TimeFromUnixNano(end.UnixNano()) - next := queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + next := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &VolumeResponse{ Response: &logproto.VolumeResponse{ Volumes: []logproto.Volume{ @@ -1733,7 +1733,7 @@ func Test_seriesvolume_splitByInterval_Do(t *testing.T) { t.Run("volumes 
with a query split by of 0", func(t *testing.T) { from := model.TimeFromUnixNano(start.UnixNano()) through := model.TimeFromUnixNano(end.UnixNano()) - next := queryrangebase.HandlerFunc(func(_ context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + next := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return &VolumeResponse{ Response: &logproto.VolumeResponse{ Volumes: []logproto.Volume{ diff --git a/pkg/querier/queryrange/split_by_range.go b/pkg/querier/queryrange/split_by_range.go index 380466d04408b..98ac6f6b34d13 100644 --- a/pkg/querier/queryrange/split_by_range.go +++ b/pkg/querier/queryrange/split_by_range.go @@ -59,7 +59,7 @@ func (s *splitByRange) Do(ctx context.Context, request queryrangebase.Request) ( tenants, err := tenant.TenantIDs(ctx) if err != nil { - return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()) } interval := validation.SmallestPositiveNonZeroDurationPerTenant(tenants, s.limits.InstantMetricQuerySplitDuration) diff --git a/pkg/querier/queryrange/split_by_range_test.go b/pkg/querier/queryrange/split_by_range_test.go index 0f61c3c276b1f..e3c30c66cc54c 100644 --- a/pkg/querier/queryrange/split_by_range_test.go +++ b/pkg/querier/queryrange/split_by_range_test.go @@ -275,7 +275,7 @@ func Test_RangeVectorSplitAlign(t *testing.T) { } resp, err := srm.Wrap(queryrangebase.HandlerFunc( - func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { // req should match with one of the subqueries. ts := req.(*LokiInstantRequest).TimeTs subq, ok := byTimeTs[ts.UnixNano()] @@ -411,7 +411,7 @@ func Test_RangeVectorSplit(t *testing.T) { tc := tc t.Run(tc.in.GetQuery(), func(t *testing.T) { resp, err := srm.Wrap(queryrangebase.HandlerFunc( - func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + func(_ context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { // Assert subquery request for _, reqResp := range tc.subQueries { if req.GetQuery() == reqResp.Request.GetQuery() { @@ -421,7 +421,7 @@ func Test_RangeVectorSplit(t *testing.T) { } } - return nil, fmt.Errorf("subquery request '" + req.GetQuery() + "' not found") + return nil, fmt.Errorf("%s", "subquery request '"+req.GetQuery()+"' not found") })).Do(ctx, tc.in) require.NoError(t, err) require.Equal(t, tc.expected, resp.(*LokiPromResponse).Response) diff --git a/pkg/querier/queryrange/stats_test.go b/pkg/querier/queryrange/stats_test.go index 8c48a9ece8538..c2b6b3755bda4 100644 --- a/pkg/querier/queryrange/stats_test.go +++ b/pkg/querier/queryrange/stats_test.go @@ -24,7 +24,7 @@ func TestStatsCollectorMiddleware(t *testing.T) { now = time.Now() ) ctx := context.WithValue(context.Background(), ctxKey, data) - _, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) { + _, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) { return nil, nil })).Do(ctx, &LokiRequest{ Query: "foo", @@ -37,7 +37,7 @@ func TestStatsCollectorMiddleware(t *testing.T) { // no context. 
@@ -37,7 +37,7 @@ func TestStatsCollectorMiddleware(t *testing.T) {
 	// no context.
 	data = &queryData{}
-	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
 		return nil, nil
 	})).Do(context.Background(), &LokiRequest{
 		Query: "foo",
@@ -48,7 +48,7 @@ func TestStatsCollectorMiddleware(t *testing.T) {
 	// stats
 	data = &queryData{}
 	ctx = context.WithValue(context.Background(), ctxKey, data)
-	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
 		return &LokiPromResponse{
 			Statistics: stats.Result{
 				Ingester: stats.Ingester{
@@ -69,7 +69,7 @@ func TestStatsCollectorMiddleware(t *testing.T) {
 	// Rationale being, in that case returned `response` will be nil and there won't be any `response.statistics` to collect.
 	data = &queryData{}
 	ctx = context.WithValue(context.Background(), ctxKey, data)
-	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+	_, _ = StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
 		return nil, errors.New("request timedout")
 	})).Do(ctx, &LokiRequest{
 		Query: "foo",
@@ -86,17 +86,17 @@ func Test_StatsHTTP(t *testing.T) {
 	}{
 		{
 			"should not record metric if nothing is recorded",
-			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 				data := r.Context().Value(ctxKey).(*queryData)
 				data.recorded = false
 			}),
-			func(t *testing.T, data *queryData) {
+			func(t *testing.T, _ *queryData) {
 				t.Fail()
 			},
 		},
 		{
 			"empty statistics success",
-			http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 				data := r.Context().Value(ctxKey).(*queryData)
 				data.recorded = true
 				data.params, _ = ParamsFromRequest(&LokiRequest{
@@ -189,7 +189,7 @@ func Test_StatsHTTP(t *testing.T) {
 }
 
 func Test_StatsUpdateResult(t *testing.T) {
-	resp, err := StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(c context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+	resp, err := StatsCollectorMiddleware().Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
 		time.Sleep(20 * time.Millisecond)
 		return &LokiResponse{}, nil
 	})).Do(context.Background(), &LokiRequest{
diff --git a/pkg/querier/queryrange/views.go b/pkg/querier/queryrange/views.go
index b34020934c1c5..2757c76b7708d 100644
--- a/pkg/querier/queryrange/views.go
+++ b/pkg/querier/queryrange/views.go
@@ -123,7 +123,7 @@ func (v *SeriesIdentifierView) ForEachLabel(fn func(string, string) error) error
 			return false, err
 		}
 
-		err = molecule.MessageEach(codec.NewBuffer(entry), func(fieldNum int32, labelOrKey molecule.Value) (bool, error) {
+		err = molecule.MessageEach(codec.NewBuffer(entry), func(_ int32, labelOrKey molecule.Value) (bool, error) {
 			s, err := labelOrKey.AsStringUnsafe()
 			if err != nil {
 				return false, err
diff --git a/pkg/querier/queryrange/views_test.go b/pkg/querier/queryrange/views_test.go
index 7d1938dacb775..ead7981a7aee3 100644
--- a/pkg/querier/queryrange/views_test.go
+++ b/pkg/querier/queryrange/views_test.go
@@ -185,7 +185,7 @@ func TestMergedViewDeduplication(t *testing.T) {
 	}
 
 	count := 0
-	err := view.ForEachUniqueSeries(func(s *SeriesIdentifierView) error {
+	err := view.ForEachUniqueSeries(func(_ *SeriesIdentifierView) error {
 		count++
 		return nil
 	})
diff --git a/pkg/querier/queryrange/volume_test.go b/pkg/querier/queryrange/volume_test.go
index 7327a58e15d9e..d4d2a9febe33d 100644
--- a/pkg/querier/queryrange/volume_test.go
+++ b/pkg/querier/queryrange/volume_test.go
@@ -258,7 +258,7 @@ func Test_toPrometheusResponse(t *testing.T) {
 
 func Test_VolumeMiddleware(t *testing.T) {
 	makeVolumeRequest := func(req *logproto.VolumeRequest) *queryrangebase.PrometheusResponse {
-		nextHandler := queryrangebase.HandlerFunc(func(ctx context.Context, r queryrangebase.Request) (queryrangebase.Response, error) {
+		nextHandler := queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (queryrangebase.Response, error) {
 			return &VolumeResponse{
 				Response: &logproto.VolumeResponse{
 					Volumes: []logproto.Volume{
diff --git a/pkg/querier/tail_test.go b/pkg/querier/tail_test.go
index 4867574e5792c..3be5e5f053dc9 100644
--- a/pkg/querier/tail_test.go
+++ b/pkg/querier/tail_test.go
@@ -33,7 +33,7 @@ func TestTailer(t *testing.T) {
 		"tail logs from historic entries only (no tail clients provided)": {
 			historicEntries: mockStreamIterator(1, 2),
 			tailClient:      nil,
-			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
+			tester: func(t *testing.T, tailer *Tailer, _ *tailClientMock) {
 				responses, err := readFromTailer(tailer, 2)
 				require.NoError(t, err)
 
@@ -82,7 +82,7 @@ func TestTailer(t *testing.T) {
 		"honor max entries per tail response": {
 			historicEntries: mockStreamIterator(1, maxEntriesPerTailResponse+1),
 			tailClient:      nil,
-			tester: func(t *testing.T, tailer *Tailer, tailClient *tailClientMock) {
+			tester: func(t *testing.T, tailer *Tailer, _ *tailClientMock) {
 				responses, err := readFromTailer(tailer, maxEntriesPerTailResponse+1)
 				require.NoError(t, err)
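The hunks above are representative of most of this patch: callback parameters that a body never reads are renamed to the blank identifier, which satisfies the unused-parameter lint (presumably revive's unused-parameter rule; the repo's linter configuration is not shown in this section) while keeping the signature that the adapter type requires. A minimal sketch using stand-in types rather than Loki's queryrangebase package:

    package main

    import "context"

    type Request any
    type Response any

    // HandlerFunc mirrors the adapter shape used throughout the query path.
    type HandlerFunc func(context.Context, Request) (Response, error)

    func main() {
    	// Before: ctx and r are declared but never read. This compiles,
    	// but the linter reports both parameters as unused.
    	_ = HandlerFunc(func(ctx context.Context, r Request) (Response, error) {
    		return nil, nil
    	})

    	// After: blank identifiers keep the required signature while
    	// documenting that the arguments are intentionally ignored.
    	_ = HandlerFunc(func(_ context.Context, _ Request) (Response, error) {
    		return nil, nil
    	})
    }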
diff --git a/pkg/querier/worker/scheduler_processor_test.go b/pkg/querier/worker/scheduler_processor_test.go
index 264d5a1769fd1..1634da77c4afc 100644
--- a/pkg/querier/worker/scheduler_processor_test.go
+++ b/pkg/querier/worker/scheduler_processor_test.go
@@ -79,7 +79,7 @@ func TestSchedulerProcessor_processQueriesOnSingleStream(t *testing.T) {
 		workerCtx, workerCancel := context.WithCancel(context.Background())
 
-		requestHandler.On("Do", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
+		requestHandler.On("Do", mock.Anything, mock.Anything).Run(func(_ mock.Arguments) {
 			// Cancel the worker context while the query execution is in progress.
 			workerCancel()
diff --git a/pkg/queue/queue_test.go b/pkg/queue/queue_test.go
index b51ccf7cc2a06..9b4aca8481c72 100644
--- a/pkg/queue/queue_test.go
+++ b/pkg/queue/queue_test.go
@@ -31,7 +31,7 @@ func BenchmarkGetNextRequest(b *testing.B) {
 	}{
 		{
 			"without sub-queues",
-			func(i int) []string { return nil },
+			func(_ int) []string { return nil },
 		},
 		{
 			"with 1 level of sub-queues",
@@ -554,7 +554,7 @@ func assertChanReceived(t *testing.T, c chan struct{}, timeout time.Duration, ms
 	select {
 	case <-c:
 	case <-time.After(timeout):
-		t.Fatalf(msg)
+		t.Fatal(msg)
 	}
 }
 
diff --git a/pkg/ruler/base/compat_test.go b/pkg/ruler/base/compat_test.go
index e37ef6646811a..1dd65282ddf75 100644
--- a/pkg/ruler/base/compat_test.go
+++ b/pkg/ruler/base/compat_test.go
@@ -197,7 +197,7 @@ func TestMetricsQueryFuncErrors(t *testing.T) {
 			queries := prometheus.NewCounter(prometheus.CounterOpts{})
 			failures := prometheus.NewCounter(prometheus.CounterOpts{})
 
-			mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
+			mockFunc := func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
 				return promql.Vector{}, WrapQueryableErrors(tc.returnedError)
 			}
 			qf := MetricsQueryFunc(mockFunc, queries, failures)
@@ -214,7 +214,7 @@ func TestMetricsQueryFuncErrors(t *testing.T) {
 func TestRecordAndReportRuleQueryMetrics(t *testing.T) {
 	queryTime := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"user"})
 
-	mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
+	mockFunc := func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
 		time.Sleep(1 * time.Second)
 		return promql.Vector{}, nil
 	}
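The t.Fatal change in queue_test.go above is the printf problem in testing form: t.Fatalf treats its first argument as a format string, so a caller-supplied message containing "%" would be re-interpreted, and vet's printf analyzer flags the non-constant format. The patched helper, shown here as a self-contained sketch with explanatory comments added:

    package queue

    import (
    	"testing"
    	"time"
    )

    func assertChanReceived(t *testing.T, c chan struct{}, timeout time.Duration, msg string) {
    	t.Helper()
    	select {
    	case <-c:
    	case <-time.After(timeout):
    		// t.Fatalf(msg) would mangle a msg such as "expected 100% delivery"
    		// into "expected 100%!d(MISSING)elivery"; t.Fatal prints it verbatim.
    		t.Fatal(msg)
    	}
    }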
diff --git a/pkg/ruler/base/ruler.go b/pkg/ruler/base/ruler.go
index 7255142829c4d..2e6c74c759dfb 100644
--- a/pkg/ruler/base/ruler.go
+++ b/pkg/ruler/base/ruler.go
@@ -417,7 +417,7 @@ func grafanaLinkForExpression(expr, datasourceUID string) string {
 //
 // Copied from Prometheus's main.go.
 func SendAlerts(n sender, externalURL, datasourceUID string) promRules.NotifyFunc {
-	return func(ctx context.Context, expr string, alerts ...*promRules.Alert) {
+	return func(_ context.Context, expr string, alerts ...*promRules.Alert) {
 		var res []*notifier.Alert
 
 		for _, alert := range alerts {
diff --git a/pkg/ruler/base/ruler_test.go b/pkg/ruler/base/ruler_test.go
index c80ad29cb1ad0..b180c559d8d3b 100644
--- a/pkg/ruler/base/ruler_test.go
+++ b/pkg/ruler/base/ruler_test.go
@@ -108,12 +108,12 @@ func (r ruleLimits) RulerAlertManagerConfig(tenantID string) *config.AlertManage
 
 func testQueryableFunc(q storage.Querier) storage.QueryableFunc {
 	if q != nil {
-		return func(mint, maxt int64) (storage.Querier, error) {
+		return func(_, _ int64) (storage.Querier, error) {
 			return q, nil
 		}
 	}
 
-	return func(mint, maxt int64) (storage.Querier, error) {
+	return func(_, _ int64) (storage.Querier, error) {
 		return storage.NoopQuerier(), nil
 	}
 }
@@ -245,7 +245,7 @@ func TestNotifierSendsUserIDHeader(t *testing.T) {
 	// We do expect 1 API call for the user create with the getOrCreateNotifier()
 	wg.Add(1)
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	ts := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r)
 		assert.NoError(t, err)
 		assert.Equal(t, userID, "1")
@@ -290,7 +290,7 @@ func TestMultiTenantsNotifierSendsUserIDHeader(t *testing.T) {
 	// We do expect 2 API calls for the users create with the getOrCreateNotifier()
 	wg.Add(2)
-	ts1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	ts1 := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r)
 		assert.NoError(t, err)
 		assert.Equal(t, userID, tenant1)
@@ -298,7 +298,7 @@ func TestMultiTenantsNotifierSendsUserIDHeader(t *testing.T) {
 	}))
 	defer ts1.Close()
 
-	ts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	ts2 := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		userID, _, err := tenant.ExtractTenantIDFromHTTPRequest(r)
 		assert.NoError(t, err)
 		assert.Equal(t, userID, tenant2)
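testQueryableFunc above also uses the compact form: consecutive unused parameters of the same type can share a single blank list, func(_, _ int64), instead of blanking each name separately. A tiny illustrative sketch; the type is a local stand-in for Prometheus's storage.QueryableFunc:

    package main

    import "fmt"

    // QueryableFunc stands in for storage.QueryableFunc(mint, maxt int64).
    type QueryableFunc func(mint, maxt int64) (string, error)

    func main() {
    	// Both int64 parameters are ignored, so they share one blank list.
    	var noop QueryableFunc = func(_, _ int64) (string, error) {
    		return "noop querier", nil
    	}
    	q, _ := noop(0, 1000)
    	fmt.Println(q)
    }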
@@ -1836,7 +1836,7 @@ func TestRecoverAlertsPostOutage(t *testing.T) {
 	defer m.Unregister()
 
 	// create a ruler but don't start it. instead, we'll evaluate the rule groups manually.
 	r := buildRuler(t, rulerCfg, &fakeQuerier{
-		fn: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+		fn: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
 			return series.NewConcreteSeriesSet([]storage.Series{
 				series.NewConcreteSeries(
 					labels.Labels{
@@ -1978,7 +1978,7 @@ func TestRuleGroupAlertsAndSeriesLimit(t *testing.T) {
 			defer m.Unregister()
 
 			r := buildRuler(tt, rulerCfg, &fakeQuerier{
-				fn: func(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+				fn: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
 					return series.NewConcreteSeriesSet([]storage.Series{
 						series.NewConcreteSeries(
 							labels.Labels{
diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 3f413a13b8c5f..838f08cb7f227 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -251,9 +251,9 @@ func validateRuleNode(r *rulefmt.RuleNode, groupName string) error {
 		return errors.Errorf("field 'expr' must be set in rule")
 	} else if _, err := syntax.ParseExpr(r.Expr.Value); err != nil {
 		if r.Record.Value != "" {
-			return errors.Wrapf(err, fmt.Sprintf("could not parse expression for record '%s' in group '%s'", r.Record.Value, groupName))
+			return errors.Wrapf(err, "could not parse expression for record '%s' in group '%s'", r.Record.Value, groupName)
 		}
-		return errors.Wrapf(err, fmt.Sprintf("could not parse expression for alert '%s' in group '%s'", r.Alert.Value, groupName))
+		return errors.Wrapf(err, "could not parse expression for alert '%s' in group '%s'", r.Alert.Value, groupName)
 	}
 
 	if r.Record.Value != "" {
diff --git a/pkg/ruler/evaluator_remote_test.go b/pkg/ruler/evaluator_remote_test.go
index 0b11978a7f7ed..3d76d57640dab 100644
--- a/pkg/ruler/evaluator_remote_test.go
+++ b/pkg/ruler/evaluator_remote_test.go
@@ -45,7 +45,7 @@ func TestRemoteEvalQueryTimeout(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// sleep for slightly longer than the timeout
 			time.Sleep(timeout + (100 * time.Millisecond))
 			return &httpgrpc.HTTPResponse{
@@ -79,7 +79,7 @@ func TestRemoteEvalMaxResponseSize(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// generate a response of random bytes that's just too big for the max response size
 			var resp = make([]byte, exceededSize)
 			_, err = rand.Read(resp)
@@ -116,7 +116,7 @@ func TestRemoteEvalScalar(t *testing.T) {
 	)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing
 			// the expected JSON response by hand
 			resp := loghttp.QueryResponse{
@@ -162,7 +162,7 @@ func TestRemoteEvalEmptyScalarResponse(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing
 			// the expected JSON response by hand
 			resp := loghttp.QueryResponse{
@@ -205,7 +205,7 @@ func TestRemoteEvalVectorResponse(t *testing.T) {
 	value := 35891
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing
 			// the expected JSON response by hand
 			resp := loghttp.QueryResponse{
@@ -267,7 +267,7 @@ func TestRemoteEvalEmptyVectorResponse(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing
 			// the expected JSON response by hand
 			resp := loghttp.QueryResponse{
@@ -307,7 +307,7 @@ func TestRemoteEvalErrorResponse(t *testing.T) {
 	var respErr = fmt.Errorf("some error occurred")
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			return nil, respErr
 		},
 	}
@@ -331,7 +331,7 @@ func TestRemoteEvalNon2xxResponse(t *testing.T) {
 	const httpErr = http.StatusInternalServerError
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			return &httpgrpc.HTTPResponse{
 				Code: httpErr,
 			}, nil
@@ -354,7 +354,7 @@ func TestRemoteEvalNonJSONResponse(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			return &httpgrpc.HTTPResponse{
 				Code: http.StatusOK,
 				Body: []byte("this is not json"),
@@ -378,7 +378,7 @@ func TestRemoteEvalUnsupportedResultResponse(t *testing.T) {
 	require.NoError(t, err)
 
 	cli := mockClient{
-		handleFn: func(ctx context.Context, in *httpgrpc.HTTPRequest, opts ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
+		handleFn: func(_ context.Context, _ *httpgrpc.HTTPRequest, _ ...grpc.CallOption) (*httpgrpc.HTTPResponse, error) {
 			// this is somewhat bleeding the abstraction, but it's more idiomatic/readable than constructing
 			// the expected JSON response by hand
 			resp := loghttp.QueryResponse{
diff --git a/pkg/ruler/memstore_test.go b/pkg/ruler/memstore_test.go
index 3c26a0f71506a..94b6adfd598d3 100644
--- a/pkg/ruler/memstore_test.go
+++ b/pkg/ruler/memstore_test.go
@@ -48,7 +48,7 @@ func TestSelectRestores(t *testing.T) {
 	}
 
 	callCount := 0
-	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+	fn := rules.QueryFunc(func(_ context.Context, _ string, t time.Time) (promql.Vector, error) {
 		callCount++
 		return promql.Vector{
 			promql.Sample{
@@ -138,7 +138,7 @@ func TestMemstoreStart(_ *testing.T) {
 		},
 	}
 
-	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+	fn := rules.QueryFunc(func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
 		return nil, nil
 	})
 
@@ -171,7 +171,7 @@ func TestMemstoreBlocks(t *testing.T) {
 		},
 	}
 
-	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+	fn := rules.QueryFunc(func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) {
 		return nil, nil
 	})
 
diff --git a/pkg/ruler/rulestore/config_test.go b/pkg/ruler/rulestore/config_test.go
index bde42e48c0703..2929c25e94ebf 100644
--- a/pkg/ruler/rulestore/config_test.go
+++ b/pkg/ruler/rulestore/config_test.go
@@ -19,7 +19,7 @@ func TestIsDefaults(t *testing.T) {
 			expected: true,
 		},
 		"should return false if the config contains zero values": {
-			setup:    func(cfg *Config) {},
+			setup:    func(_ *Config) {},
 			expected: false,
 		},
 		"should return false if the config contains default values and some overrides": {
diff --git a/pkg/ruler/storage/cleaner/cleaner_test.go b/pkg/ruler/storage/cleaner/cleaner_test.go
index 5d5147eb0ada7..67b084da2bba6 100644
--- a/pkg/ruler/storage/cleaner/cleaner_test.go
+++ b/pkg/ruler/storage/cleaner/cleaner_test.go
@@ -50,7 +50,7 @@ func TestWALCleaner_getAbandonedStorageBeforeCutoff(t *testing.T) {
 	now := time.Now()
 
 	cleaner := newCleaner(walRoot, Config{})
-	cleaner.walLastModified = func(path string) (time.Time, error) {
+	cleaner.walLastModified = func(_ string) (time.Time, error) {
 		return now, nil
 	}
 
@@ -76,7 +76,7 @@ func TestWALCleaner_getAbandonedStorageAfterCutoff(t *testing.T) {
 		MinAge: 5 * time.Minute,
 	})
-	cleaner.walLastModified = func(path string) (time.Time, error) {
+	cleaner.walLastModified = func(_ string) (time.Time, error) {
 		return now.Add(-30 * time.Minute), nil
 	}
 
@@ -105,7 +105,7 @@ func TestWALCleaner_cleanup(t *testing.T) {
 	})
 	cleaner.instanceManager = manager
-	cleaner.walLastModified = func(path string) (time.Time, error) {
+	cleaner.walLastModified = func(_ string) (time.Time, error) {
 		return now.Add(-30 * time.Minute), nil
 	}
 
diff --git a/pkg/ruler/storage/instance/instance.go b/pkg/ruler/storage/instance/instance.go
index 15eb356375f52..ddd017664c976 100644
--- a/pkg/ruler/storage/instance/instance.go
+++ b/pkg/ruler/storage/instance/instance.go
@@ -262,7 +262,7 @@ func (i *Instance) Run(ctx context.Context) error {
 			level.Info(i.logger).Log("msg", "truncation loop stopped")
 			return nil
 		},
-		func(err error) {
+		func(_ error) {
 			level.Info(i.logger).Log("msg", "stopping truncation loop...")
 			contextCancel()
 		},
diff --git a/pkg/ruler/storage/instance/manager_test.go b/pkg/ruler/storage/instance/manager_test.go
index c2321bb81e1de..2cf4e0a977984 100644
--- a/pkg/ruler/storage/instance/manager_test.go
+++ b/pkg/ruler/storage/instance/manager_test.go
@@ -24,7 +24,7 @@ func TestBasicManager_ApplyConfig(t *testing.T) {
 			<-ctx.Done()
 			return nil
 		},
-		UpdateFunc: func(c Config) error {
+		UpdateFunc: func(_ Config) error {
 			return nil
 		},
 		TargetsActiveFunc: func() map[string][]*scrape.Target {
@@ -34,7 +34,7 @@
 	t.Run("dynamic update successful", func(t *testing.T) {
 		spawnedCount := 0
-		spawner := func(c Config) (ManagedInstance, error) {
+		spawner := func(_ Config) (ManagedInstance, error) {
 			spawnedCount++
 			newMock := baseMock
@@ -53,11 +53,11 @@ func TestBasicManager_ApplyConfig(t *testing.T) {
t.Run("dynamic update unsuccessful", func(t *testing.T) { spawnedCount := 0 - spawner := func(c Config) (ManagedInstance, error) { + spawner := func(_ Config) (ManagedInstance, error) { spawnedCount++ newMock := baseMock - newMock.UpdateFunc = func(c Config) error { + newMock.UpdateFunc = func(_ Config) error { return ErrInvalidUpdate{ Inner: fmt.Errorf("cannot dynamically update for testing reasons"), } @@ -77,11 +77,11 @@ func TestBasicManager_ApplyConfig(t *testing.T) { t.Run("dynamic update errored", func(t *testing.T) { spawnedCount := 0 - spawner := func(c Config) (ManagedInstance, error) { + spawner := func(_ Config) (ManagedInstance, error) { spawnedCount++ newMock := baseMock - newMock.UpdateFunc = func(c Config) error { + newMock.UpdateFunc = func(_ Config) error { return fmt.Errorf("something really bad happened") } return &newMock, nil diff --git a/pkg/storage/async_store.go b/pkg/storage/async_store.go index 49fe26612ec69..ffc8779328ab3 100644 --- a/pkg/storage/async_store.go +++ b/pkg/storage/async_store.go @@ -156,7 +156,7 @@ func (a *AsyncStore) Stats(ctx context.Context, userID string, from, through mod ctx, len(jobs), len(jobs), - func(ctx context.Context, i int) error { + func(_ context.Context, i int) error { resp, err := jobs[i]() resps[i] = resp return err @@ -208,7 +208,7 @@ func (a *AsyncStore) Volume(ctx context.Context, userID string, from, through mo ctx, len(jobs), len(jobs), - func(ctx context.Context, i int) error { + func(_ context.Context, i int) error { resp, err := jobs[i]() resps[i] = resp return err @@ -324,7 +324,7 @@ func (a *AsyncStore) GetShards( ctx, len(jobs), len(jobs), - func(ctx context.Context, i int) error { + func(_ context.Context, i int) error { return jobs[i]() }, ); err != nil { diff --git a/pkg/storage/bloom/v1/bloom_tester.go b/pkg/storage/bloom/v1/bloom_tester.go index 349f3691f6ea0..b60fbadbffb92 100644 --- a/pkg/storage/bloom/v1/bloom_tester.go +++ b/pkg/storage/bloom/v1/bloom_tester.go @@ -52,12 +52,12 @@ func ExtractTestableLineFilters(expr syntax.Expr) []syntax.LineFilterExpr { var filters []syntax.LineFilterExpr var lineFmtFound bool visitor := &syntax.DepthFirstTraversal{ - VisitLineFilterFn: func(v syntax.RootVisitor, e *syntax.LineFilterExpr) { + VisitLineFilterFn: func(_ syntax.RootVisitor, e *syntax.LineFilterExpr) { if e != nil && !lineFmtFound { filters = append(filters, *e) } }, - VisitLineFmtFn: func(v syntax.RootVisitor, e *syntax.LineFmtExpr) { + VisitLineFmtFn: func(_ syntax.RootVisitor, e *syntax.LineFmtExpr) { if e != nil { lineFmtFound = true } @@ -252,7 +252,7 @@ func (b stringMatcherFilter) Matches(test log.Checker) bool { } func newStringFilterFunc(b NGramBuilder) log.NewMatcherFiltererFunc { - return func(match []byte, caseInsensitive bool) log.MatcherFilterer { + return func(match []byte, _ bool) log.MatcherFilterer { return log.WrapMatcher(stringMatcherFilter{ test: newStringTest(b, string(match)), }) diff --git a/pkg/storage/bloom/v1/builder_test.go b/pkg/storage/bloom/v1/builder_test.go index 640fef038a6e8..cbe183e89045a 100644 --- a/pkg/storage/bloom/v1/builder_test.go +++ b/pkg/storage/bloom/v1/builder_test.go @@ -245,7 +245,7 @@ func TestMergeBuilder(t *testing.T) { } // We're not testing the ability to extend a bloom in this test - pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) { + pop := func(_ *Series, srcBlooms iter.SizedIterator[*Bloom], _ ChunkRefs, ch chan *BloomCreation) { for srcBlooms.Next() { bloom := srcBlooms.At() ch <- &BloomCreation{ 
@@ -353,7 +353,7 @@ func TestMergeBuilderFingerprintCollision(t *testing.T) {
 	}
 
 	// We're not testing the ability to extend a bloom in this test
-	pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) {
+	pop := func(_ *Series, _ iter.SizedIterator[*Bloom], _ ChunkRefs, ch chan *BloomCreation) {
 		ch <- &BloomCreation{
 			Bloom: &Bloom{
 				ScalableBloomFilter: *filter.NewScalableBloomFilter(1024, 0.01, 0.8),
@@ -524,7 +524,7 @@ func TestMergeBuilder_Roundtrip(t *testing.T) {
 	)
 
 	// We're not testing the ability to extend a bloom in this test
-	pop := func(s *Series, srcBlooms iter.SizedIterator[*Bloom], toAdd ChunkRefs, ch chan *BloomCreation) {
+	pop := func(_ *Series, srcBlooms iter.SizedIterator[*Bloom], _ ChunkRefs, ch chan *BloomCreation) {
 		for srcBlooms.Next() {
 			bloom := srcBlooms.At()
 			ch <- &BloomCreation{
diff --git a/pkg/storage/chunk/cache/embeddedcache_test.go b/pkg/storage/chunk/cache/embeddedcache_test.go
index 7e109efea4f97..b264f6e44bc8e 100644
--- a/pkg/storage/chunk/cache/embeddedcache_test.go
+++ b/pkg/storage/chunk/cache/embeddedcache_test.go
@@ -48,7 +48,7 @@ func TestEmbeddedCacheEviction(t *testing.T) {
 	for _, test := range tests {
 		removedEntriesCount := atomic.NewInt64(0)
-		onEntryRemoved := func(key string, value []byte) {
+		onEntryRemoved := func(_ string, _ []byte) {
 			removedEntriesCount.Inc()
 		}
 		c := NewTypedEmbeddedCache[string, []byte](test.name, test.cfg, nil, log.NewNopLogger(), "test", sizeOf, onEntryRemoved)
@@ -187,7 +187,7 @@ func TestEmbeddedCacheExpiry(t *testing.T) {
 	}
 
 	removedEntriesCount := atomic.NewInt64(0)
-	onEntryRemoved := func(key string, value []byte) {
+	onEntryRemoved := func(_ string, _ []byte) {
 		removedEntriesCount.Inc()
 	}
 	c := NewTypedEmbeddedCache[string, []byte]("cache_exprity_test", cfg, nil, log.NewNopLogger(), "test", sizeOf, onEntryRemoved)
diff --git a/pkg/storage/chunk/cache/redis_client_test.go b/pkg/storage/chunk/cache/redis_client_test.go
index 2f5494193e572..2a8b7426f56dd 100644
--- a/pkg/storage/chunk/cache/redis_client_test.go
+++ b/pkg/storage/chunk/cache/redis_client_test.go
@@ -118,7 +118,7 @@ func Test_deriveEndpoints(t *testing.T) {
 		{
 			name:      "single endpoint",
 			endpoints: fmt.Sprintf("%s:6379", upstream),
-			lookup: func(host string) ([]string, error) {
+			lookup: func(_ string) ([]string, error) {
 				return []string{upstream}, nil
 			},
 			want: []string{fmt.Sprintf("%s:6379", upstream)},
@@ -136,7 +136,7 @@ func Test_deriveEndpoints(t *testing.T) {
 		{
 			name:      "all loopback",
 			endpoints: fmt.Sprintf("%s:6379", lookback),
-			lookup: func(host string) ([]string, error) {
+			lookup: func(_ string) ([]string, error) {
 				return []string{"::1", "127.0.0.1"}, nil
 			},
 			want: []string{fmt.Sprintf("%s:6379", lookback)},
@@ -145,7 +145,7 @@ func Test_deriveEndpoints(t *testing.T) {
 		{
 			name:      "non-loopback address resolving to multiple addresses",
 			endpoints: fmt.Sprintf("%s:6379", upstream),
-			lookup: func(host string) ([]string, error) {
+			lookup: func(_ string) ([]string, error) {
 				return []string{upstream, downstream}, nil
 			},
 			want: []string{fmt.Sprintf("%s:6379", upstream), fmt.Sprintf("%s:6379", downstream)},
@@ -154,7 +154,7 @@ func Test_deriveEndpoints(t *testing.T) {
 		{
 			name:      "no such host",
 			endpoints: fmt.Sprintf("%s:6379", upstream),
-			lookup: func(host string) ([]string, error) {
+			lookup: func(_ string) ([]string, error) {
 				return nil, fmt.Errorf("no such host")
 			},
 			want: nil,
diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go
index aaf1d47fa88eb..0dfc4d49aae0a 100644
--- a/pkg/storage/chunk/cache/resultscache/cache.go
+++ b/pkg/storage/chunk/cache/resultscache/cache.go
@@ -105,7 +105,7 @@ func (s ResultsCache) Do(ctx context.Context, r Request) (Response, error) {
 	defer sp.Finish()
 	tenantIDs, err := tenant.TenantIDs(ctx)
 	if err != nil {
-		return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		return nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 	}
 
 	if s.shouldCacheReq != nil && !s.shouldCacheReq(ctx, r) {
@@ -200,7 +200,7 @@ func (s ResultsCache) handleHit(ctx context.Context, r Request, extents []Extent
 
 	tenantIDs, err := tenant.TenantIDs(ctx)
 	if err != nil {
-		return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error())
+		return nil, nil, httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
 	}
 	reqResps, err = DoRequests(ctx, s.next, requests, s.parallelismForReq(ctx, tenantIDs, r))
diff --git a/pkg/storage/chunk/cache/resultscache/cache_test.go b/pkg/storage/chunk/cache/resultscache/cache_test.go
index 0febe48020867..efb1358cca661 100644
--- a/pkg/storage/chunk/cache/resultscache/cache_test.go
+++ b/pkg/storage/chunk/cache/resultscache/cache_test.go
@@ -491,7 +491,7 @@ func TestHandleHit(t *testing.T) {
 				minCacheExtent: 10,
 				limits:         mockLimits{},
 				merger:         MockMerger{},
-				parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
+				parallelismForReq: func(_ context.Context, _ []string, _ Request) int { return 1 },
 				next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
 					return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
 				}),
@@ -514,7 +514,7 @@ func TestHandleHit_queryLengthServed(t *testing.T) {
 		extractor: MockExtractor{},
 		limits:    mockLimits{},
 		merger:    MockMerger{},
-		parallelismForReq: func(_ context.Context, tenantIDs []string, r Request) int { return 1 },
+		parallelismForReq: func(_ context.Context, _ []string, _ Request) int { return 1 },
 		next: HandlerFunc(func(_ context.Context, req Request) (Response, error) {
 			return mkAPIResponse(req.GetStart().UnixMilli(), req.GetEnd().UnixMilli(), req.GetStep()), nil
 		}),
@@ -602,7 +602,7 @@ func TestResultsCacheMaxFreshness(t *testing.T) {
 			MockExtractor{},
 			nil,
 			nil,
-			func(_ context.Context, tenantIDs []string, r Request) int {
+			func(_ context.Context, _ []string, _ Request) int {
 				return 10
 			},
 			nil,
@@ -646,7 +646,7 @@ func Test_resultsCache_MissingData(t *testing.T) {
 		MockExtractor{},
 		nil,
 		nil,
-		func(_ context.Context, tenantIDs []string, r Request) int {
+		func(_ context.Context, _ []string, _ Request) int {
 			return 10
 		},
 		nil,
@@ -700,7 +700,7 @@ func Test_shouldCacheReq(t *testing.T) {
 		MockExtractor{},
 		nil,
 		nil,
-		func(_ context.Context, tenantIDs []string, r Request) int {
+		func(_ context.Context, _ []string, _ Request) int {
 			return 10
 		},
 		nil,
diff --git a/pkg/storage/chunk/client/alibaba/oss_object_client.go b/pkg/storage/chunk/client/alibaba/oss_object_client.go
index cbe449fca9e5c..423a7348086e4 100644
--- a/pkg/storage/chunk/client/alibaba/oss_object_client.go
+++ b/pkg/storage/chunk/client/alibaba/oss_object_client.go
@@ -74,7 +74,7 @@ func (s *OssObjectClient) Stop() {
 
 func (s *OssObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
 	var options []oss.Option
-	err := instrument.CollectedRequest(ctx, "OSS.ObjectExists", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "OSS.ObjectExists", ossRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		_, requestErr := s.defaultBucket.GetObjectMeta(objectKey, options...)
 		return requestErr
 	})
@@ -89,7 +89,7 @@ func (s *OssObjectClient) ObjectExists(ctx context.Context, objectKey string) (b
 func (s *OssObjectClient) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
 	var resp *oss.GetObjectResult
 	var options []oss.Option
-	err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		resp, requestErr = s.defaultBucket.DoGetObject(&oss.GetObjectRequest{ObjectKey: objectKey}, options)
 		if requestErr != nil {
@@ -114,7 +114,7 @@ func (s *OssObjectClient) GetObjectRange(ctx context.Context, objectKey string,
 	options := []oss.Option{
 		oss.Range(offset, offset+length-1),
 	}
-	err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "OSS.GetObject", ossRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		resp, requestErr = s.defaultBucket.DoGetObject(&oss.GetObjectRequest{ObjectKey: objectKey}, options)
 		if requestErr != nil {
@@ -130,7 +130,7 @@ func (s *OssObjectClient) GetObjectRange(ctx context.Context, objectKey string,
 
 // PutObject puts the specified bytes into the configured OSS bucket at the provided key
 func (s *OssObjectClient) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
-	return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	return instrument.CollectedRequest(ctx, "OSS.PutObject", ossRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		if err := s.defaultBucket.PutObject(objectKey, object); err != nil {
 			return errors.Wrap(err, "failed to put oss object")
 		}
@@ -173,7 +173,7 @@ func (s *OssObjectClient) List(ctx context.Context, prefix, delimiter string) ([
 
 // DeleteObject deletes the specified object key from the configured OSS bucket.
 func (s *OssObjectClient) DeleteObject(ctx context.Context, objectKey string) error {
-	return instrument.CollectedRequest(ctx, "OSS.DeleteObject", ossRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	return instrument.CollectedRequest(ctx, "OSS.DeleteObject", ossRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		err := s.defaultBucket.DeleteObject(objectKey)
 		if err != nil {
 			return err
diff --git a/pkg/storage/chunk/client/aws/dynamodb_index_reader.go b/pkg/storage/chunk/client/aws/dynamodb_index_reader.go
index 4b1c4cd8a9e2d..19ea676451a9e 100644
--- a/pkg/storage/chunk/client/aws/dynamodb_index_reader.go
+++ b/pkg/storage/chunk/client/aws/dynamodb_index_reader.go
@@ -93,7 +93,7 @@ func (r *dynamodbIndexReader) ReadIndexEntries(ctx context.Context, tableName st
 	withRetrys := func(req *request.Request) {
 		req.Retryer = client.DefaultRetryer{NumMaxRetries: r.maxRetries}
 	}
-	err := r.DynamoDB.ScanPagesWithContext(ctx, input, func(page *dynamodb.ScanOutput, lastPage bool) bool {
+	err := r.DynamoDB.ScanPagesWithContext(ctx, input, func(page *dynamodb.ScanOutput, _ bool) bool {
 		if cc := page.ConsumedCapacity; cc != nil {
 			r.metrics.dynamoConsumedCapacity.WithLabelValues("DynamoDB.ScanTable", *cc.TableName).
 				Add(*cc.CapacityUnits)
diff --git a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
index 87fd24e127db0..064116cf2ed00 100644
--- a/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
+++ b/pkg/storage/chunk/client/aws/dynamodb_storage_client.go
@@ -194,7 +194,7 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input index.Write
 			ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
 		})
 
-		err := instrument.CollectedRequest(ctx, "DynamoDB.BatchWriteItem", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		err := instrument.CollectedRequest(ctx, "DynamoDB.BatchWriteItem", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 			return request.Send()
 		})
 		resp := request.Data().(*dynamodb.BatchWriteItemOutput)
@@ -450,7 +450,7 @@ func (a dynamoDBStorageClient) getDynamoDBChunks(ctx context.Context, chunks []c
 			ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
 		})
 
-		err := instrument.CollectedRequest(ctx, "DynamoDB.BatchGetItemPages", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		err := instrument.CollectedRequest(ctx, "DynamoDB.BatchGetItemPages", a.metrics.dynamoRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 			return request.Send()
 		})
 		response := request.Data().(*dynamodb.BatchGetItemOutput)
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client.go b/pkg/storage/chunk/client/aws/s3_storage_client.go
index 4785d4667bd3d..12fea874e311f 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client.go
@@ -314,7 +314,7 @@ func (a *S3ObjectClient) ObjectExists(ctx context.Context, objectKey string) (bo
 		if ctx.Err() != nil {
 			return false, errors.Wrap(ctx.Err(), "ctx related error during s3 objectExists")
 		}
-		lastErr = instrument.CollectedRequest(ctx, "S3.ObjectExists", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+		lastErr = instrument.CollectedRequest(ctx, "S3.ObjectExists", s3RequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 			headObjectInput := &s3.HeadObjectInput{
 				Bucket: aws.String(a.bucketFromKey(objectKey)),
 				Key:    aws.String(objectKey),
diff --git a/pkg/storage/chunk/client/aws/s3_storage_client_test.go b/pkg/storage/chunk/client/aws/s3_storage_client_test.go
index 1cf020e7b9ec8..3a2c1e8dc33c3 100644
--- a/pkg/storage/chunk/client/aws/s3_storage_client_test.go
+++ b/pkg/storage/chunk/client/aws/s3_storage_client_test.go
@@ -177,8 +177,8 @@ func Test_Hedging(t *testing.T) {
 				SecretAccessKey: flagext.SecretWithValue("bar"),
 				BackoffConfig:   backoff.Config{MaxRetries: 1},
 				BucketNames:     "foo",
-				Inject: func(next http.RoundTripper) http.RoundTripper {
-					return RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+				Inject: func(_ http.RoundTripper) http.RoundTripper {
+					return RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
 						count.Inc()
 						time.Sleep(200 * time.Millisecond)
 						return nil, errors.New("foo")
@@ -244,7 +244,7 @@ func Test_RetryLogic(t *testing.T) {
 			callCount := atomic.NewInt32(0)
 
 			mockS3 := &MockS3Client{
-				HeadObjectFunc: func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
+				HeadObjectFunc: func(_ *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
 					callNum := callCount.Inc()
 					if !tc.exists {
 						rfIn := awserr.NewRequestFailure(
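The Inject hooks in the S3 hedging and retry tests above rely on the function-adapter trick: a named func type with a RoundTrip method satisfies http.RoundTripper, the same way http.HandlerFunc adapts to http.Handler. A self-contained sketch of a failing, call-counting transport in that style; the types here are local stand-ins, not Loki's:

    package main

    import (
    	"errors"
    	"fmt"
    	"net/http"
    )

    // RoundTripperFunc adapts a plain function to http.RoundTripper.
    type RoundTripperFunc func(*http.Request) (*http.Response, error)

    func (f RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
    	return f(r)
    }

    func main() {
    	calls := 0
    	client := &http.Client{
    		// The request is never inspected, so the parameter is blanked,
    		// matching the Test_Hedging change above.
    		Transport: RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
    			calls++
    			return nil, errors.New("injected failure")
    		}),
    	}
    	_, err := client.Get("http://example.invalid/")
    	fmt.Println(err, "calls:", calls)
    }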
@@ -269,8 +269,8 @@
 				SecretAccessKey: flagext.SecretWithValue("bar"),
 				BackoffConfig:   backoff.Config{MaxRetries: tc.maxRetries},
 				BucketNames:     "foo",
-				Inject: func(next http.RoundTripper) http.RoundTripper {
-					return RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+				Inject: func(_ http.RoundTripper) http.RoundTripper {
+					return RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
 						// Increment the call counter
 						callNum := callCount.Inc()
diff --git a/pkg/storage/chunk/client/azure/blob_storage_client.go b/pkg/storage/chunk/client/azure/blob_storage_client.go
index 2e66e1e89da36..0a9d6300b1634 100644
--- a/pkg/storage/chunk/client/azure/blob_storage_client.go
+++ b/pkg/storage/chunk/client/azure/blob_storage_client.go
@@ -361,7 +361,7 @@ func (b *BlobStorage) newPipeline(hedgingCfg hedging.Config, hedging bool) (pipe
 
 	client := defaultClientFactory()
 
-	opts.HTTPSender = pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+	opts.HTTPSender = pipeline.FactoryFunc(func(_ pipeline.Policy, _ *pipeline.PolicyOptions) pipeline.PolicyFunc {
 		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
 			resp, err := client.Do(request.WithContext(ctx))
 			return pipeline.NewHTTPResponse(resp), err
@@ -373,7 +373,7 @@ func (b *BlobStorage) newPipeline(hedgingCfg hedging.Config, hedging bool) (pipe
 		if err != nil {
 			return nil, err
 		}
-		opts.HTTPSender = pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		opts.HTTPSender = pipeline.FactoryFunc(func(_ pipeline.Policy, _ *pipeline.PolicyOptions) pipeline.PolicyFunc {
 			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
 				resp, err := client.Do(request.WithContext(ctx))
 				return pipeline.NewHTTPResponse(resp), err
@@ -450,7 +450,7 @@ func (b *BlobStorage) getServicePrincipalToken(authFunctions authFunctions) (*ad
 	if b.cfg.UseFederatedToken {
 		token, err := b.servicePrincipalTokenFromFederatedToken(resource, authFunctions.NewOAuthConfigFunc, authFunctions.NewServicePrincipalTokenFromFederatedTokenFunc)
-		var customRefreshFunc adal.TokenRefresh = func(context context.Context, resource string) (*adal.Token, error) {
+		var customRefreshFunc adal.TokenRefresh = func(_ context.Context, resource string) (*adal.Token, error) {
 			newToken, err := b.servicePrincipalTokenFromFederatedToken(resource, authFunctions.NewOAuthConfigFunc, authFunctions.NewServicePrincipalTokenFromFederatedTokenFunc)
 			if err != nil {
 				return nil, err
diff --git a/pkg/storage/chunk/client/azure/blob_storage_client_test.go b/pkg/storage/chunk/client/azure/blob_storage_client_test.go
index 2f59934aabf20..cedc5057e85bc 100644
--- a/pkg/storage/chunk/client/azure/blob_storage_client_test.go
+++ b/pkg/storage/chunk/client/azure/blob_storage_client_test.go
@@ -66,7 +66,7 @@ func (suite *FederatedTokenTestSuite) TestGetServicePrincipalToken() {
 		return suite.mockOAuthConfig, nil
 	}
 
-	servicePrincipalTokenFromFederatedTokenFunc := func(oauthConfig adal.OAuthConfig, clientID string, jwt string, resource string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) {
+	servicePrincipalTokenFromFederatedTokenFunc := func(oauthConfig adal.OAuthConfig, clientID string, jwt string, resource string, _ ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) {
 		require.True(suite.T(), *suite.mockOAuthConfig == oauthConfig, "should return the mocked object")
 		require.Equal(suite.T(), "myClientId", clientID)
 		require.Equal(suite.T(), "myJwtToken", jwt)
diff --git a/pkg/storage/chunk/client/baidubce/bos_storage_client.go b/pkg/storage/chunk/client/baidubce/bos_storage_client.go
index edb6870033db9..b76db38e47c60 100644
--- a/pkg/storage/chunk/client/baidubce/bos_storage_client.go
+++ b/pkg/storage/chunk/client/baidubce/bos_storage_client.go
@@ -80,7 +80,7 @@ func NewBOSObjectStorage(cfg *BOSStorageConfig) (*BOSObjectStorage, error) {
 }
 
 func (b *BOSObjectStorage) PutObject(ctx context.Context, objectKey string, object io.Reader) error {
-	return instrument.CollectedRequest(ctx, "BOS.PutObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	return instrument.CollectedRequest(ctx, "BOS.PutObject", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		body, err := bce.NewBodyFromSizedReader(object, -1)
 		if err != nil {
 			return err
@@ -91,7 +91,7 @@ func (b *BOSObjectStorage) PutObject(ctx context.Context, objectKey string, obje
 }
 
 func (b *BOSObjectStorage) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
-	err := instrument.CollectedRequest(ctx, "BOS.ObjectExists", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "BOS.ObjectExists", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		_, requestErr = b.client.GetObjectMeta(b.cfg.BucketName, objectKey)
 		return requestErr
@@ -105,7 +105,7 @@ func (b *BOSObjectStorage) ObjectExists(ctx context.Context, objectKey string) (
 
 func (b *BOSObjectStorage) GetObject(ctx context.Context, objectKey string) (io.ReadCloser, int64, error) {
 	var res *api.GetObjectResult
-	err := instrument.CollectedRequest(ctx, "BOS.GetObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "BOS.GetObject", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		res, requestErr = b.client.BasicGetObject(b.cfg.BucketName, objectKey)
 		return requestErr
@@ -119,7 +119,7 @@ func (b *BOSObjectStorage) GetObject(ctx context.Context, objectKey string) (io.
 func (b *BOSObjectStorage) GetObjectRange(ctx context.Context, objectKey string, offset, length int64) (io.ReadCloser, error) {
 	var res *api.GetObjectResult
-	err := instrument.CollectedRequest(ctx, "BOS.GetObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "BOS.GetObject", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		res, requestErr = b.client.GetObject(b.cfg.BucketName, objectKey, nil, offset, offset+length-1)
 		return requestErr
@@ -134,7 +134,7 @@ func (b *BOSObjectStorage) List(ctx context.Context, prefix string, delimiter st
 	var storageObjects []client.StorageObject
 	var commonPrefixes []client.StorageCommonPrefix
 
-	err := instrument.CollectedRequest(ctx, "BOS.List", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "BOS.List", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		args := new(api.ListObjectsArgs)
 		args.Prefix = prefix
 		args.Delimiter = delimiter
@@ -172,7 +172,7 @@ func (b *BOSObjectStorage) List(ctx context.Context, prefix string, delimiter st
 }
 
 func (b *BOSObjectStorage) DeleteObject(ctx context.Context, objectKey string) error {
-	return instrument.CollectedRequest(ctx, "BOS.DeleteObject", bosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	return instrument.CollectedRequest(ctx, "BOS.DeleteObject", bosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		err := b.client.DeleteObject(b.cfg.BucketName, objectKey)
 		return err
 	})
diff --git a/pkg/storage/chunk/client/gcp/fixtures.go b/pkg/storage/chunk/client/gcp/fixtures.go
index 153a906776cf3..67f0c116b1116 100644
--- a/pkg/storage/chunk/client/gcp/fixtures.go
+++ b/pkg/storage/chunk/client/gcp/fixtures.go
@@ -92,7 +92,7 @@ func (f *fixture) Clients() (
 	c, err = newGCSObjectClient(ctx, GCSConfig{
 		BucketName: "chunks",
 		Insecure:   true,
-	}, hedging.Config{}, func(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error) {
+	}, hedging.Config{}, func(_ context.Context, _ ...option.ClientOption) (*storage.Client, error) {
 		return f.gcssrv.Client(), nil
 	})
 	if err != nil {
diff --git a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
index 230067f9e9508..5bece14a18dbb 100644
--- a/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
+++ b/pkg/storage/chunk/client/gcp/gcs_object_client_test.go
@@ -80,7 +80,7 @@ func Test_Hedging(t *testing.T) {
 }
 
 func fakeServer(t *testing.T, returnIn time.Duration, counter *atomic.Int32) *httptest.Server {
-	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		counter.Inc()
 		time.Sleep(returnIn)
 		_, _ = w.Write([]byte(`{}`))
@@ -236,7 +236,7 @@ func TestTCPErrs(t *testing.T) {
 }
 
 func fakeHTTPRespondingServer(t *testing.T, code int) *httptest.Server {
-	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(code)
 	}))
 	server.StartTLS()
@@ -246,7 +246,7 @@ func fakeHTTPRespondingServer(t *testing.T, code int) *httptest.Server {
 }
 
 func fakeSleepingServer(t *testing.T, responseSleep, connectSleep time.Duration, closeOnNew, closeOnActive bool) *httptest.Server {
-	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	server := httptest.NewUnstartedServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {
 		// sleep on response to mimic server overload
 		time.Sleep(responseSleep)
 	}))
diff --git a/pkg/storage/chunk/client/grpc/grpc_client_test.go b/pkg/storage/chunk/client/grpc/grpc_client_test.go
index dc040cb5aecab..b0bcffce91ebf 100644
--- a/pkg/storage/chunk/client/grpc/grpc_client_test.go
+++ b/pkg/storage/chunk/client/grpc/grpc_client_test.go
@@ -157,7 +157,7 @@ func TestGrpcStore(t *testing.T) {
 		{TableName: "table", HashValue: "foo"},
 	}
 	results := 0
-	err = storageClient.QueryPages(context.Background(), queries, func(query index.Query, batch index.ReadBatchResult) bool {
+	err = storageClient.QueryPages(context.Background(), queries, func(_ index.Query, batch index.ReadBatchResult) bool {
 		iter := batch.Iterator()
 		for iter.Next() {
 			results++
diff --git a/pkg/storage/chunk/client/hedging/hedging_test.go b/pkg/storage/chunk/client/hedging/hedging_test.go
index 4524156e7309b..1baf0f757dbd0 100644
--- a/pkg/storage/chunk/client/hedging/hedging_test.go
+++ b/pkg/storage/chunk/client/hedging/hedging_test.go
@@ -34,7 +34,7 @@ func TestHedging(t *testing.T) {
 	}
 	count := atomic.NewInt32(0)
 	client, err := cfg.Client(&http.Client{
-		Transport: RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		Transport: RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
 			count.Inc()
 			time.Sleep(200 * time.Millisecond)
 			return &http.Response{
@@ -69,7 +69,7 @@ func TestHedgingRateLimit(t *testing.T) {
 	}
 	count := atomic.NewInt32(0)
 	client, err := cfg.Client(&http.Client{
-		Transport: RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		Transport: RoundTripperFunc(func(_ *http.Request) (*http.Response, error) {
 			count.Inc()
 			time.Sleep(200 * time.Millisecond)
 			return &http.Response{
diff --git a/pkg/storage/chunk/client/ibmcloud/cos_object_client.go b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
index a796ab88dea4e..d432071293054 100644
--- a/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
+++ b/pkg/storage/chunk/client/ibmcloud/cos_object_client.go
@@ -321,7 +321,7 @@ func (c *COSObjectClient) DeleteObject(ctx context.Context, objectKey string) er
 
 func (c *COSObjectClient) ObjectExists(ctx context.Context, objectKey string) (bool, error) {
 	bucket := c.bucketFromKey(objectKey)
-	err := instrument.CollectedRequest(ctx, "COS.GetObject", cosRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
+	err := instrument.CollectedRequest(ctx, "COS.GetObject", cosRequestDuration, instrument.ErrorCode, func(_ context.Context) error {
 		var requestErr error
 		_, requestErr = c.hedgedCOS.HeadObject(&cos.HeadObjectInput{
 			Bucket: ibm.String(bucket),
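The filepath.Walk hunks below (boltdb_table_client.go and fs_object_client.go) apply the same blanking rule inside walk callbacks: each closure keeps only the parameters it reads, for example blanking the path argument when info.Name() suffices. A runnable sketch of that shape, under those assumptions:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	names := []string{}
    	err := filepath.Walk(".", func(_ string, info os.FileInfo, err error) error {
    		if err != nil {
    			return err // surface errors from the walk itself
    		}
    		if !info.IsDir() {
    			names = append(names, info.Name())
    		}
    		return nil
    	})
    	if err != nil {
    		fmt.Println("walk failed:", err)
    		return
    	}
    	fmt.Println(names)
    }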
RefreshToken: "test", diff --git a/pkg/storage/chunk/client/local/boltdb_table_client.go b/pkg/storage/chunk/client/local/boltdb_table_client.go index df30db04d29ac..d2bdab807d4d0 100644 --- a/pkg/storage/chunk/client/local/boltdb_table_client.go +++ b/pkg/storage/chunk/client/local/boltdb_table_client.go @@ -20,7 +20,7 @@ func NewTableClient(directory string) (index.TableClient, error) { func (c *TableClient) ListTables(_ context.Context) ([]string, error) { boltDbFiles := []string{} - err := filepath.Walk(c.directory, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(c.directory, func(_ string, info os.FileInfo, err error) error { if err != nil { return err } diff --git a/pkg/storage/chunk/client/local/fs_object_client.go b/pkg/storage/chunk/client/local/fs_object_client.go index 751e1b94b37b1..0eb027e9fd3cf 100644 --- a/pkg/storage/chunk/client/local/fs_object_client.go +++ b/pkg/storage/chunk/client/local/fs_object_client.go @@ -230,7 +230,7 @@ func (f *FSObjectClient) DeleteObject(_ context.Context, objectKey string) error // DeleteChunksBefore implements BucketClient func (f *FSObjectClient) DeleteChunksBefore(_ context.Context, ts time.Time) error { - return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error { + return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, _ error) error { if !info.IsDir() && info.ModTime().Before(ts) { level.Info(util_log.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) if err := os.Remove(path); err != nil { diff --git a/pkg/storage/chunk/client/util/parallel_chunk_fetch_test.go b/pkg/storage/chunk/client/util/parallel_chunk_fetch_test.go index 98b654d9df074..8fec4518e18ae 100644 --- a/pkg/storage/chunk/client/util/parallel_chunk_fetch_test.go +++ b/pkg/storage/chunk/client/util/parallel_chunk_fetch_test.go @@ -13,7 +13,7 @@ func BenchmarkGetParallelChunks(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { res, err := GetParallelChunks(ctx, 150, in, - func(_ context.Context, d *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) { + func(_ context.Context, _ *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) { return c, nil }) if err != nil { diff --git a/pkg/storage/stores/series/series_index_store.go b/pkg/storage/stores/series/series_index_store.go index 9fb64fe9b85b8..75fa2969e926c 100644 --- a/pkg/storage/stores/series/series_index_store.go +++ b/pkg/storage/stores/series/series_index_store.go @@ -501,12 +501,12 @@ func (c *IndexReaderWriter) lookupSeriesByMetricNameMatchers(ctx context.Context } func (c *IndexReaderWriter) lookupSeriesByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, shard *astmapper.ShardAnnotation) ([]string, error) { - return c.lookupIdsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, func(queries []series_index.Query) []series_index.Query { + return c.lookupIDsByMetricNameMatcher(ctx, from, through, userID, metricName, matcher, func(queries []series_index.Query) []series_index.Query { return c.schema.FilterReadQueries(queries, shard) }) } -func (c *IndexReaderWriter) lookupIdsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName string, matcher *labels.Matcher, filter func([]series_index.Query) []series_index.Query) ([]string, error) { +func (c *IndexReaderWriter) lookupIDsByMetricNameMatcher(ctx context.Context, from, through model.Time, userID, metricName 
string, matcher *labels.Matcher, filter func([]series_index.Query) []series_index.Query) ([]string, error) { var err error var queries []series_index.Query var labelName string diff --git a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go index fb802fd63b9a5..e7723b6d26536 100644 --- a/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/fetcher_test.go @@ -181,7 +181,7 @@ func TestFetcher_DownloadQueue(t *testing.T) { _, err := newDownloadQueue[bool, bool]( tc.size, tc.workers, - func(ctx context.Context, r downloadRequest[bool, bool]) {}, + func(_ context.Context, _ downloadRequest[bool, bool]) {}, log.NewNopLogger(), ) require.ErrorContains(t, err, tc.err) diff --git a/pkg/storage/stores/shipper/bloomshipper/store.go b/pkg/storage/stores/shipper/bloomshipper/store.go index 363fb7806ece3..b486b7ca8e524 100644 --- a/pkg/storage/stores/shipper/bloomshipper/store.go +++ b/pkg/storage/stores/shipper/bloomshipper/store.go @@ -314,7 +314,7 @@ func NewBloomStore( // sort by From time sort.Slice(periodicConfigs, func(i, j int) bool { - return periodicConfigs[i].From.Time.Before(periodicConfigs[i].From.Time) + return periodicConfigs[i].From.Time.Before(periodicConfigs[j].From.Time) }) // TODO(chaudum): Remove wrapper diff --git a/pkg/storage/stores/shipper/bloomshipper/store_test.go b/pkg/storage/stores/shipper/bloomshipper/store_test.go index 15568e8763bd2..6a6705f8f0be0 100644 --- a/pkg/storage/stores/shipper/bloomshipper/store_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/store_test.go @@ -353,7 +353,7 @@ func TestBloomStore_TenantFilesForInterval(t *testing.T) { tenantFiles, err := store.TenantFilesForInterval( ctx, NewInterval(parseTime("2024-01-18 00:00"), parseTime("2024-02-12 00:00")), - func(tenant string, object client.StorageObject) bool { + func(tenant string, _ client.StorageObject) bool { return tenant == "1" }, ) diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go index bdd42afc935d6..174a48d498f4e 100644 --- a/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go +++ b/pkg/storage/stores/shipper/indexshipper/boltdb/compactor/table_compactor.go @@ -280,7 +280,7 @@ func (t *tableCompactor) compactUserIndexes(idxSet compactor.IndexSet) (*Compact } // go through each file and dump records in the local bucket of the new compacted file - err = concurrency.ForEachJob(t.ctx, len(indexes), readDBsConcurrency, func(ctx context.Context, idx int) error { + err = concurrency.ForEachJob(t.ctx, len(indexes), readDBsConcurrency, func(_ context.Context, idx int) error { downloadAt, err := idxSet.GetSourceFile(indexes[idx]) if err != nil { return err @@ -296,7 +296,7 @@ func (t *tableCompactor) compactUserIndexes(idxSet compactor.IndexSet) (*Compact } dbPair.db = db - err = readFile(idxSet.GetLogger(), dbPair, func(bucketName string, batch []indexEntry) error { + err = readFile(idxSet.GetLogger(), dbPair, func(_ string, batch []indexEntry) error { return writeBatch(compactedFile, batch) }) if err != nil { @@ -341,7 +341,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn var fetchStateMx sync.Mutex defer func() { - err := concurrency.ForEachJob(ctx, len(dbsToRead), readDBsConcurrency, func(ctx context.Context, idx int) error { + err := concurrency.ForEachJob(ctx, len(dbsToRead), readDBsConcurrency, func(_ 
 			dbsToRead[idx].cleanup(idxSet.GetLogger())
 			return nil
 		})
@@ -351,7 +351,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn
 	}()
 
 	// fetch common index files and extract information about tenants that have records in a given file
-	err = concurrency.ForEachJob(ctx, len(indexes), readDBsConcurrency, func(ctx context.Context, idx int) error {
+	err = concurrency.ForEachJob(ctx, len(indexes), readDBsConcurrency, func(_ context.Context, idx int) error {
 		workNum := idx
 		// skip seed file
 		if workNum == compactedFileIdx {
@@ -378,7 +378,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn
 		dbsToRead[idx].db = db
 
 		return db.View(func(tx *bbolt.Tx) error {
-			return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
+			return tx.ForEach(func(name []byte, _ *bbolt.Bucket) error {
 				bucketNameStr := string(name)
 				if bucketNameStr == shipper_util.GetUnsafeString(local.IndexBucketName) {
 					return nil
@@ -396,13 +396,13 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn
 		return nil, errors.Wrap(err, "unable to fetch index files and extract tenants: ")
 	}
 
-	tenantIdsSlice := make([]string, 0, len(tenantsToFetch))
+	tenantIDsSlice := make([]string, 0, len(tenantsToFetch))
 	for tenant := range tenantsToFetch {
-		tenantIdsSlice = append(tenantIdsSlice, tenant)
+		tenantIDsSlice = append(tenantIDsSlice, tenant)
 	}
 
-	err = concurrency.ForEachJob(ctx, len(tenantIdsSlice), readDBsConcurrency, func(ctx context.Context, idx int) error {
-		userID := tenantIdsSlice[idx]
+	err = concurrency.ForEachJob(ctx, len(tenantIDsSlice), readDBsConcurrency, func(_ context.Context, idx int) error {
+		userID := tenantIDsSlice[idx]
 		return t.fetchOrCreateUserCompactedIndexSet(userID)
 	})
 
@@ -411,7 +411,7 @@ func (t *tableCompactor) compactCommonIndexes(ctx context.Context) (*CompactedIn
 	}
 
 	// go through each file and build index in FORMAT1 from FORMAT1 indexes and FORMAT3 from FORMAT2 indexes
-	err = concurrency.ForEachJob(ctx, len(indexes), readDBsConcurrency, func(ctx context.Context, idx int) error {
+	err = concurrency.ForEachJob(ctx, len(indexes), readDBsConcurrency, func(_ context.Context, idx int) error {
 		workNum := idx
 		// skip seed file
 		if workNum == compactedFileIdx {
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/table.go b/pkg/storage/stores/shipper/indexshipper/boltdb/table.go
index f1893ccc33563..abb40f0bf1684 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/table.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/table.go
@@ -109,7 +109,7 @@ func (lt *Table) Snapshot() error {
 	for name, db := range lt.dbs {
 		level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("checking db %s for snapshot", name))
 		srcWriteCount := int64(0)
-		err := db.View(func(tx *bbolt.Tx) error {
+		err := db.View(func(_ *bbolt.Tx) error {
 			srcWriteCount = db.Stats().TxStats.Write
 			return nil
 		})
diff --git a/pkg/storage/stores/shipper/indexshipper/boltdb/table_manager_test.go b/pkg/storage/stores/shipper/indexshipper/boltdb/table_manager_test.go
index 9cd73fe3e60c6..4632a40c7c863 100644
--- a/pkg/storage/stores/shipper/indexshipper/boltdb/table_manager_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/boltdb/table_manager_test.go
@@ -143,7 +143,7 @@ func TestLoadTables(t *testing.T) {
 	for tableName, expectedIndex := range expectedTables {
 		// loaded tables should not have any index files, it should have handed them over to index shipper
 		testutil.VerifyIndexes(t, userID, []index.Query{{TableName: tableName}},
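Note: alongside the parameter blanking, this file picks up initialism renames (`tenantIdsSlice` to `tenantIDsSlice` here, and `lookupIdsByMetricNameMatcher` to `lookupIDsByMetricNameMatcher` in `series_index_store.go` earlier). Go style, as enforced by revive's var-naming rule, keeps initialisms such as ID, URL, and HTTP in a single case. A tiny sketch; the names echo the patch but the snippet itself is illustrative:

```go
package naming

// revive's var-naming rule flags mixed-case initialisms:
//   tenantIdsSlice, lookupIdsByMetricNameMatcher   // want "IDs"
// The conforming spelling keeps the whole initialism upper-case.
var tenantIDsSlice []string

func lookupIDsByMetricNameMatcher(_ string) []string {
	return tenantIDsSlice
}
```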
-			func(ctx context.Context, table string, callback func(b *bbolt.DB) error) error {
+			func(ctx context.Context, _ string, callback func(_ *bbolt.DB) error) error {
 				return tm.tables[tableName].ForEach(ctx, callback)
 			},
 			0, 0)
@@ -187,7 +187,7 @@ func TestTableManager_BatchWrite(t *testing.T) {
 	for tableName, expectedIndex := range tc {
 		require.NoError(t, tm.tables[tableName].Snapshot())
 		testutil.VerifyIndexes(t, userID, []index.Query{{TableName: tableName}},
-			func(ctx context.Context, table string, callback func(b *bbolt.DB) error) error {
+			func(_ context.Context, _ string, callback func(_ *bbolt.DB) error) error {
 				return tm.tables[tableName].ForEach(context.Background(), callback)
 			},
 			expectedIndex.start, expectedIndex.numRecords)
diff --git a/pkg/storage/stores/shipper/indexshipper/testutil/testutil.go b/pkg/storage/stores/shipper/indexshipper/testutil/testutil.go
index 48f5990dc0790..8f602aad855e0 100644
--- a/pkg/storage/stores/shipper/indexshipper/testutil/testutil.go
+++ b/pkg/storage/stores/shipper/indexshipper/testutil/testutil.go
@@ -94,7 +94,7 @@ func VerifySingleIndexFile(t *testing.T, query index.Query, db *bbolt.DB, bucket
 func makeTestCallback(t *testing.T, minValue, maxValue int, records map[string]string) index.QueryPagesCallback {
 	t.Helper()
 	recordsMtx := sync.Mutex{}
-	return func(query index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
+	return func(_ index.Query, batch index.ReadBatchResult) (shouldContinue bool) {
 		itr := batch.Iterator()
 		for itr.Next() {
 			require.Equal(t, itr.RangeValue(), itr.Value())
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go
index 5c2ae28d89351..df8ea85465142 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor.go
@@ -99,7 +99,7 @@ func (t *tableCompactor) CompactTable() error {
 	downloadPaths := make([]string, len(multiTenantIndexes))
 
 	// concurrently download and open all the multi-tenant indexes
-	err := concurrency.ForEachJob(t.ctx, len(multiTenantIndexes), readDBsConcurrency, func(ctx context.Context, job int) error {
+	err := concurrency.ForEachJob(t.ctx, len(multiTenantIndexes), readDBsConcurrency, func(_ context.Context, job int) error {
 		downloadedAt, err := t.commonIndexSet.GetSourceFile(multiTenantIndexes[job])
 		if err != nil {
 			return err
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go
index 5f8a5b1e6d9d5..23a951deacbd6 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/compactor_test.go
@@ -144,7 +144,7 @@ func setupMultiTenantIndex(t *testing.T, indexFormat int, userStreams map[string
 	_, err := b.Build(
 		context.Background(),
 		t.TempDir(),
-		func(from, through model.Time, checksum uint32) Identifier {
+		func(_, _ model.Time, _ uint32) Identifier {
 			return dst
 		},
 	)
@@ -609,7 +609,7 @@ func TestCompactor_Compact(t *testing.T) {
 			require.NoError(t, err)
 
 			actualChunks = map[string]index.ChunkMetas{}
-			err = indexFile.(*TSDBFile).Index.(*TSDBIndex).ForSeries(context.Background(), "", nil, 0, math.MaxInt64, func(lbls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+			err = indexFile.(*TSDBFile).Index.(*TSDBIndex).ForSeries(context.Background(), "", nil, 0, math.MaxInt64, func(lbls labels.Labels, _ model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
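Note: several hunks here blank every parameter of a required callback at once, e.g. `func(_, _ model.Time, _ uint32) Identifier { return dst }`. Go lets consecutive parameters share one type declaration, and that shorthand works with blank identifiers too, which keeps constant-returning stubs compact. A hypothetical sketch of the shape (the callback type and names are invented for illustration):

```go
package main

import "fmt"

// A caller-defined callback type, similar in shape to the index
// builder's identifier function in the hunks above.
type IdentifierFunc func(from, through int64, checksum uint32) string

func build(name string, f IdentifierFunc) {
	fmt.Println(f(0, 0, 0), name)
}

func main() {
	// Consecutive blank parameters may share a type, so a stub that
	// ignores every argument still matches the required signature.
	build("index", func(_, _ int64, _ uint32) string { return "fixed-id" })
}
```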
 				actualChunks[lbls.String()] = chks
 				return false
 			}, labels.MustNewMatcher(labels.MatchEqual, "", ""))
@@ -824,7 +824,7 @@ func TestCompactedIndex(t *testing.T) {
 			require.NoError(t, err)
 
 			foundChunks := map[string]index.ChunkMetas{}
-			err = indexFile.(*TSDBFile).Index.(*TSDBIndex).ForSeries(context.Background(), "", nil, 0, math.MaxInt64, func(lbls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+			err = indexFile.(*TSDBFile).Index.(*TSDBIndex).ForSeries(context.Background(), "", nil, 0, math.MaxInt64, func(lbls labels.Labels, _ model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
 				foundChunks[lbls.String()] = append(index.ChunkMetas{}, chks...)
 				return false
 			}, labels.MustNewMatcher(labels.MatchEqual, "", ""))
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/manager.go b/pkg/storage/stores/shipper/indexshipper/tsdb/manager.go
index c50d3e00f12f7..96f56d7021f45 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/manager.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/manager.go
@@ -221,7 +221,7 @@ func (m *tsdbManager) buildFromHead(heads *tenantHeads, indexShipper indexshippe
 	_, err = b.Build(
 		context.Background(),
 		filepath.Join(managerScratchDir(m.dir), m.name),
-		func(from, through model.Time, checksum uint32) Identifier {
+		func(_, _ model.Time, _ uint32) Identifier {
 			return dst
 		},
 	)
diff --git a/pkg/storage/stores/shipper/indexshipper/tsdb/single_file_index.go b/pkg/storage/stores/shipper/indexshipper/tsdb/single_file_index.go
index 255425b286f22..6bd7e6e79a251 100644
--- a/pkg/storage/stores/shipper/indexshipper/tsdb/single_file_index.go
+++ b/pkg/storage/stores/shipper/indexshipper/tsdb/single_file_index.go
@@ -218,7 +218,7 @@ func (i *TSDBIndex) GetChunkRefs(ctx context.Context, userID string, from, throu
 	}
 	res = res[:0]
 
-	if err := i.ForSeries(ctx, "", fpFilter, from, through, func(ls labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
+	if err := i.ForSeries(ctx, "", fpFilter, from, through, func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) {
 		for _, chk := range chks {
 
 			res = append(res, ChunkRef{
diff --git a/pkg/storage/stores/shipper/indexshipper/util/queries_test.go b/pkg/storage/stores/shipper/indexshipper/util/queries_test.go
index a33da42c264f0..4622d027aa736 100644
--- a/pkg/storage/stores/shipper/indexshipper/util/queries_test.go
+++ b/pkg/storage/stores/shipper/indexshipper/util/queries_test.go
@@ -64,7 +64,7 @@ func TestDoParallelQueries(t *testing.T) {
 		queries: map[string]index.Query{},
 	}
 
-	err := DoParallelQueries(context.Background(), tableQuerier.MultiQueries, queries, func(query index.Query, batch index.ReadBatchResult) bool {
+	err := DoParallelQueries(context.Background(), tableQuerier.MultiQueries, queries, func(_ index.Query, _ index.ReadBatchResult) bool {
 		return false
 	})
 	require.NoError(t, err)
diff --git a/pkg/tool/commands/rules.go b/pkg/tool/commands/rules.go
index 4abc14162eddd..2eb5c0eb6cdd3 100644
--- a/pkg/tool/commands/rules.go
+++ b/pkg/tool/commands/rules.go
@@ -628,7 +628,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error {
 	}
 
 	// Do not apply the aggregation label to excluded rule groups.
-	applyTo := func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool {
+	applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
 		_, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name]
 		return !excluded
 	}
diff --git a/pkg/tool/rules/rules.go b/pkg/tool/rules/rules.go
index eccfbdabe45a4..4ac84f7da92c4 100644
--- a/pkg/tool/rules/rules.go
+++ b/pkg/tool/rules/rules.go
@@ -148,7 +148,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
 // exprNodeInspectorFunc returns a PromQL inspector.
 // It modifies most PromQL expressions to include a given label.
 func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error {
-	return func(node parser.Node, path []parser.Node) error {
+	return func(node parser.Node, _ []parser.Node) error {
 		var err error
 		switch n := node.(type) {
 		case *parser.AggregateExpr:
diff --git a/pkg/tool/rules/rules_test.go b/pkg/tool/rules/rules_test.go
index fba13040d49b8..8c24a7d8ab490 100644
--- a/pkg/tool/rules/rules_test.go
+++ b/pkg/tool/rules/rules_test.go
@@ -176,7 +176,7 @@ func TestAggregateBy(t *testing.T) {
 				},
 			},
 		},
-			applyTo: func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool {
+			applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
 				return group.Name != "CountSkipped"
 			},
 			expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`},
diff --git a/pkg/util/cfg/dynamic_test.go b/pkg/util/cfg/dynamic_test.go
index b76cc2e79ca94..ab2f568bbf3f8 100644
--- a/pkg/util/cfg/dynamic_test.go
+++ b/pkg/util/cfg/dynamic_test.go
@@ -52,7 +52,7 @@ server:
 	t.Run("calls ApplyDynamicConfig on provided DynamicCloneable", func(t *testing.T) {
 		applyDynamicConfigCalled := false
 
-		mockApplyDynamicConfig := func(dst Cloneable) error {
+		mockApplyDynamicConfig := func(_ Cloneable) error {
 			applyDynamicConfigCalled = true
 			return nil
 		}
@@ -113,7 +113,7 @@ type DynamicConfig struct {
 
 func NewDynamicConfig(applyDynamicConfig Source) DynamicConfig {
 	if applyDynamicConfig == nil {
-		applyDynamicConfig = func(config Cloneable) error {
+		applyDynamicConfig = func(_ Cloneable) error {
 			return nil
 		}
 	}
diff --git a/pkg/util/cfg/flag.go b/pkg/util/cfg/flag.go
index c95798883692c..6315ca137beea 100644
--- a/pkg/util/cfg/flag.go
+++ b/pkg/util/cfg/flag.go
@@ -36,7 +36,7 @@ func Flags(args []string, fs *flag.FlagSet) Source {
 
 // dFlags parses the flagset, applying all values set on the slice
 func dFlags(fs *flag.FlagSet, args []string) Source {
-	return func(dst Cloneable) error {
+	return func(_ Cloneable) error {
 		// parse the final flagset
 		return fs.Parse(args)
 	}
diff --git a/pkg/util/fakeauth/fake_auth.go b/pkg/util/fakeauth/fake_auth.go
index 8f836c9a0c181..250c5726c4ef1 100644
--- a/pkg/util/fakeauth/fake_auth.go
+++ b/pkg/util/fakeauth/fake_auth.go
@@ -55,7 +55,7 @@ var fakeHTTPAuthMiddleware = middleware.Func(func(next http.Handler) http.Handle
 	})
 })
 
-var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+var fakeGRPCAuthUniaryMiddleware = func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
 	ctx = user.InjectOrgID(ctx, "fake")
 	return handler(ctx, req)
 }
diff --git a/pkg/util/httpreq/tags_test.go b/pkg/util/httpreq/tags_test.go
index 830e13c84af49..430d616451f7e 100644
--- a/pkg/util/httpreq/tags_test.go
+++ b/pkg/util/httpreq/tags_test.go
@@ -44,7 +44,7 @@ func TestQueryTags(t *testing.T) {
 			w := httptest.NewRecorder()
 
 			checked := false
-			mware := ExtractQueryTagsMiddleware().Wrap(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			mware := ExtractQueryTagsMiddleware().Wrap(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) {
 				require.Equal(t, tc.exp, req.Context().Value(QueryTagsHTTPHeader).(string))
 				checked = true
 			}))
@@ -85,7 +85,7 @@ func TestQueryMetrics(t *testing.T) {
 			w := httptest.NewRecorder()
 
 			checked := false
-			mware := ExtractQueryMetricsMiddleware().Wrap(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			mware := ExtractQueryMetricsMiddleware().Wrap(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) {
 				require.Equal(t, tc.exp, req.Context().Value(QueryQueueTimeHTTPHeader))
 				checked = true
 			}))
diff --git a/pkg/util/jumphash/memcached_client_selector_test.go b/pkg/util/jumphash/memcached_client_selector_test.go
index 0708f06d763e4..939106ad5aac8 100644
--- a/pkg/util/jumphash/memcached_client_selector_test.go
+++ b/pkg/util/jumphash/memcached_client_selector_test.go
@@ -47,7 +47,7 @@ var ips = map[string][]byte{
 	"microsoft.com:80": net.ParseIP("172.12.34.56"),
 }
 
-var mockTCPResolver = func(network, address string) (*net.TCPAddr, error) {
+var mockTCPResolver = func(_, address string) (*net.TCPAddr, error) {
 	return &net.TCPAddr{
 		IP:   ips[address],
 		Port: 0,
diff --git a/pkg/util/limiter/query_limiter.go b/pkg/util/limiter/query_limiter.go
index 430eee3ebc8be..47f0276c1731a 100644
--- a/pkg/util/limiter/query_limiter.go
+++ b/pkg/util/limiter/query_limiter.go
@@ -97,7 +97,7 @@ func (ql *QueryLimiter) AddChunks(count int) error {
 	}
 
 	if ql.chunkCount.Add(int64(count)) > int64(ql.maxChunksPerQuery) {
-		return fmt.Errorf(fmt.Sprintf(ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery))
+		return fmt.Errorf("%s %d", ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery)
 	}
 	return nil
 }
diff --git a/pkg/util/marshal/marshal_test.go b/pkg/util/marshal/marshal_test.go
index c749677f77026..0e08239b20ad5 100644
--- a/pkg/util/marshal/marshal_test.go
+++ b/pkg/util/marshal/marshal_test.go
@@ -1056,7 +1056,7 @@ func Test_WriteTailResponseJSON(t *testing.T) {
 			{Timestamp: time.Unix(0, 2), Labels: `{app="dropped"}`},
 		},
 	},
-		NewWebsocketJSONWriter(WebsocketWriterFunc(func(i int, b []byte) error {
+		NewWebsocketJSONWriter(WebsocketWriterFunc(func(_ int, b []byte) error {
 			require.Equal(t, `{"streams":[{"stream":{"app":"foo"},"values":[["1","foobar"]]}],"dropped_entries":[{"timestamp":"2","labels":{"app":"dropped"}}]}`, string(b))
 			return nil
 		})),
diff --git a/pkg/util/querylimits/middleware_test.go b/pkg/util/querylimits/middleware_test.go
index 1861df3ce1f81..acea9fd5d3ebb 100644
--- a/pkg/util/querylimits/middleware_test.go
+++ b/pkg/util/querylimits/middleware_test.go
@@ -12,7 +12,7 @@ import (
 )
 
 func Test_MiddlewareWithoutHeader(t *testing.T) {
-	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	nextHandler := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		limits := ExtractQueryLimitsContext(r.Context())
 		require.Nil(t, limits)
 	})
@@ -28,7 +28,7 @@ func Test_MiddlewareWithoutHeader(t *testing.T) {
 }
 
 func Test_MiddlewareWithBrokenHeader(t *testing.T) {
-	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	nextHandler := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		limits := ExtractQueryLimitsContext(r.Context())
 		require.Nil(t, limits)
 	})
@@ -56,7 +56,7 @@ func Test_MiddlewareWithHeader(t *testing.T) {
 		10,
 	}
 
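Note: the `query_limiter.go` hunk above is a different lint class from the parameter blanking. `fmt.Errorf(fmt.Sprintf(...))` passes a non-constant string as the format argument, which vet's printf check flags: beyond the redundant double formatting, any `%` that ends up in the message is reinterpreted as a printf verb. A small demonstration; the message text is made up:

```go
package main

import "fmt"

func main() {
	msg := "loaded 100%" // contains a literal '%'

	// Unsafe: msg is used as the format string, so the trailing '%'
	// is rendered as "%!(NOVERB)" in the resulting error.
	fmt.Println(fmt.Errorf(msg)) // vet: non-constant format string

	// Safe: constant format string, dynamic text as an argument.
	fmt.Println(fmt.Errorf("%s", msg))
}
```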
-	nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	nextHandler := http.HandlerFunc(func(_ http.ResponseWriter, r *http.Request) {
 		actual := ExtractQueryLimitsContext(r.Context())
 		require.Equal(t, limits, *actual)
 	})
diff --git a/pkg/util/ring/ringmanager.go b/pkg/util/ring/ringmanager.go
index b9e0fb9c2a5f5..2834d8ef623b1 100644
--- a/pkg/util/ring/ringmanager.go
+++ b/pkg/util/ring/ringmanager.go
@@ -151,7 +151,7 @@ func (rm *RingManager) startClientMode() error {
 
 	rm.Service = services.NewIdleService(func(ctx context.Context) error {
 		return services.StartManagerAndAwaitHealthy(ctx, rm.subservices)
-	}, func(failureCase error) error {
+	}, func(_ error) error {
 		return services.StopManagerAndAwaitStopped(context.Background(), rm.subservices)
 	})
 
diff --git a/pkg/util/server/error_test.go b/pkg/util/server/error_test.go
index 69f2bff163c6c..b0f593c094f14 100644
--- a/pkg/util/server/error_test.go
+++ b/pkg/util/server/error_test.go
@@ -44,7 +44,7 @@ func Test_writeError(t *testing.T) {
 		{"mixed context and rpc deadline", util.MultiError{context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()}, ErrDeadlineExceeded, http.StatusGatewayTimeout},
 		{"mixed context, rpc deadline and another", util.MultiError{errors.New("standard error"), context.DeadlineExceeded, status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()).Err()}, "3 errors: standard error; context deadline exceeded; rpc error: code = DeadlineExceeded desc = context deadline exceeded", http.StatusInternalServerError},
 		{"parse error", logqlmodel.ParseError{}, "parse error : ", http.StatusBadRequest},
-		{"httpgrpc", httpgrpc.Errorf(http.StatusBadRequest, errors.New("foo").Error()), "foo", http.StatusBadRequest},
+		{"httpgrpc", httpgrpc.Errorf(http.StatusBadRequest, "%s", errors.New("foo").Error()), "foo", http.StatusBadRequest},
 		{"internal", errors.New("foo"), "foo", http.StatusInternalServerError},
 		{"query error", storage_errors.ErrQueryMustContainMetricName, storage_errors.ErrQueryMustContainMetricName.Error(), http.StatusBadRequest},
 		{"wrapped query error", fmt.Errorf("wrapped: %w", storage_errors.ErrQueryMustContainMetricName), "wrapped: " + storage_errors.ErrQueryMustContainMetricName.Error(), http.StatusBadRequest},
diff --git a/pkg/util/server/middleware.go b/pkg/util/server/middleware.go
index 4dd241a6d54d7..9b0ad7071f345 100644
--- a/pkg/util/server/middleware.go
+++ b/pkg/util/server/middleware.go
@@ -14,7 +14,7 @@ func NewPrepopulateMiddleware() middleware.Interface {
 		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
 			err := req.ParseForm()
 			if err != nil {
-				WriteError(httpgrpc.Errorf(http.StatusBadRequest, err.Error()), w)
+				WriteError(httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error()), w)
 				return
 			}
 
diff --git a/pkg/util/server/middleware_test.go b/pkg/util/server/middleware_test.go
index b2267c919926a..c142775fb13c3 100644
--- a/pkg/util/server/middleware_test.go
+++ b/pkg/util/server/middleware_test.go
@@ -12,7 +12,7 @@ import (
 )
 
 func TestPrepopulate(t *testing.T) {
-	success := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+	success := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
 		_, err := w.Write([]byte("ok"))
 		require.Nil(t, err)
 	})
diff --git a/pkg/util/server/recovery_test.go b/pkg/util/server/recovery_test.go
index a8d1d3f1b6b9d..c1717ac2fa7ee 100644
--- a/pkg/util/server/recovery_test.go
+++ b/pkg/util/server/recovery_test.go
@@ -26,17 +26,17 @@ func Test_onPanic(t *testing.T) {
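Note: the `error_test.go` and `middleware.go` hunks apply the same constant-format rule to `httpgrpc.Errorf`, which shares `fmt.Errorf`'s printf semantics. The stakes are higher here because `err.Error()` can carry request-derived text into a user-facing HTTP error. A sketch, assuming the dskit `httpgrpc` package that Loki already imports:

```go
package main

import (
	"errors"
	"net/http"

	"github.com/grafana/dskit/httpgrpc"
)

func main() {
	err := errors.New("bad value: 100% of shards failed")

	// Unsafe: err.Error() becomes the format string, so the '%' in the
	// message would be reinterpreted as a printf verb.
	_ = httpgrpc.Errorf(http.StatusBadRequest, err.Error())

	// Safe: constant format, dynamic text passed as an argument.
	_ = httpgrpc.Errorf(http.StatusBadRequest, "%s", err.Error())
}
```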
 	ServeHTTP(rec, req)
 	require.Equal(t, http.StatusInternalServerError, rec.Code)
 
-	require.Error(t, RecoveryGRPCStreamInterceptor(nil, fakeStream{}, nil, grpc.StreamHandler(func(srv interface{}, stream grpc.ServerStream) error {
+	require.Error(t, RecoveryGRPCStreamInterceptor(nil, fakeStream{}, nil, grpc.StreamHandler(func(_ interface{}, _ grpc.ServerStream) error {
 		panic("foo")
 	})))
 
-	_, err = RecoveryGRPCUnaryInterceptor(context.Background(), nil, nil, grpc.UnaryHandler(func(ctx context.Context, req interface{}) (interface{}, error) {
+	_, err = RecoveryGRPCUnaryInterceptor(context.Background(), nil, nil, grpc.UnaryHandler(func(_ context.Context, _ interface{}) (interface{}, error) {
 		panic("foo")
 	}))
 	require.Error(t, err)
 
 	_, err = RecoveryMiddleware.
-		Wrap(queryrangebase.HandlerFunc(func(ctx context.Context, req queryrangebase.Request) (res queryrangebase.Response, err error) {
+		Wrap(queryrangebase.HandlerFunc(func(_ context.Context, _ queryrangebase.Request) (_ queryrangebase.Response, _ error) {
 			panic("foo")
 		})).
 		Do(context.Background(), nil)
diff --git a/tools/deprecated-config-checker/checker/checker.go b/tools/deprecated-config-checker/checker/checker.go
index 5651ab49bbe82..d49b55584d31e 100644
--- a/tools/deprecated-config-checker/checker/checker.go
+++ b/tools/deprecated-config-checker/checker/checker.go
@@ -35,7 +35,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
 
 func (c *Config) Validate() error {
 	if c.ConfigFile == "" && c.RuntimeConfigFile == "" {
-		return fmt.Errorf(configRequiredErrorMsg)
+		return fmt.Errorf("%s", configRequiredErrorMsg)
 	}
 	return nil
 }
diff --git a/tools/querytee/proxy_endpoint_test.go b/tools/querytee/proxy_endpoint_test.go
index 85bc45d066ae5..728f8fec415c9 100644
--- a/tools/querytee/proxy_endpoint_test.go
+++ b/tools/querytee/proxy_endpoint_test.go
@@ -189,8 +189,8 @@ func Test_ProxyEndpoint_Requests(t *testing.T) {
 				require.NoError(t, err)
 				return r
 			},
-			handler: func(t *testing.T) http.HandlerFunc {
-				return func(w http.ResponseWriter, r *http.Request) {
+			handler: func(_ *testing.T) http.HandlerFunc {
+				return func(w http.ResponseWriter, _ *http.Request) {
 					_, _ = w.Write([]byte("ok"))
 				}
 			},
@@ -224,7 +224,7 @@ func Test_ProxyEndpoint_Requests(t *testing.T) {
 			wg.Add(tc.counts)
 
 			if tc.handler == nil {
-				testHandler = func(w http.ResponseWriter, r *http.Request) {
+				testHandler = func(w http.ResponseWriter, _ *http.Request) {
 					_, _ = w.Write([]byte("ok"))
 				}
 
@@ -320,7 +320,7 @@ func Test_ProxyEndpoint_SummaryMetrics(t *testing.T) {
 			requestCount.Store(0)
 			wg.Add(tc.counts)
 
-			testHandler = func(w http.ResponseWriter, r *http.Request) {
+			testHandler = func(w http.ResponseWriter, _ *http.Request) {
 				_, _ = w.Write([]byte("ok"))
 			}
 
diff --git a/tools/tsdb/index-analyzer/analytics.go b/tools/tsdb/index-analyzer/analytics.go
index de01d47d6ec00..b574a58341924 100644
--- a/tools/tsdb/index-analyzer/analytics.go
+++ b/tools/tsdb/index-analyzer/analytics.go
@@ -73,7 +73,7 @@ func analyze(indexShipper indexshipper.IndexShipper, tableName string, tenants [
 				"", nil,
 				model.Earliest, model.Latest,
-				func(ls labels.Labels, fp model.Fingerprint, chks []tsdb_index.ChunkMeta) (stop bool) {
+				func(_ labels.Labels, _ model.Fingerprint, chks []tsdb_index.ChunkMeta) (stop bool) {
 					if len(chks) > maxChunksPerSeries {
 						maxChunksPerSeries = len(chks)
 						if len(chks) > 1000 {
diff --git a/tools/tsdb/tsdb-map/main.go b/tools/tsdb/tsdb-map/main.go
index 0a72ac98db13d..9f35b53fe48c6 100644
--- a/tools/tsdb/tsdb-map/main.go
+++ b/tools/tsdb/tsdb-map/main.go
@@ -93,7 +93,7 @@ func main() {
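Note: the `recovery_test.go` hunk above blanks named results as well as parameters: `(_ queryrangebase.Response, _ error)`. A handler that unconditionally panics never assigns its results, and Go accepts the blank identifier for result names, so the two-value signature is preserved without any unused names. A standalone sketch of the idea (the handler type is invented for illustration):

```go
package main

import "fmt"

type handler func(req string) (resp string, err error)

// A handler that always panics never assigns its results, so the
// named returns can be blanked while keeping the signature intact.
var panicking handler = func(_ string) (_ string, _ error) {
	panic("foo")
}

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	_, _ = panicking("req")
}
```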
 	}
 
 	log.Println("writing index")
-	if _, err := builder.Build(context.Background(), *dest, func(from, through model.Time, checksum uint32) tsdb.Identifier {
+	if _, err := builder.Build(context.Background(), *dest, func(_, _ model.Time, _ uint32) tsdb.Identifier {
 		panic("todo")
 	}); err != nil {
 		panic(err)