Enable Lint Rule: import-shadowing #6102

Merged: 4 commits, Oct 23, 2024
3 changes: 0 additions & 3 deletions .golangci.yml
@@ -181,9 +181,6 @@ linters-settings:
       # wtf: "you have exceeded the maximum number of public struct declarations"
       - name: max-public-structs
         disabled: true
-      # probably a good one to enable after cleanup
-      - name: import-shadowing
-        disabled: true
       # TBD - often triggered in tests
       - name: unhandled-error
         disabled: true
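For context, and not part of this diff: revive's import-shadowing rule reports a local identifier that reuses the name of an imported package, because the package becomes unreachable for the rest of that scope. A minimal standalone sketch of the pattern the rule catches (illustrative code, not from this repo):

package main

import (
    "fmt"
    "strings"
)

// The parameter name shadows the imported "strings" package: inside join,
// strings.Join is unreachable because "strings" now names the argument.
// revive's import-shadowing rule reports exactly this kind of declaration.
func join(strings []string) string {
    out := ""
    for i, s := range strings {
        if i > 0 {
            out += ","
        }
        out += s
    }
    return out
}

func main() {
    fmt.Println(join([]string{"a", "b"}))              // a,b
    fmt.Println(strings.Join([]string{"c", "d"}, ",")) // fine: no shadow in main
}

The remaining files in this PR apply the same fix everywhere: rename the local (procs, flagSet, metricsNamespace, and so on) so the package name stays usable.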
4 changes: 2 additions & 2 deletions cmd/agent/app/agent.go
@@ -29,12 +29,12 @@ type Agent struct {

 // NewAgent creates the new Agent.
 func NewAgent(
-    processors []processors.Processor,
+    procs []processors.Processor,
     httpServer *http.Server,
     logger *zap.Logger,
 ) *Agent {
     a := &Agent{
         processors: procs,
         httpServer: httpServer,
         logger:     logger,
     }
8 changes: 4 additions & 4 deletions cmd/agent/app/builder.go
@@ -95,14 +95,14 @@ func (b *Builder) WithReporter(r ...reporter.Reporter) *Builder {
 // CreateAgent creates the Agent
 func (b *Builder) CreateAgent(primaryProxy CollectorProxy, logger *zap.Logger, mFactory metrics.Factory) (*Agent, error) {
     r := b.getReporter(primaryProxy)
-    processors, err := b.getProcessors(r, mFactory, logger)
+    procs, err := b.getProcessors(r, mFactory, logger)
     if err != nil {
         return nil, fmt.Errorf("cannot create processors: %w", err)
     }
     server := b.HTTPServer.getHTTPServer(primaryProxy.GetManager(), mFactory, logger)
     b.publishOpts()

-    return NewAgent(processors, server, logger), nil
+    return NewAgent(procs, server, logger), nil
 }

 func (b *Builder) getReporter(primaryProxy CollectorProxy) reporter.Reporter {
@@ -140,11 +140,11 @@ func (b *Builder) getProcessors(rep reporter.Reporter, mFactory metrics.Factory,
     default:
         return nil, fmt.Errorf("cannot find agent processor for data model %v", cfg.Model)
     }
-    metrics := mFactory.Namespace(metrics.NSOptions{Name: "", Tags: map[string]string{
+    metricsNamespace := mFactory.Namespace(metrics.NSOptions{Name: "", Tags: map[string]string{
         "protocol": string(cfg.Protocol),
         "model":    string(cfg.Model),
     }})
-    processor, err := cfg.GetThriftProcessor(metrics, protoFactory, handler, logger)
+    processor, err := cfg.GetThriftProcessor(metricsNamespace, protoFactory, handler, logger)
     if err != nil {
         return nil, fmt.Errorf("cannot create Thrift Processor: %w", err)
     }
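The metrics := mFactory.Namespace(metrics.NSOptions{...}) line removed above is worth a note: it compiled only because, with :=, the scope of the new variable begins at the end of the declaration, so the right-hand side still resolves metrics to the package while every later use sees the local. A standalone sketch of that scoping rule (names are illustrative, not from the repo):

package main

import "fmt"

type factory struct{}

// Namespace stands in for a metrics.Factory-style constructor.
func (factory) Namespace(name string) string { return "ns:" + name }

func main() {
    f := factory{}
    // On the right-hand side, "fmt" still resolves to the package: the new
    // variable's scope only begins after the declaration ends.
    fmt := f.Namespace(fmt.Sprintf("%s", "demo"))
    // From here on, "fmt" is a string; a fmt.Println call would not compile.
    println(fmt) // ns:demo
}

Renaming the local to metricsNamespace removes that double reading of one identifier across two adjacent lines.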
8 changes: 4 additions & 4 deletions cmd/agent/app/processors/thrift_processor_test.go
@@ -75,8 +75,8 @@ func initCollectorAndReporter(t *testing.T) (*metricstest.Factory, *testutils.Gr
     require.NoError(t, err)
     rep := grpcrep.NewReporter(conn, map[string]string{}, zaptest.NewLogger(t))
     metricsFactory := metricstest.NewFactory(0)
-    reporter := reporter.WrapWithMetrics(rep, metricsFactory)
-    return metricsFactory, grpcCollector, reporter, conn
+    metricsReporter := reporter.WrapWithMetrics(rep, metricsFactory)
+    return metricsFactory, grpcCollector, metricsReporter, conn
 }

 func TestNewThriftProcessor_ZeroCount(t *testing.T) {
@@ -85,11 +85,11 @@ func TestNewThriftProcessor_ZeroCount(t *testing.T) {
 }

 func TestProcessorWithCompactZipkin(t *testing.T) {
-    metricsFactory, collector, reporter, conn := initCollectorAndReporter(t)
+    metricsFactory, collector, metricsReporter, conn := initCollectorAndReporter(t)
     defer conn.Close()
     defer collector.Close()

-    hostPort, processor := createProcessor(t, metricsFactory, compactFactory, agent.NewAgentProcessor(reporter))
+    hostPort, processor := createProcessor(t, metricsFactory, compactFactory, agent.NewAgentProcessor(metricsReporter))
     defer processor.Stop()

     client, clientCloser, err := testutils.NewZipkinThriftUDPClient(hostPort)
12 changes: 6 additions & 6 deletions cmd/agent/app/reporter/client_metrics.go
@@ -175,12 +175,12 @@ func (r *ClientMetricsReporter) updateClientMetrics(batch *jaeger.Batch) {
 func (s *lastReceivedClientStats) update(
     batchSeqNo int64,
     stats *jaeger.ClientStats,
-    metrics *clientMetrics,
+    cMetrics *clientMetrics,
 ) {
     s.lock.Lock()
     defer s.lock.Unlock()

-    metrics.BatchesReceived.Inc(1)
+    cMetrics.BatchesReceived.Inc(1)

     if s.batchSeqNo >= batchSeqNo {
         // Ignore out of order batches. Once we receive a batch with a larger-than-seen number,
@@ -191,11 +191,11 @@ func (s *lastReceivedClientStats) update(
     // do not update counters on the first batch, because it may cause a huge spike in totals
     // if the client has been running for a while already, but the agent just started.
     if s.batchSeqNo > 0 {
-        metrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo)
+        cMetrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo)
         if stats != nil {
-            metrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans)
-            metrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans)
-            metrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans)
+            cMetrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans)
+            cMetrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans)
+            cMetrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans)
         }
     }

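The Inc(stats.X - s.x) calls above convert cumulative totals reported by the client into counter increments. A small illustrative sketch of that cumulative-to-delta conversion (simplified types, not the reporter's actual ones):

package main

import "fmt"

type counter struct{ total int64 }

func (c *counter) Inc(delta int64) { c.total += delta }

func main() {
    failedToEmit := &counter{}
    var lastSeen int64 // last cumulative value seen from the client

    // Client batches carry running totals; the reporter emits only the
    // increase since the previous batch, so totals are not double-counted.
    for _, cumulative := range []int64{3, 7, 7, 12} {
        failedToEmit.Inc(cumulative - lastSeen)
        lastSeen = cumulative
    }
    fmt.Println(failedToEmit.total) // 12, matching the final cumulative total
}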
6 changes: 3 additions & 3 deletions cmd/agent/app/reporter/flags.go
@@ -33,10 +33,10 @@ type Options struct {
 }

 // AddFlags adds flags for Options.
-func AddFlags(flags *flag.FlagSet) {
-    flags.String(reporterType, string(GRPC), fmt.Sprintf("Reporter type to use e.g. %s", string(GRPC)))
+func AddFlags(flagSet *flag.FlagSet) {
+    flagSet.String(reporterType, string(GRPC), fmt.Sprintf("Reporter type to use e.g. %s", string(GRPC)))
     if !setupcontext.IsAllInOne() {
-        flags.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}")
+        flagSet.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}")
     }
 }

66 changes: 33 additions & 33 deletions cmd/collector/app/flags/flags.go
@@ -172,55 +172,55 @@ type GRPCOptions struct {
 }

 // AddFlags adds flags for CollectorOptions
-func AddFlags(flags *flag.FlagSet) {
-    flags.Int(flagNumWorkers, DefaultNumWorkers, "The number of workers pulling items from the queue")
-    flags.Int(flagQueueSize, DefaultQueueSize, "The queue size of the collector")
-    flags.Uint(flagDynQueueSizeMemory, 0, "(experimental) The max memory size in MiB to use for the dynamic queue.")
-    flags.String(flagCollectorTags, "", "One or more tags to be added to the Process tags of all spans passing through this collector. Ex: key1=value1,key2=${envVar:defaultValue}")
-    flags.Bool(flagSpanSizeMetricsEnabled, false, "Enables metrics based on processed span size, which are more expensive to calculate.")
-
-    addHTTPFlags(flags, httpServerFlagsCfg, ports.PortToHostPort(ports.CollectorHTTP))
-    addGRPCFlags(flags, grpcServerFlagsCfg, ports.PortToHostPort(ports.CollectorGRPC))
-
-    flags.Bool(flagCollectorOTLPEnabled, true, "Enables OpenTelemetry OTLP receiver on dedicated HTTP and gRPC ports")
-    addHTTPFlags(flags, otlpServerFlagsCfg.HTTP, ":4318")
-    corsOTLPFlags.AddFlags(flags)
-    addGRPCFlags(flags, otlpServerFlagsCfg.GRPC, ":4317")
-
-    flags.String(flagZipkinHTTPHostPort, "", "The host:port (e.g. 127.0.0.1:9411 or :9411) of the collector's Zipkin server (disabled by default)")
-    flags.Bool(flagZipkinKeepAliveEnabled, true, "KeepAlive configures allow Keep-Alive for Zipkin HTTP server (enabled by default)")
-    tlsZipkinFlagsConfig.AddFlags(flags)
-    corsZipkinFlags.AddFlags(flags)
-
-    tenancy.AddFlags(flags)
+func AddFlags(flagSet *flag.FlagSet) {
+    flagSet.Int(flagNumWorkers, DefaultNumWorkers, "The number of workers pulling items from the queue")
+    flagSet.Int(flagQueueSize, DefaultQueueSize, "The queue size of the collector")
+    flagSet.Uint(flagDynQueueSizeMemory, 0, "(experimental) The max memory size in MiB to use for the dynamic queue.")
+    flagSet.String(flagCollectorTags, "", "One or more tags to be added to the Process tags of all spans passing through this collector. Ex: key1=value1,key2=${envVar:defaultValue}")
+    flagSet.Bool(flagSpanSizeMetricsEnabled, false, "Enables metrics based on processed span size, which are more expensive to calculate.")
+
+    addHTTPFlags(flagSet, httpServerFlagsCfg, ports.PortToHostPort(ports.CollectorHTTP))
+    addGRPCFlags(flagSet, grpcServerFlagsCfg, ports.PortToHostPort(ports.CollectorGRPC))
+
+    flagSet.Bool(flagCollectorOTLPEnabled, true, "Enables OpenTelemetry OTLP receiver on dedicated HTTP and gRPC ports")
+    addHTTPFlags(flagSet, otlpServerFlagsCfg.HTTP, ":4318")
+    corsOTLPFlags.AddFlags(flagSet)
+    addGRPCFlags(flagSet, otlpServerFlagsCfg.GRPC, ":4317")
+
+    flagSet.String(flagZipkinHTTPHostPort, "", "The host:port (e.g. 127.0.0.1:9411 or :9411) of the collector's Zipkin server (disabled by default)")
+    flagSet.Bool(flagZipkinKeepAliveEnabled, true, "KeepAlive configures allow Keep-Alive for Zipkin HTTP server (enabled by default)")
+    tlsZipkinFlagsConfig.AddFlags(flagSet)
+    corsZipkinFlags.AddFlags(flagSet)
+
+    tenancy.AddFlags(flagSet)
 }

-func addHTTPFlags(flags *flag.FlagSet, cfg serverFlagsConfig, defaultHostPort string) {
-    flags.String(cfg.prefix+"."+flagSuffixHostPort, defaultHostPort, "The host:port (e.g. 127.0.0.1:12345 or :12345) of the collector's HTTP server")
-    flags.Duration(cfg.prefix+"."+flagSuffixHTTPIdleTimeout, 0, "See https://pkg.go.dev/net/http#Server")
-    flags.Duration(cfg.prefix+"."+flagSuffixHTTPReadTimeout, 0, "See https://pkg.go.dev/net/http#Server")
-    flags.Duration(cfg.prefix+"."+flagSuffixHTTPReadHeaderTimeout, 2*time.Second, "See https://pkg.go.dev/net/http#Server")
-    cfg.tls.AddFlags(flags)
+func addHTTPFlags(flagSet *flag.FlagSet, cfg serverFlagsConfig, defaultHostPort string) {
+    flagSet.String(cfg.prefix+"."+flagSuffixHostPort, defaultHostPort, "The host:port (e.g. 127.0.0.1:12345 or :12345) of the collector's HTTP server")
+    flagSet.Duration(cfg.prefix+"."+flagSuffixHTTPIdleTimeout, 0, "See https://pkg.go.dev/net/http#Server")
+    flagSet.Duration(cfg.prefix+"."+flagSuffixHTTPReadTimeout, 0, "See https://pkg.go.dev/net/http#Server")
+    flagSet.Duration(cfg.prefix+"."+flagSuffixHTTPReadHeaderTimeout, 2*time.Second, "See https://pkg.go.dev/net/http#Server")
+    cfg.tls.AddFlags(flagSet)
 }

-func addGRPCFlags(flags *flag.FlagSet, cfg serverFlagsConfig, defaultHostPort string) {
-    flags.String(
+func addGRPCFlags(flagSet *flag.FlagSet, cfg serverFlagsConfig, defaultHostPort string) {
+    flagSet.String(
         cfg.prefix+"."+flagSuffixHostPort,
         defaultHostPort,
         "The host:port (e.g. 127.0.0.1:12345 or :12345) of the collector's gRPC server")
-    flags.Int(
+    flagSet.Int(
         cfg.prefix+"."+flagSuffixGRPCMaxReceiveMessageLength,
         DefaultGRPCMaxReceiveMessageLength,
         "The maximum receivable message size for the collector's gRPC server")
-    flags.Duration(
+    flagSet.Duration(
         cfg.prefix+"."+flagSuffixGRPCMaxConnectionAge,
         0,
         "The maximum amount of time a connection may exist. Set this value to a few seconds or minutes on highly elastic environments, so that clients discover new collector nodes frequently. See https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters")
-    flags.Duration(
+    flagSet.Duration(
         cfg.prefix+"."+flagSuffixGRPCMaxConnectionAgeGrace,
         0,
         "The additive period after MaxConnectionAge after which the connection will be forcibly closed. See https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters")
-    cfg.tls.AddFlags(flags)
+    cfg.tls.AddFlags(flagSet)
 }

 func (opts *HTTPOptions) initFromViper(v *viper.Viper, _ *zap.Logger, cfg serverFlagsConfig) error {
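The flags-to-flagSet rename is invisible to callers; registration and parsing behave exactly as before. A hedged usage sketch using only standard-library calls (the flag names and defaults here are stand-ins, not the collector's real ones):

package main

import (
    "flag"
    "fmt"
)

// addFlags mirrors the AddFlags style above: it registers options on a
// caller-supplied *flag.FlagSet rather than the global flag.CommandLine.
func addFlags(flagSet *flag.FlagSet) {
    flagSet.Int("collector.num-workers", 50, "The number of workers pulling items from the queue")
    flagSet.Int("collector.queue-size", 2000, "The queue size of the collector")
}

func main() {
    fs := flag.NewFlagSet("jaeger-collector", flag.ExitOnError)
    addFlags(fs)
    _ = fs.Parse([]string{"--collector.num-workers=10"})
    fmt.Println(fs.Lookup("collector.num-workers").Value) // 10
}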
26 changes: 13 additions & 13 deletions cmd/collector/app/handler/grpc_handler_test.go
@@ -103,9 +103,9 @@ func newClient(t *testing.T, addr net.Addr) (api_v2.CollectorServiceClient, *grp
 }

 func TestPostSpans(t *testing.T) {
-    processor := &mockSpanProcessor{}
+    proc := &mockSpanProcessor{}
     server, addr := initializeGRPCTestServer(t, func(s *grpc.Server) {
-        handler := NewGRPCHandler(zap.NewNop(), processor, &tenancy.Manager{})
+        handler := NewGRPCHandler(zap.NewNop(), proc, &tenancy.Manager{})
         api_v2.RegisterCollectorServiceServer(s, handler)
     })
     defer server.Stop()
@@ -130,17 +130,17 @@ func TestPostSpans(t *testing.T) {
             Batch: test.batch,
         })
         require.NoError(t, err)
-        got := processor.getSpans()
+        got := proc.getSpans()
         require.Equal(t, len(test.batch.GetSpans()), len(got))
         assert.Equal(t, test.expected, got)
-        processor.reset()
+        proc.reset()
     }
 }

 func TestGRPCCompressionEnabled(t *testing.T) {
-    processor := &mockSpanProcessor{}
+    proc := &mockSpanProcessor{}
     server, addr := initializeGRPCTestServer(t, func(s *grpc.Server) {
-        handler := NewGRPCHandler(zap.NewNop(), processor, &tenancy.Manager{})
+        handler := NewGRPCHandler(zap.NewNop(), proc, &tenancy.Manager{})
         api_v2.RegisterCollectorServiceServer(s, handler)
     })
     defer server.Stop()
@@ -214,9 +214,9 @@ func TestPostTenantedSpans(t *testing.T) {
     tenantHeader := "x-tenant"
     dummyTenant := "grpc-test-tenant"

-    processor := &mockSpanProcessor{}
+    proc := &mockSpanProcessor{}
     server, addr := initializeGRPCTestServer(t, func(s *grpc.Server) {
-        handler := NewGRPCHandler(zap.NewNop(), processor,
+        handler := NewGRPCHandler(zap.NewNop(), proc,
             tenancy.NewManager(&tenancy.Options{
                 Enabled: true,
                 Header:  tenantHeader,
@@ -296,9 +296,9 @@ func TestPostTenantedSpans(t *testing.T) {
             } else {
                 require.NoError(t, err)
             }
-            assert.Equal(t, test.expected, processor.getSpans())
-            assert.Equal(t, test.expectedTenants, processor.getTenants())
-            processor.reset()
+            assert.Equal(t, test.expected, proc.getSpans())
+            assert.Equal(t, test.expectedTenants, proc.getTenants())
+            proc.reset()
         })
     }
 }
@@ -351,8 +351,8 @@ func TestGetTenant(t *testing.T) {
         },
     }

-    processor := &mockSpanProcessor{}
-    handler := NewGRPCHandler(zap.NewNop(), processor,
+    proc := &mockSpanProcessor{}
+    handler := NewGRPCHandler(zap.NewNop(), proc,
         tenancy.NewManager(&tenancy.Options{
             Enabled: true,
             Header:  tenantHeader,
4 changes: 2 additions & 2 deletions cmd/collector/app/options.go
@@ -71,9 +71,9 @@ func (options) PreProcessSpans(preProcessSpans ProcessSpans) Option {
 }

 // Sanitizer creates an Option that initializes the sanitizer function
-func (options) Sanitizer(sanitizer sanitizer.SanitizeSpan) Option {
+func (options) Sanitizer(spanSanitizer sanitizer.SanitizeSpan) Option {
     return func(b *options) {
-        b.sanitizer = sanitizer
+        b.sanitizer = spanSanitizer
     }
 }

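Sanitizer above follows the functional-options pattern: each exported method returns an Option closure that mutates a private options struct during construction. A self-contained sketch of the pattern (simplified types, not the collector's real ones):

package main

import "fmt"

type options struct{ sanitizer func(string) string }

// Option mutates the private options struct during construction.
type Option func(*options)

// Sanitizer mirrors the renamed method above: the parameter no longer
// shadows the sanitizer package the real type comes from.
func Sanitizer(spanSanitizer func(string) string) Option {
    return func(b *options) { b.sanitizer = spanSanitizer }
}

func newOptions(opts ...Option) *options {
    o := &options{sanitizer: func(s string) string { return s }} // default: no-op
    for _, opt := range opts {
        opt(o)
    }
    return o
}

func main() {
    o := newOptions(Sanitizer(func(s string) string { return "clean:" + s }))
    fmt.Println(o.sanitizer("span")) // clean:span
}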
4 changes: 2 additions & 2 deletions cmd/collector/app/sanitizer/service_name_sanitizer.go
@@ -10,8 +10,8 @@ import (
 )

 // NewServiceNameSanitizer creates a service name sanitizer.
-func NewServiceNameSanitizer(cache cache.Cache) SanitizeSpan {
-    sanitizer := serviceNameSanitizer{cache: cache}
+func NewServiceNameSanitizer(c cache.Cache) SanitizeSpan {
+    sanitizer := serviceNameSanitizer{cache: c}
     return sanitizer.Sanitize
 }

4 changes: 2 additions & 2 deletions cmd/ingester/app/builder/builder.go
@@ -37,7 +37,7 @@ func CreateConsumer(logger *zap.Logger, metricsFactory metrics.Factory, spanWrit
         Writer:       spanWriter,
         Unmarshaller: unmarshaller,
     }
-    spanProcessor := processor.NewSpanProcessor(spParams)
+    proc := processor.NewSpanProcessor(spParams)

     consumerConfig := kafkaConsumer.Configuration{
         Brokers: options.Brokers,
@@ -58,7 +58,7 @@ func CreateConsumer(logger *zap.Logger, metricsFactory metrics.Factory, spanWrit
     factoryParams := consumer.ProcessorFactoryParams{
         Parallelism:    options.Parallelism,
         SaramaConsumer: saramaConsumer,
-        BaseProcessor:  spanProcessor,
+        BaseProcessor:  proc,
         Logger:         logger,
         Factory:        metricsFactory,
     }
4 changes: 2 additions & 2 deletions cmd/ingester/app/consumer/committing_processor.go
@@ -21,9 +21,9 @@ type offsetMarker interface {
 }

 // NewCommittingProcessor returns a processor that commits message offsets to Kafka
-func NewCommittingProcessor(processor processor.SpanProcessor, marker offsetMarker) processor.SpanProcessor {
+func NewCommittingProcessor(proc processor.SpanProcessor, marker offsetMarker) processor.SpanProcessor {
     return &comittingProcessor{
-        processor: processor,
+        processor: proc,
         marker:    marker,
     }
 }
10 changes: 5 additions & 5 deletions cmd/ingester/app/consumer/consumer_test.go
@@ -78,19 +78,19 @@ func newConsumer(
     t *testing.T,
     metricsFactory metrics.Factory,
     _ string, /* topic */
-    processor processor.SpanProcessor,
-    consumer consumer.Consumer,
+    proc processor.SpanProcessor,
+    cons consumer.Consumer,
 ) *Consumer {
     logger, _ := zap.NewDevelopment()
     consumerParams := Params{
         MetricsFactory:   metricsFactory,
         Logger:           logger,
-        InternalConsumer: consumer,
+        InternalConsumer: cons,
         ProcessorFactory: ProcessorFactory{
-            consumer:       consumer,
+            consumer:       cons,
             metricsFactory: metricsFactory,
             logger:         logger,
-            baseProcessor:  processor,
+            baseProcessor:  proc,
             parallelism:    1,
         },
     }
4 changes: 2 additions & 2 deletions cmd/ingester/app/consumer/processor_factory.go
@@ -50,8 +50,8 @@ func NewProcessorFactory(params ProcessorFactoryParams) (*ProcessorFactory, erro
 func (c *ProcessorFactory) new(topic string, partition int32, minOffset int64) processor.SpanProcessor {
     c.logger.Info("Creating new processors", zap.Int32("partition", partition))

-    markOffset := func(offset int64) {
-        c.consumer.MarkPartitionOffset(topic, partition, offset, "")
+    markOffset := func(offsetVal int64) {
+        c.consumer.MarkPartitionOffset(topic, partition, offsetVal, "")
     }

     om := offset.NewManager(minOffset, markOffset, topic, partition, c.metricsFactory)
4 changes: 2 additions & 2 deletions cmd/ingester/app/processor/decorator/retry.go
@@ -79,7 +79,7 @@ func PropagateError(b bool) RetryOption {

 // NewRetryingProcessor returns a processor that retries failures using an exponential backoff
 // with jitter.
-func NewRetryingProcessor(f metrics.Factory, processor processor.SpanProcessor, opts ...RetryOption) processor.SpanProcessor {
+func NewRetryingProcessor(f metrics.Factory, proc processor.SpanProcessor, opts ...RetryOption) processor.SpanProcessor {
     options := defaultOpts
     for _, opt := range opts {
         opt(&options)
@@ -89,7 +89,7 @@ func NewRetryingProcessor(f metrics.Factory, processor processor.SpanProcessor,
     return &retryDecorator{
         retryAttempts: m.Counter(metrics.Options{Name: "retry-attempts", Tags: nil}),
         exhausted:     m.Counter(metrics.Options{Name: "retry-exhausted", Tags: nil}),
-        processor:     processor,
+        processor:     proc,
         options:       options,
     }
 }
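Several of the touched constructors (NewCommittingProcessor, NewRetryingProcessor) wrap one processor.SpanProcessor in another. A minimal sketch of that decorator chaining, with invented types for illustration:

package main

import (
    "errors"
    "fmt"
)

// SpanProcessor is a stand-in for the ingester's processor interface.
type SpanProcessor interface {
    Process(msg string) error
}

type baseProcessor struct{}

func (baseProcessor) Process(msg string) error {
    fmt.Println("processing:", msg)
    return nil
}

// retryDecorator wraps another processor, as NewRetryingProcessor does:
// the inner processor does the work, the wrapper adds a retry policy.
type retryDecorator struct {
    processor SpanProcessor
    attempts  int
}

func (r retryDecorator) Process(msg string) error {
    err := errors.New("no attempts configured")
    for i := 0; i < r.attempts; i++ {
        if err = r.processor.Process(msg); err == nil {
            return nil
        }
    }
    return err
}

func main() {
    var proc SpanProcessor = baseProcessor{}
    proc = retryDecorator{processor: proc, attempts: 3} // decorators compose
    _ = proc.Process("span batch")
}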