From 6356302b54f06c8f2dee8e59740409d49e84ef24 Mon Sep 17 00:00:00 2001
From: Dhia Ayachi
Date: Mon, 12 Jul 2021 10:19:14 -0400
Subject: [PATCH] fix linter issues

---
 agent/acl_test.go                 |  4 ++--
 agent/agent.go                    | 13 ++++++-----
 agent/agent_test.go               |  4 ++--
 agent/connect/ca/provider_aws.go  |  5 ++---
 agent/consul/leader_connect_ca.go | 39 +++++++++++++++++++------------
 agent/consul/leader_metrics.go    |  3 +--
 agent/service_manager.go          |  2 +-
 agent/setup.go                    | 34 +++++++++++++--------------
 agent/testagent.go                |  2 +-
 9 files changed, 57 insertions(+), 49 deletions(-)

diff --git a/agent/acl_test.go b/agent/acl_test.go
index a1cf461f183c..ef06cc0ec9c3 100644
--- a/agent/acl_test.go
+++ b/agent/acl_test.go
@@ -58,7 +58,7 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe
 	bd, err := NewBaseDeps(loader, logBuffer)
 	require.NoError(t, err)
 
-	bd.Logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{
+	bd.Deps.Logger = hclog.NewInterceptLogger(&hclog.LoggerOptions{
 		Name:   name,
 		Level:  hclog.Debug,
 		Output: logBuffer,
@@ -70,7 +70,7 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe
 	require.NoError(t, err)
 
 	agent.delegate = a
-	agent.State = local.NewState(LocalConfig(bd.RuntimeConfig), bd.Logger, bd.Tokens)
+	agent.State = local.NewState(LocalConfig(bd.RuntimeConfig), bd.Deps.Logger, bd.Deps.Tokens)
 	agent.State.TriggerSyncChanges = func() {}
 	a.Agent = agent
 	return a
diff --git a/agent/agent.go b/agent/agent.go
index 73d22894c874..202ee69549db 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -352,6 +352,7 @@ type Agent struct {
 // * create the AutoConfig object for future use in fully
 //   resolving the configuration
 func New(bd BaseDeps) (*Agent, error) {
+
 	a := Agent{
 		checkReapAfter:  make(map[structs.CheckID]time.Duration),
 		checkMonitors:   make(map[structs.CheckID]*checks.CheckMonitor),
@@ -371,16 +372,16 @@ func New(bd BaseDeps) (*Agent, error) {
 		stateLock: mutex.New(),
 
 		baseDeps:        bd,
-		tokens:          bd.Tokens,
-		logger:          bd.Logger,
-		tlsConfigurator: bd.TLSConfigurator,
+		tokens:          bd.Deps.Tokens,
+		logger:          bd.Deps.Logger,
+		tlsConfigurator: bd.Deps.TLSConfigurator,
 		config:          bd.RuntimeConfig,
 		cache:           bd.Cache,
-		routineManager:  routine.NewManager(bd.Logger),
+		routineManager:  routine.NewManager(bd.Deps.Logger),
 	}
 
 	// TODO: create rpcClientHealth in BaseDeps once NetRPC is available without Agent
-	conn, err := bd.GRPCConnPool.ClientConn(bd.RuntimeConfig.Datacenter)
+	conn, err := bd.Deps.GRPCConnPool.ClientConn(bd.RuntimeConfig.Datacenter)
 	if err != nil {
 		return nil, err
 	}
@@ -392,7 +393,7 @@ func New(bd BaseDeps) (*Agent, error) {
 		ViewStore: bd.ViewStore,
 		MaterializerDeps: health.MaterializerDeps{
 			Conn:   conn,
-			Logger: bd.Logger.Named("rpcclient.health"),
+			Logger: bd.Deps.Logger.Named("rpcclient.health"),
 		},
 		UseStreamingBackend: a.config.UseStreamingBackend,
 	}
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 9570bec08bb8..869189476909 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -5035,7 +5035,7 @@ func TestSharedRPCRouter(t *testing.T) {
 
 	testrpc.WaitForTestAgent(t, srv.RPC, "dc1")
 
-	mgr, server := srv.Agent.baseDeps.Router.FindLANRoute()
+	mgr, server := srv.Agent.baseDeps.Deps.Router.FindLANRoute()
 	require.NotNil(t, mgr)
 	require.NotNil(t, server)
 
@@ -5047,7 +5047,7 @@ func TestSharedRPCRouter(t *testing.T) {
 
 	testrpc.WaitForTestAgent(t, client.RPC, "dc1")
 
-	mgr, server = client.Agent.baseDeps.Router.FindLANRoute()
+	mgr, server = client.Agent.baseDeps.Deps.Router.FindLANRoute()
 	require.NotNil(t, mgr)
 	require.NotNil(t, server)
 }
diff --git a/agent/connect/ca/provider_aws.go b/agent/connect/ca/provider_aws.go
index f71df6d245b3..adfde621fbb9 100644
--- a/agent/connect/ca/provider_aws.go
+++ b/agent/connect/ca/provider_aws.go
@@ -12,13 +12,12 @@ import (
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/acmpca"
-	"github.com/hashicorp/go-hclog"
-	"github.com/mitchellh/mapstructure"
 
-	"github.com/hashicorp/go-hclog"
+	"github.com/mitchellh/mapstructure"
 
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/go-hclog"
 )
 
 const (
diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go
index cb646dfe1e7c..c8e4f3851c37 100644
--- a/agent/consul/leader_connect_ca.go
+++ b/agent/consul/leader_connect_ca.go
@@ -180,7 +180,8 @@ func (c *CAManager) getPrimaryRoots() structs.IndexedCARoots {
 // when setting up the CA during establishLeadership. The state should be set to
 // non-ready before calling this.
 func (c *CAManager) initializeCAConfig() (*structs.CAConfiguration, error) {
-	st := c.delegate.State()
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
+	st := delegate.State()
 	_, config, err := st.CAConfig(nil)
 	if err != nil {
 		return nil, err
@@ -211,7 +212,7 @@ func (c *CAManager) initializeCAConfig() (*structs.CAConfiguration, error) {
 		Op:     structs.CAOpSetConfig,
 		Config: config,
 	}
-	if resp, err := c.delegate.ApplyCARequest(&req); err != nil {
+	if resp, err := delegate.ApplyCARequest(&req); err != nil {
 		return nil, err
 	} else if respErr, ok := resp.(error); ok {
 		return nil, respErr
@@ -450,6 +451,7 @@ func (c *CAManager) newProvider(conf *structs.CAConfiguration) (ca.Provider, err
 // initializeRootCA runs the initialization logic for a root CA. It should only
 // be called while the state lock is held by setting the state to non-ready.
 func (c *CAManager) initializeRootCA(provider ca.Provider, conf *structs.CAConfiguration) error {
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	pCfg := ca.ProviderConfig{
 		ClusterID:  conf.ClusterID,
 		Datacenter: c.serverConf.Datacenter,
@@ -497,7 +499,7 @@ func (c *CAManager) initializeRootCA(provider ca.Provider, conf *structs.CAConfi
 			Op:     structs.CAOpSetConfig,
 			Config: conf,
 		}
-		if _, err = c.delegate.ApplyCARequest(&req); err != nil {
+		if _, err = delegate.ApplyCARequest(&req); err != nil {
 			return fmt.Errorf("error persisting provider state: %v", err)
 		}
 	}
@@ -514,7 +516,7 @@ func (c *CAManager) initializeRootCA(provider ca.Provider, conf *structs.CAConfi
 	// tied to the provider.
 	// Every change to the CA after this initial bootstrapping should
 	// be done through the rotation process.
-	state := c.delegate.State()
+	state := delegate.State()
 	_, activeRoot, err := state.CARootActive(nil)
 	if err != nil {
 		return err
@@ -546,7 +548,7 @@ func (c *CAManager) initializeRootCA(provider ca.Provider, conf *structs.CAConfi
 	}
 
 	// Store the root cert in raft
-	resp, err := c.delegate.ApplyCARequest(&structs.CARequest{
+	resp, err := delegate.ApplyCARequest(&structs.CARequest{
 		Op:    structs.CAOpSetRoots,
 		Index: idx,
 		Roots: []*structs.CARoot{rootCA},
@@ -572,6 +574,7 @@ func (c *CAManager) initializeRootCA(provider ca.Provider, conf *structs.CAConfi
 // to non-ready.
 func (c *CAManager) initializeSecondaryCA(provider ca.Provider, config *structs.CAConfiguration) error {
 	activeIntermediate, err := provider.ActiveIntermediate()
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	if err != nil {
 		return err
 	}
@@ -611,7 +614,8 @@ func (c *CAManager) initializeSecondaryCA(provider ca.Provider, config *structs.
 		// This will fetch the secondary's exact current representation of the
 		// active root. Note that this data should only be used if the IDs
 		// match, otherwise it's out of date and should be regenerated.
-		_, activeSecondaryRoot, err = c.delegate.State().CARootActive(nil)
+
+		_, activeSecondaryRoot, err = delegate.State().CARootActive(nil)
 		if err != nil {
 			return err
 		}
@@ -661,7 +665,7 @@ func (c *CAManager) initializeSecondaryCA(provider ca.Provider, config *structs.
 	}
 
 	// Update the roots list in the state store if there's a new active root.
-	state := c.delegate.State()
+	state := delegate.State()
 	_, activeRoot, err := state.CARootActive(nil)
 	if err != nil {
 		return err
@@ -685,7 +689,8 @@ func (c *CAManager) initializeSecondaryCA(provider ca.Provider, config *structs.
 // If newActiveRoot is non-nil, it will be appended to the current roots list.
 // If config is non-nil, it will be used to overwrite the existing config.
 func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot *structs.CARoot, config *structs.CAConfiguration) error {
-	state := c.delegate.State()
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
+	state := delegate.State()
 	idx, oldRoots, err := state.CARoots(nil)
 	if err != nil {
 		return err
@@ -754,7 +759,7 @@ func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot
 		Roots:  newRoots,
 		Config: &newConf,
 	}
-	resp, err := c.delegate.ApplyCARequest(args)
+	resp, err := delegate.ApplyCARequest(args)
 	if err != nil {
 		return err
 	}
@@ -770,6 +775,7 @@ func (c *CAManager) persistNewRootAndConfig(provider ca.Provider, newActiveRoot
 }
 
 func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error) {
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	// Attempt to update the state first.
 	oldState, err := c.setState(caStateReconfig, true)
 	if err != nil {
@@ -794,7 +800,7 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
 	}
 
 	// Exit early if it's a no-op change
-	state := c.delegate.State()
+	state := delegate.State()
 	confIdx, config, err := state.CAConfig(nil)
 	if err != nil {
 		return err
@@ -895,7 +901,7 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
 	// If the root didn't change, just update the config and return.
 	if root != nil && root.ID == newActiveRoot.ID {
 		args.Op = structs.CAOpSetConfig
-		resp, err := c.delegate.ApplyCARequest(args)
+		resp, err := delegate.ApplyCARequest(args)
 		if err != nil {
 			return err
 		}
@@ -998,7 +1004,7 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
 	args.Index = idx
 	args.Config.ModifyIndex = confIdx
 	args.Roots = newRoots
-	resp, err := c.delegate.ApplyCARequest(args)
+	resp, err := delegate.ApplyCARequest(args)
 	if err != nil {
 		return err
 	}
@@ -1105,6 +1111,7 @@ func (c *CAManager) intermediateCertRenewalWatch(ctx context.Context) error {
 // expiration. If more than half the time a cert is valid has passed,
 // it will try to renew it.
 func (c *CAManager) RenewIntermediate(ctx context.Context, isPrimary bool) error {
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	// Grab the 'lock' right away so the provider/config can't be changed out while we check
 	// the intermediate.
 	if _, err := c.setState(caStateRenewIntermediate, true); err != nil {
@@ -1122,7 +1129,7 @@ func (c *CAManager) RenewIntermediate(ctx context.Context, isPrimary bool) error
 		return fmt.Errorf("secondary CA is not yet configured.")
 	}
 
-	state := c.delegate.State()
+	state := delegate.State()
 	_, root, err := state.CARootActive(nil)
 	if err != nil {
 		return err
@@ -1274,12 +1281,13 @@ func (c *CAManager) UpdateRoots(roots structs.IndexedCARoots) error {
 
 // initializeSecondaryProvider configures the given provider for a secondary, non-root datacenter.
 func (c *CAManager) initializeSecondaryProvider(provider ca.Provider, roots structs.IndexedCARoots) error {
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	if roots.TrustDomain == "" {
 		return fmt.Errorf("trust domain from primary datacenter is not initialized")
 	}
 
 	clusterID := strings.Split(roots.TrustDomain, ".")[0]
-	_, conf, err := c.delegate.State().CAConfig(nil)
+	_, conf, err := delegate.State().CAConfig(nil)
 	if err != nil {
 		return err
 	}
@@ -1374,6 +1382,7 @@ func (l *connectSignRateLimiter) getCSRRateLimiterWithLimit(limit rate.Limit) *r
 
 func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID connect.CertURI) (*structs.IssuedCert, error) {
 	provider, caRoot := c.getCAProvider()
+	delegate := c.delegate.(ca.ConsulProviderStateDelegate)
 	if provider == nil {
 		return nil, fmt.Errorf("CA is uninitialized and unable to sign certificates yet: provider is nil")
 	} else if caRoot == nil {
@@ -1381,7 +1390,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
 	}
 
 	// Verify that the CSR entity is in the cluster's trust domain
-	state := c.delegate.State()
+	state := delegate.State()
 	_, config, err := state.CAConfig(nil)
 	if err != nil {
 		return nil, err
diff --git a/agent/consul/leader_metrics.go b/agent/consul/leader_metrics.go
index 1d40b62937d5..1568ca21829a 100644
--- a/agent/consul/leader_metrics.go
+++ b/agent/consul/leader_metrics.go
@@ -6,9 +6,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/hashicorp/consul/agent/connect/ca"
-
 	"github.com/hashicorp/consul/agent/connect"
+	"github.com/hashicorp/consul/agent/connect/ca"
 
 	"github.com/armon/go-metrics"
 	"github.com/armon/go-metrics/prometheus"
diff --git a/agent/service_manager.go b/agent/service_manager.go
index d112fc2d0694..eac6f3deefee 100644
--- a/agent/service_manager.go
+++ b/agent/service_manager.go
@@ -344,7 +344,7 @@ func makeConfigRequest(bd BaseDeps, addReq AddServiceRequest) *structs.ServiceCo
 		EnterpriseMeta: ns.EnterpriseMeta,
 	}
 	if req.QueryOptions.Token == "" {
-		req.QueryOptions.Token = bd.Tokens.AgentToken()
+		req.QueryOptions.Token = bd.Deps.Tokens.AgentToken()
 	}
 	return req
 }
diff --git a/agent/setup.go b/agent/setup.go
index 7b363cd86bcb..2493e9ca1e28 100644
--- a/agent/setup.go
+++ b/agent/setup.go
@@ -62,20 +62,20 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
 	cfg := result.RuntimeConfig
 	logConf := cfg.Logging
 	logConf.Name = logging.Agent
-	d.Logger, err = logging.Setup(logConf, logOut)
+	d.Deps.Logger, err = logging.Setup(logConf, logOut)
 	if err != nil {
 		return d, err
 	}
 
 	grpcLogInitOnce.Do(func() {
-		grpclog.SetLoggerV2(logging.NewGRPCLogger(cfg.Logging.LogLevel, d.Logger))
+		grpclog.SetLoggerV2(logging.NewGRPCLogger(cfg.Logging.LogLevel, d.Deps.Logger))
 	})
 
 	for _, w := range result.Warnings {
-		d.Logger.Warn(w)
+		d.Deps.Logger.Warn(w)
 	}
 
-	cfg.NodeID, err = newNodeIDFromConfig(cfg, d.Logger)
+	cfg.NodeID, err = newNodeIDFromConfig(cfg, d.Deps.Logger)
 	if err != nil {
 		return d, fmt.Errorf("failed to setup node ID: %w", err)
 	}
@@ -89,25 +89,25 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
 		return d, fmt.Errorf("failed to initialize telemetry: %w", err)
 	}
 
-	d.TLSConfigurator, err = tlsutil.NewConfigurator(cfg.ToTLSUtilConfig(), d.Logger)
+	d.Deps.TLSConfigurator, err = tlsutil.NewConfigurator(cfg.ToTLSUtilConfig(), d.Deps.Logger)
 	if err != nil {
 		return d, err
 	}
 
 	d.RuntimeConfig = cfg
-	d.Tokens = new(token.Store)
+	d.Deps.Tokens = new(token.Store)
 
-	cfg.Cache.Logger = d.Logger.Named("cache")
+	cfg.Cache.Logger = d.Deps.Logger.Named("cache")
 	// cache-types are not registered yet, but they won't be used until the components are started.
 	d.Cache = cache.New(cfg.Cache)
-	d.ViewStore = submatview.NewStore(d.Logger.Named("viewstore"))
-	d.ConnPool = newConnPool(cfg, d.Logger, d.TLSConfigurator)
+	d.ViewStore = submatview.NewStore(d.Deps.Logger.Named("viewstore"))
+	d.Deps.ConnPool = newConnPool(cfg, d.Deps.Logger, d.Deps.TLSConfigurator)
 
 	builder := resolver.NewServerResolverBuilder(resolver.Config{})
 	resolver.Register(builder)
-	d.GRPCConnPool = grpc.NewClientConnPool(builder, grpc.TLSWrapper(d.TLSConfigurator.OutgoingRPCWrapper()), d.TLSConfigurator.UseTLS)
+	d.Deps.GRPCConnPool = grpc.NewClientConnPool(builder, grpc.TLSWrapper(d.Deps.TLSConfigurator.OutgoingRPCWrapper()), d.Deps.TLSConfigurator.UseTLS)
 
-	d.Router = router.NewRouter(d.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter), builder)
+	d.Deps.Router = router.NewRouter(d.Deps.Logger, cfg.Datacenter, fmt.Sprintf("%s.%s", cfg.NodeName, cfg.Datacenter), builder)
 
 	// this needs to happen prior to creating auto-config as some of the dependencies
 	// must also be passed to auto-config
@@ -117,14 +117,14 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer) (BaseDeps, error)
 	}
 
 	acConf := autoconf.Config{
-		DirectRPC:        d.ConnPool,
-		Logger:           d.Logger,
+		DirectRPC:        d.Deps.ConnPool,
+		Logger:           d.Deps.Logger,
 		Loader:           configLoader,
-		ServerProvider:   d.Router,
-		TLSConfigurator:  d.TLSConfigurator,
+		ServerProvider:   d.Deps.Router,
+		TLSConfigurator:  d.Deps.TLSConfigurator,
 		Cache:            d.Cache,
-		Tokens:           d.Tokens,
-		EnterpriseConfig: initEnterpriseAutoConfig(d.EnterpriseDeps, cfg),
+		Tokens:           d.Deps.Tokens,
+		EnterpriseConfig: initEnterpriseAutoConfig(d.Deps.EnterpriseDeps, cfg),
 	}
 
 	d.AutoConfig, err = autoconf.New(acConf)
diff --git a/agent/testagent.go b/agent/testagent.go
index 11410f20807c..d42a3ce234a6 100644
--- a/agent/testagent.go
+++ b/agent/testagent.go
@@ -192,7 +192,7 @@ func (a *TestAgent) Start(t *testing.T) error {
 		return fmt.Errorf("failed to create base deps: %w", err)
 	}
 
-	bd.Logger = logger
+	bd.Deps.Logger = logger
 	bd.MetricsHandler = metrics.NewInmemSink(1*time.Second, time.Minute)
 	a.Config = bd.RuntimeConfig
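
Note (illustrative sketch, not part of the patch): the recurring edits above follow two patterns. Call sites move from bd.Logger to bd.Deps.Logger, which suggests BaseDeps reaches the shared server dependencies through a named Deps field rather than an embedded struct, and each CAManager method now asserts its delegate to ca.ConsulProviderStateDelegate once before touching the state store. The stand-in below shows both patterns in miniature; every type in it (Deps, BaseDeps, stateDelegate, caManager, realDelegate) is a simplified assumption for illustration, not Consul's real definition.

// sketch.go — minimal, self-contained stand-in for the two patterns in the diff.
package main

import "fmt"

// Deps stands in for the shared server dependencies (consul.Deps in the patch).
type Deps struct {
	Logger func(msg string)
}

// BaseDeps is assumed to hold Deps as a named field rather than embedding it,
// which is why call sites in the patch read bd.Deps.Logger instead of bd.Logger.
type BaseDeps struct {
	Deps Deps
}

// stateDelegate stands in for ca.ConsulProviderStateDelegate: the narrow
// interface that exposes state access to the CA manager.
type stateDelegate interface {
	State() string
}

// caManager stores its delegate behind a broader interface, so each method
// narrows it with a type assertion before using it, as the patch does.
type caManager struct {
	delegate interface{}
}

func (c *caManager) describeState() string {
	delegate := c.delegate.(stateDelegate) // single-value assertion, mirroring the patch
	return delegate.State()
}

// realDelegate is a trivial implementation used only to make the sketch run.
type realDelegate struct{}

func (realDelegate) State() string { return "in-memory state store" }

func main() {
	bd := BaseDeps{Deps: Deps{Logger: func(msg string) { fmt.Println("log:", msg) }}}
	bd.Deps.Logger("agent starting") // named-field access, as in the patched call sites

	m := &caManager{delegate: realDelegate{}}
	fmt.Println(m.describeState())
}

As in the patch, describeState uses the single-value form of the assertion, which panics at runtime if the delegate does not implement the interface; the comma-ok form would turn that into a recoverable error instead.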