From 3bbf6c14bafd61048364763eb8eb7edc9cc77f10 Mon Sep 17 00:00:00 2001 From: Ryan Date: Wed, 9 Nov 2022 11:31:00 +0100 Subject: [PATCH 1/2] wrapping to 100 --- api/rpc/server.go | 4 ++-- cmd/celestia/bridge.go | 4 ++-- cmd/celestia/full.go | 4 ++-- cmd/celestia/light.go | 4 ++-- das/options.go | 15 +++++++++------ fraud/interface.go | 3 ++- fraud/proof.go | 3 ++- fraud/registry.go | 4 ++-- fraud/service.go | 6 ++++-- header/header.go | 11 ++++++----- header/p2p/exchange.go | 3 ++- header/p2p/subscriber.go | 17 +++++++++-------- header/store/height_indexer.go | 5 +++-- header/store/store.go | 6 ++++-- header/sync/ranges.go | 5 +++-- header/sync/sync.go | 14 +++++++++----- header/sync/sync_head.go | 12 +++++++----- header/testing.go | 4 ++-- libs/fslock/locker.go | 4 ++-- libs/fxutil/fxutil.go | 8 ++++---- nodebuilder/config.go | 5 +++-- nodebuilder/core/module.go | 3 ++- nodebuilder/das/daser.go | 4 ++-- nodebuilder/das/service.go | 4 +++- nodebuilder/fraud/service.go | 8 +++++--- nodebuilder/header/config.go | 4 ++-- nodebuilder/header/flags.go | 6 ++++-- nodebuilder/header/header.go | 4 ++-- nodebuilder/header/service.go | 4 +++- nodebuilder/init.go | 3 ++- nodebuilder/node.go | 17 +++++++++-------- nodebuilder/node/type.go | 9 +++++---- nodebuilder/node_bridge_test.go | 4 ++-- nodebuilder/p2p/config.go | 10 ++++++---- nodebuilder/p2p/host.go | 3 ++- nodebuilder/p2p/misc.go | 3 ++- nodebuilder/p2p/network.go | 3 ++- nodebuilder/p2p/pubsub.go | 4 ++-- nodebuilder/settings.go | 3 ++- nodebuilder/share/service.go | 4 +++- nodebuilder/state/keyring.go | 3 ++- nodebuilder/state/service.go | 7 +++++-- nodebuilder/store.go | 8 ++++---- nodebuilder/tests/swamp/swamp.go | 6 +++--- share/add.go | 6 ++++-- share/availability.go | 7 ++++--- share/availability/cache/availability.go | 4 ++-- share/availability/discovery/backoff.go | 9 +++++---- share/availability/light/availability.go | 4 ++-- share/availability/light/sample.go | 3 ++- share/availability/test/corrupt_data.go | 
11 +++++++---- share/availability/test/testing.go | 11 ++++++----- share/eds/byzantine/bad_encoding.go | 5 +++-- share/eds/byzantine/share_proof.go | 4 ++-- share/eds/eds.go | 19 ++++++++++++------- share/eds/eds_test.go | 8 +++++--- share/eds/retriever.go | 12 ++++++------ share/eds/retriever_quadrant.go | 3 ++- share/get.go | 7 ++++--- share/ipld/corrupted_data_test.go | 12 ++++++------ share/ipld/get.go | 11 ++++++----- share/ipld/nmt.go | 3 ++- share/service/service.go | 17 +++++++++-------- share/share.go | 8 +++++--- share/test_helpers.go | 11 ++++++----- state/core_access.go | 3 ++- 66 files changed, 257 insertions(+), 186 deletions(-) diff --git a/api/rpc/server.go b/api/rpc/server.go index dbb080e41e..85d59ccab0 100644 --- a/api/rpc/server.go +++ b/api/rpc/server.go @@ -34,8 +34,8 @@ func NewServer(address, port string) *Server { } } -// RegisterService registers a service onto the RPC server. All methods on the service will then be exposed over the -// RPC. +// RegisterService registers a service onto the RPC server. All methods on the service will then be +// exposed over the RPC. func (s *Server) RegisterService(namespace string, service interface{}) { s.rpc.Register(namespace, service) } diff --git a/cmd/celestia/bridge.go b/cmd/celestia/bridge.go index d8b650c0c0..a4ddd99268 100644 --- a/cmd/celestia/bridge.go +++ b/cmd/celestia/bridge.go @@ -12,8 +12,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. +// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the +// PersistentPreRun func on parent command. 
func init() { bridgeCmd.AddCommand( diff --git a/cmd/celestia/full.go b/cmd/celestia/full.go index fa4f4af65f..254545218d 100644 --- a/cmd/celestia/full.go +++ b/cmd/celestia/full.go @@ -14,8 +14,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. +// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the +// PersistentPreRun func on parent command. func init() { fullCmd.AddCommand( diff --git a/cmd/celestia/light.go b/cmd/celestia/light.go index 5e94f159f5..d035f0aea2 100644 --- a/cmd/celestia/light.go +++ b/cmd/celestia/light.go @@ -14,8 +14,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/state" ) -// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the PersistentPreRun func on -// parent command. +// NOTE: We should always ensure that the added Flags below are parsed somewhere, like in the +// PersistentPreRun func on parent command. func init() { lightCmd.AddCommand( diff --git a/das/options.go b/das/options.go index b4de66b776..5662bf281e 100644 --- a/das/options.go +++ b/das/options.go @@ -28,7 +28,8 @@ type Parameters struct { // ConcurrencyLimit defines the maximum amount of sampling workers running in parallel. ConcurrencyLimit int - // BackgroundStoreInterval is the period of time for background checkpointStore to perform a checkpoint backup. + // BackgroundStoreInterval is the period of time for background checkpointStore to perform a + // checkpoint backup. 
BackgroundStoreInterval time.Duration // PriorityQueueSize defines the size limit of the priority queue @@ -40,7 +41,8 @@ type Parameters struct { // DefaultParameters returns the default configuration values for the daser parameters func DefaultParameters() Parameters { - // TODO(@derrandz): parameters needs performance testing on real network to define optimal values (#1261) + // TODO(@derrandz): parameters needs performance testing on real network to define optimal values + // (#1261) return Parameters{ SamplingRange: 100, ConcurrencyLimit: 16, @@ -115,16 +117,17 @@ func WithConcurrencyLimit(concurrencyLimit int) Option { } } -// WithBackgroundStoreInterval is a functional option to configure the daser's `backgroundStoreInterval` parameter -// Refer to WithSamplingRange documentation to see an example of how to use this +// WithBackgroundStoreInterval is a functional option to configure the daser's +// `backgroundStoreInterval` parameter Refer to WithSamplingRange documentation to see an example +// of how to use this func WithBackgroundStoreInterval(backgroundStoreInterval time.Duration) Option { return func(d *DASer) { d.params.BackgroundStoreInterval = backgroundStoreInterval } } -// WithPriorityQueueSize is a functional option to configure the daser's `priorityQueuSize` parameter -// Refer to WithSamplingRange documentation to see an example of how to use this +// WithPriorityQueueSize is a functional option to configure the daser's `priorityQueuSize` +// parameter Refer to WithSamplingRange documentation to see an example of how to use this func WithPriorityQueueSize(priorityQueueSize int) Option { return func(d *DASer) { d.params.PriorityQueueSize = priorityQueueSize diff --git a/fraud/interface.go b/fraud/interface.go index 9fa8cc3d2b..1dd4ffe69a 100644 --- a/fraud/interface.go +++ b/fraud/interface.go @@ -24,7 +24,8 @@ type Service interface { Getter } -// Broadcaster is a generic interface that sends a `Proof` to all nodes subscribed on the 
Broadcaster's topic. +// Broadcaster is a generic interface that sends a `Proof` to all nodes subscribed on the +// Broadcaster's topic. type Broadcaster interface { // Broadcast takes a fraud `Proof` data structure that implements standard BinaryMarshal // interface and broadcasts it to all subscribed peers. diff --git a/fraud/proof.go b/fraud/proof.go index 65e7a183a1..e87e34440c 100644 --- a/fraud/proof.go +++ b/fraud/proof.go @@ -48,7 +48,8 @@ type Proof interface { Height() uint64 // Validate check the validity of fraud proof. // Validate throws an error if some conditions don't pass and thus fraud proof is not valid. - // NOTE: header.ExtendedHeader should pass basic validation otherwise it will panic if it's malformed. + // NOTE: header.ExtendedHeader should pass basic validation otherwise it will panic if it's + // malformed. Validate(*header.ExtendedHeader) error encoding.BinaryMarshaler diff --git a/fraud/registry.go b/fraud/registry.go index afc7cb5097..5e76ae2b37 100644 --- a/fraud/registry.go +++ b/fraud/registry.go @@ -19,8 +19,8 @@ func Register(p Proof) { panic(fmt.Sprintf("fraud: unmarshaler for %s proof is registered", p.Type())) } defaultUnmarshalers[p.Type()] = func(data []byte) (Proof, error) { - // the underlying type of `p` is a pointer to a struct and assigning `p` to a new variable is not the - // case, because it could lead to data races. + // the underlying type of `p` is a pointer to a struct and assigning `p` to a new variable is not + // the case, because it could lead to data races. // So, there is no easier way to create a hard copy of Proof other than using a reflection. 
proof := reflect.New(reflect.ValueOf(p).Elem().Type()).Interface().(Proof) err := proof.UnmarshalBinary(data) diff --git a/fraud/service.go b/fraud/service.go index b3b8bc5898..cea4ec0a60 100644 --- a/fraud/service.go +++ b/fraud/service.go @@ -18,7 +18,8 @@ import ( "go.opentelemetry.io/otel/trace" ) -// fraudRequests is the amount of external requests that will be tried to get fraud proofs from other peers. +// fraudRequests is the amount of external requests that will be tried to get fraud proofs from +// other peers. const fraudRequests = 5 // ProofService is responsible for validating and propagating Fraud Proofs. @@ -77,7 +78,8 @@ func (f *ProofService) registerProofTopics(proofTypes ...ProofType) error { return nil } -// Start joins fraud proofs topics, sets the stream handler for fraudProtocolID and starts syncing if syncer is enabled. +// Start joins fraud proofs topics, sets the stream handler for fraudProtocolID and starts syncing +// if syncer is enabled. func (f *ProofService) Start(context.Context) error { f.ctx, f.cancel = context.WithCancel(context.Background()) if err := f.registerProofTopics(registeredProofTypes()...); err != nil { diff --git a/header/header.go b/header/header.go index 2fa93b4412..79ea4941b0 100644 --- a/header/header.go +++ b/header/header.go @@ -76,7 +76,8 @@ func MakeExtendedHeader( } // Hash returns Hash of the wrapped RawHeader. -// NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without recomputing. +// NOTE: It purposely overrides Hash method of RawHeader to get it directly from Commit without +// recomputing. func (eh *ExtendedHeader) Hash() bts.HexBytes { return eh.Commit.BlockID.Hash } @@ -147,8 +148,8 @@ func (eh *ExtendedHeader) UnmarshalBinary(data []byte) error { return nil } -// MarshalJSON marshals an ExtendedHeader to JSON. The ValidatorSet is wrapped with amino encoding, to be able to -// unmarshal the crypto.PubKey type back from JSON. 
+// MarshalJSON marshals an ExtendedHeader to JSON. The ValidatorSet is wrapped with amino encoding, +// to be able to unmarshal the crypto.PubKey type back from JSON. func (eh *ExtendedHeader) MarshalJSON() ([]byte, error) { type Alias ExtendedHeader validatorSet, err := amino.Marshal(eh.ValidatorSet) @@ -164,8 +165,8 @@ func (eh *ExtendedHeader) MarshalJSON() ([]byte, error) { }) } -// UnmarshalJSON unmarshals an ExtendedHeader from JSON. The ValidatorSet is wrapped with amino encoding, to be able to -// unmarshal the crypto.PubKey type back from JSON. +// UnmarshalJSON unmarshals an ExtendedHeader from JSON. The ValidatorSet is wrapped with amino +// encoding, to be able to unmarshal the crypto.PubKey type back from JSON. func (eh *ExtendedHeader) UnmarshalJSON(data []byte) error { type Alias ExtendedHeader aux := &struct { diff --git a/header/p2p/exchange.go b/header/p2p/exchange.go index 17f2ca2ee3..773ac575bf 100644 --- a/header/p2p/exchange.go +++ b/header/p2p/exchange.go @@ -230,7 +230,8 @@ func (ex *Exchange) request( // bestHead chooses ExtendedHeader that matches the conditions: // * should have max height among received; // * should be received at least from 2 peers; -// If neither condition is met, then latest ExtendedHeader will be returned (header of the highest height). +// If neither condition is met, then latest ExtendedHeader will be returned (header of the highest +// height). func bestHead(result []*header.ExtendedHeader) (*header.ExtendedHeader, error) { if len(result) == 0 { return nil, header.ErrNotFound diff --git a/header/p2p/subscriber.go b/header/p2p/subscriber.go index 64a4c2787e..7ae46178d4 100644 --- a/header/p2p/subscriber.go +++ b/header/p2p/subscriber.go @@ -80,7 +80,8 @@ func (p *Subscriber) Broadcast(ctx context.Context, header *header.ExtendedHeade } // msgID computes an id for a pubsub message -// TODO(@Wondertan): This cause additional allocations per each recvd message in the topic. Find a way to avoid those. 
+// TODO(@Wondertan): This cause additional allocations per each recvd message in the topic. Find a +// way to avoid those. func msgID(pmsg *pb.Message) string { mID := func(data []byte) string { hash := blake2b.Sum256(data) @@ -95,13 +96,13 @@ func msgID(pmsg *pb.Message) string { } // IMPORTANT NOTE: - // Due to the nature of the Tendermint consensus, validators don't necessarily collect commit signatures from the - // entire validator set, but only the minimum required amount of them (>2/3 of voting power). In addition, - // signatures are collected asynchronously. Therefore, each validator may have a different set of signatures that - // pass the minimum required voting power threshold, causing nondeterminism in the header message gossiped over the - // network. Subsequently, this causes message duplicates as each Bridge Node, connected to a personal validator, - // sends the validator's own view of commits of effectively the same header. - // + // Due to the nature of the Tendermint consensus, validators don't necessarily collect commit + // signatures from the entire validator set, but only the minimum required amount of them (>2/3 of + // voting power). In addition, signatures are collected asynchronously. Therefore, each validator + // may have a different set of signatures that pass the minimum required voting power threshold, + // causing nondeterminism in the header message gossiped over the network. Subsequently, this + // causes message duplicates as each Bridge Node, connected to a personal validator, sends the + // validator's own view of commits of effectively the same header. 
// To solve the problem above, we exclude nondeterministic value from message id calculation h.Commit.Signatures = nil diff --git a/header/store/height_indexer.go b/header/store/height_indexer.go index 2fd70671fb..a1e64986a4 100644 --- a/header/store/height_indexer.go +++ b/header/store/height_indexer.go @@ -10,8 +10,9 @@ import ( "github.com/celestiaorg/celestia-node/header" ) -// TODO(@Wondertan): There should be a more clever way to index heights, than just storing HeightToHash pair... -// heightIndexer simply stores and cashes mappings between header Height and Hash. +// TODO(@Wondertan): There should be a more clever way to index heights, than just storing +// HeightToHash pair... heightIndexer simply stores and cashes mappings between header Height and +// Hash. type heightIndexer struct { ds datastore.Batching cache *lru.ARCCache diff --git a/header/store/store.go b/header/store/store.go index 9a447ed355..8331e03034 100644 --- a/header/store/store.go +++ b/header/store/store.go @@ -18,11 +18,13 @@ import ( var log = logging.Logger("header/store") -// TODO(@Wondertan): Those values must be configurable and proper defaults should be set for specific node type. (#709) +// TODO(@Wondertan): Those values must be configurable and proper defaults should be set for +// specific node type. (#709) var ( // DefaultStoreCacheSize defines the amount of max entries allowed in the Header Store cache. DefaultStoreCacheSize = 4096 - // DefaultIndexCacheSize defines the amount of max entries allowed in the Height to Hash index cache. + // DefaultIndexCacheSize defines the amount of max entries allowed in the Height to Hash index + // cache. DefaultIndexCacheSize = 16384 // DefaultWriteBatchSize defines the size of the batched header write. // Headers are written in batches not to thrash the underlying Datastore with writes. 
diff --git a/header/sync/ranges.go b/header/sync/ranges.go index 98b0a4b6b4..c4f38b709f 100644 --- a/header/sync/ranges.go +++ b/header/sync/ranges.go @@ -6,8 +6,9 @@ import ( "github.com/celestiaorg/celestia-node/header" ) -// ranges keeps non-overlapping and non-adjacent header ranges which are used to cache headers (in ascending order). -// This prevents unnecessary / duplicate network requests for additional headers during sync. +// ranges keeps non-overlapping and non-adjacent header ranges which are used to cache headers (in +// ascending order). This prevents unnecessary / duplicate network requests for additional headers +// during sync. type ranges struct { lk sync.RWMutex ranges []*headerRange diff --git a/header/sync/sync.go b/header/sync/sync.go index ab3233ad42..55df34e99c 100644 --- a/header/sync/sync.go +++ b/header/sync/sync.go @@ -125,8 +125,9 @@ func (s State) Duration() time.Duration { } // State reports state of the current (if in progress), or last sync (if finished). -// Note that throughout the whole Syncer lifetime there might an initial sync and multiple catch-ups. -// All of them are treated as different syncs with different state IDs and other information. +// Note that throughout the whole Syncer lifetime there might an initial sync and multiple +// catch-ups. All of them are treated as different syncs with different state IDs and other +// information. 
func (s *Syncer) State() State { s.stateLk.RLock() state := s.state @@ -227,7 +228,8 @@ func (s *Syncer) doSync(ctx context.Context, fromHead, toHead *header.ExtendedHe return err } -// processHeaders gets and stores headers starting at the given 'from' height up to 'to' height - [from:to] +// processHeaders gets and stores headers starting at the given 'from' height up to 'to' height - +// [from:to] func (s *Syncer) processHeaders(ctx context.Context, from, to uint64) (int, error) { headers, err := s.findHeaders(ctx, from, to) if err != nil { @@ -237,14 +239,16 @@ func (s *Syncer) processHeaders(ctx context.Context, from, to uint64) (int, erro return s.store.Append(ctx, headers...) } -// TODO(@Wondertan): Number of headers that can be requested at once. Either make this configurable or, +// TODO(@Wondertan): Number of headers that can be requested at once. Either make this configurable +// or, // // find a proper rationale for constant. // // TODO(@Wondertan): Make configurable var requestSize uint64 = 512 -// findHeaders gets headers from either remote peers or from local cache of headers received by PubSub - [from:to] +// findHeaders gets headers from either remote peers or from local cache of headers received by +// PubSub - [from:to] func (s *Syncer) findHeaders(ctx context.Context, from, to uint64) ([]*header.ExtendedHeader, error) { amount := to - from + 1 // + 1 to include 'to' height as well if amount > requestSize { diff --git a/header/sync/sync_head.go b/header/sync/sync_head.go index 26b8534288..ee64cc8d81 100644 --- a/header/sync/sync_head.go +++ b/header/sync/sync_head.go @@ -59,9 +59,9 @@ func (s *Syncer) subjectiveHead(ctx context.Context) (*header.ExtendedHeader, er } // networkHead returns the latest network header. -// Known subjective head is considered network head if it is recent enough(now-timestamp<=blocktime). 
-// Otherwise, network header is requested from a trusted peer and set as the new subjective head, -// assuming that trusted peer is always synced. +// Known subjective head is considered network head if it is recent +// enough(now-timestamp<=blocktime). Otherwise, network header is requested from a trusted peer and +// set as the new subjective head, assuming that trusted peer is always synced. func (s *Syncer) networkHead(ctx context.Context) (*header.ExtendedHeader, error) { sbjHead, err := s.subjectiveHead(ctx) if err != nil { @@ -102,7 +102,8 @@ func (s *Syncer) networkHead(ctx context.Context) (*header.ExtendedHeader, error // incomingNetHead processes new gossiped network headers. func (s *Syncer) incomingNetHead(ctx context.Context, netHead *header.ExtendedHeader) pubsub.ValidationResult { - // Try to short-circuit netHead with append. If not adjacent/from future - try it as new network header + // Try to short-circuit netHead with append. If not adjacent/from future - try it as new network + // header _, err := s.store.Append(ctx, netHead) if err == nil { // a happy case where we appended maybe head directly, so accept @@ -128,7 +129,8 @@ func (s *Syncer) incomingNetHead(ctx context.Context, netHead *header.ExtendedHe return s.newNetHead(ctx, netHead, false) } -// newNetHead sets the network header as the new subjective head with preceding validation(per request). +// newNetHead sets the network header as the new subjective head with preceding validation(per +// request). 
func (s *Syncer) newNetHead(ctx context.Context, netHead *header.ExtendedHeader, trust bool) pubsub.ValidationResult { // validate netHead against subjective head if !trust { diff --git a/header/testing.go b/header/testing.go index 365e6151d3..cb55caff37 100644 --- a/header/testing.go +++ b/header/testing.go @@ -1,5 +1,5 @@ -// TODO(@Wondertan): Ideally, we should move that into subpackage, so this does not get included into binary of -// production code, but that does not matter at the moment. +// TODO(@Wondertan): Ideally, we should move that into subpackage, so this does not get included +// into binary of production code, but that does not matter at the moment. package header diff --git a/libs/fslock/locker.go b/libs/fslock/locker.go index a858d0bf7d..f451c42cc1 100644 --- a/libs/fslock/locker.go +++ b/libs/fslock/locker.go @@ -21,8 +21,8 @@ func Lock(path string) (*Locker, error) { } // Locker is a simple utility meant to create lock files. -// This is to prevent multiple processes from managing the same working directory by purpose or accident. -// NOTE: Windows is not supported. +// This is to prevent multiple processes from managing the same working directory by purpose or +// accident. NOTE: Windows is not supported. type Locker struct { file *os.File path string diff --git a/libs/fxutil/fxutil.go b/libs/fxutil/fxutil.go index a03ab0cb20..872f7c91a6 100644 --- a/libs/fxutil/fxutil.go +++ b/libs/fxutil/fxutil.go @@ -43,14 +43,14 @@ func InvokeIf(cond bool, function interface{}) fx.Option { return fx.Options() } -// ProvideAs creates an FX option that provides constructor 'cnstr' with the returned values types as 'cnstrs' -// It is as simple utility that hides away FX annotation details. +// ProvideAs creates an FX option that provides constructor 'cnstr' with the returned values types +// as 'cnstrs' It is as simple utility that hides away FX annotation details. 
func ProvideAs(cnstr interface{}, cnstrs ...interface{}) fx.Option { return fx.Provide(fx.Annotate(cnstr, fx.As(cnstrs...))) } -// ReplaceAs creates an FX option that substitutes types defined by constructors 'cnstrs' with the value 'val'. -// It is as simple utility that hides away FX annotation details. +// ReplaceAs creates an FX option that substitutes types defined by constructors 'cnstrs' with the +// value 'val'. It is as simple utility that hides away FX annotation details. func ReplaceAs(val interface{}, cnstrs ...interface{}) fx.Option { return fx.Replace(fx.Annotate(val, fx.As(cnstrs...))) } diff --git a/nodebuilder/config.go b/nodebuilder/config.go index a7ae3971f6..56a90fe4c5 100644 --- a/nodebuilder/config.go +++ b/nodebuilder/config.go @@ -77,9 +77,10 @@ func LoadConfig(path string) (*Config, error) { } // TODO(@Wondertan): We should have a description for each field written into w, -// so users can instantly understand purpose of each field. Ideally, we should have a utility program to parse comments -// from actual sources(*.go files) and generate docs from comments. Hint: use 'ast' package. +// so users can instantly understand purpose of each field. Ideally, we should have a utility +// program to parse comments from actual sources(*.go files) and generate docs from comments. +// Hint: use 'ast' package. // Encode encodes a given Config into w. func (cfg *Config) Encode(w io.Writer) error { return toml.NewEncoder(w).Encode(cfg) diff --git a/nodebuilder/core/module.go b/nodebuilder/core/module.go index 59d6db273c..fb33c86a42 100644 --- a/nodebuilder/core/module.go +++ b/nodebuilder/core/module.go @@ -12,7 +12,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" ) -// ConstructModule collects all the components and services related to managing the relationship with the Core node. +// ConstructModule collects all the components and services related to managing the relationship +// with the Core node. 
func ConstructModule(tp node.Type, cfg *Config, options ...fx.Option) fx.Option { // sanitize config values before constructing module cfgErr := cfg.Validate() diff --git a/nodebuilder/das/daser.go b/nodebuilder/das/daser.go index 424b8aedc6..01d5414da7 100644 --- a/nodebuilder/das/daser.go +++ b/nodebuilder/das/daser.go @@ -14,8 +14,8 @@ import ( var _ Module = (*daserStub)(nil) -// daserStub is a stub implementation of the DASer that is used on bridge nodes, so that we can provide a friendlier -// error when users try to access the daser over the API. +// daserStub is a stub implementation of the DASer that is used on bridge nodes, so that we can +// provide a friendlier error when users try to access the daser over the API. type daserStub struct{} func (d daserStub) SamplingStats(context.Context) (das.SamplingStats, error) { diff --git a/nodebuilder/das/service.go b/nodebuilder/das/service.go index 9707497976..ee4a9e8334 100644 --- a/nodebuilder/das/service.go +++ b/nodebuilder/das/service.go @@ -14,7 +14,9 @@ type Module interface { // API is a wrapper around Module for the RPC. // TODO(@distractedm1nd): These structs need to be autogenerated. // -//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module +// Module +// +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . type API struct { SamplingStats func(ctx context.Context) (das.SamplingStats, error) } diff --git a/nodebuilder/fraud/service.go b/nodebuilder/fraud/service.go index b4369e53d0..85862b6cd4 100644 --- a/nodebuilder/fraud/service.go +++ b/nodebuilder/fraud/service.go @@ -6,8 +6,8 @@ import ( "github.com/celestiaorg/celestia-node/fraud" ) -// Module encompasses the behavior necessary to subscribe and broadcast fraud proofs within the network. -// Any method signature changed here needs to also be changed in the API struct. 
+// Module encompasses the behavior necessary to subscribe and broadcast fraud proofs within the +// network. Any method signature changed here needs to also be changed in the API struct. type Module interface { fraud.Service } @@ -15,7 +15,9 @@ type Module interface { // API is a wrapper around Module for the RPC. // TODO(@distractedm1nd): These structs need to be autogenerated. // -//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module +// Module +// +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . type API struct { Subscribe func(fraud.ProofType) (fraud.Subscription, error) Get func(context.Context, fraud.ProofType) ([]fraud.Proof, error) diff --git a/nodebuilder/header/config.go b/nodebuilder/header/config.go index ca0dde5dc2..3f9318dba3 100644 --- a/nodebuilder/header/config.go +++ b/nodebuilder/header/config.go @@ -16,8 +16,8 @@ type Config struct { // Only affects the node once on initial sync. TrustedHash string // TrustedPeers are the peers we trust to fetch headers from. - // Note: The trusted does *not* imply Headers are not verified, but trusted as reliable to fetch headers - // at any moment. + // Note: The trusted does *not* imply Headers are not verified, but trusted as reliable to fetch + // headers at any moment. TrustedPeers []string } diff --git a/nodebuilder/header/flags.go b/nodebuilder/header/flags.go index db9b5958e8..26e7c105ed 100644 --- a/nodebuilder/header/flags.go +++ b/nodebuilder/header/flags.go @@ -47,7 +47,8 @@ func TrustedPeersFlags() *flag.FlagSet { return flags } -// ParseTrustedPeerFlags parses Header package flags from the given cmd and applies them to the passed config. +// ParseTrustedPeerFlags parses Header package flags from the given cmd and applies them to the +// passed config. 
func ParseTrustedPeerFlags( cmd *cobra.Command, cfg *Config, @@ -79,7 +80,8 @@ func TrustedHashFlags() *flag.FlagSet { return flags } -// ParseTrustedHashFlags parses Header package flags from the given cmd and saves them to the passed config. +// ParseTrustedHashFlags parses Header package flags from the given cmd and saves them to the +// passed config. func ParseTrustedHashFlags( cmd *cobra.Command, cfg *Config, diff --git a/nodebuilder/header/header.go b/nodebuilder/header/header.go index a776429c1b..7b6392c32d 100644 --- a/nodebuilder/header/header.go +++ b/nodebuilder/header/header.go @@ -64,8 +64,8 @@ func newInitStore( err = store.Init(ctx, s, ex, trustedHash) if err != nil { // TODO(@Wondertan): Error is ignored, as otherwise unit tests for Node construction fail. - // This is due to requesting step of initialization, which fetches initial Header by trusted hash from - // the network. The step can't be done during unit tests and fixing it would require either + // This is due to requesting step of initialization, which fetches initial Header by trusted hash + // from the network. The step can't be done during unit tests and fixing it would require either // * Having some test/dev/offline mode for Node that mocks out all the networking // * Hardcoding full extended header in params pkg, instead of hashes, so we avoid requesting step // * Or removing explicit initialization in favor of automated initialization by Syncer diff --git a/nodebuilder/header/service.go b/nodebuilder/header/service.go index 5753034cbf..4f1b266ca4 100644 --- a/nodebuilder/header/service.go +++ b/nodebuilder/header/service.go @@ -23,7 +23,9 @@ type Module interface { // API is a wrapper around Module for the RPC. // TODO(@distractedm1nd): These structs need to be autogenerated. // -//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . 
Module +// Module +// +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . type API struct { GetByHeight func(context.Context, uint64) (*header.ExtendedHeader, error) Head func(context.Context) (*header.ExtendedHeader, error) diff --git a/nodebuilder/init.go b/nodebuilder/init.go index b546b6d77a..5172cacab5 100644 --- a/nodebuilder/init.go +++ b/nodebuilder/init.go @@ -9,7 +9,8 @@ import ( "github.com/celestiaorg/celestia-node/nodebuilder/node" ) -// Init initializes the Node FileSystem Store for the given Node Type 'tp' in the directory under 'path'. +// Init initializes the Node FileSystem Store for the given Node Type 'tp' in the directory under +// 'path'. func Init(cfg Config, path string, tp node.Type) error { path, err := storePath(path) if err != nil { diff --git a/nodebuilder/node.go b/nodebuilder/node.go index c7225d3298..fd9065aaf1 100644 --- a/nodebuilder/node.go +++ b/nodebuilder/node.go @@ -31,9 +31,9 @@ const Timeout = time.Second * 15 var log = logging.Logger("node") -// Node represents the core structure of a Celestia node. It keeps references to all Celestia-specific -// components and services in one place and provides flexibility to run a Celestia node in different modes. -// Currently supported modes: +// Node represents the core structure of a Celestia node. It keeps references to all +// Celestia-specific components and services in one place and provides flexibility to run a +// Celestia node in different modes. Currently supported modes: // * Bridge // * Light // * Full @@ -78,7 +78,8 @@ func New(tp node.Type, network p2p.Network, store Store, options ...fx.Option) ( return NewWithConfig(tp, network, store, cfg, options...) } -// NewWithConfig assembles a new Node with the given type 'tp' over Store 'store' and a custom config. +// NewWithConfig assembles a new Node with the given type 'tp' over Store 'store' and a custom +// config. 
func NewWithConfig(tp node.Type, network p2p.Network, store Store, cfg *Config, options ...fx.Option) (*Node, error) { opts := append([]fx.Option{ConstructModule(tp, network, cfg, store)}, options...) return newNode(opts...) @@ -125,8 +126,8 @@ func (n *Node) Run(ctx context.Context) error { } // Stop shuts down the Node, all its running Modules/Services and returns. -// Canceling the given context earlier 'ctx' unblocks the Stop and aborts graceful shutdown forcing remaining -// Modules/Services to close immediately. +// Canceling the given context earlier 'ctx' unblocks the Stop and aborts graceful shutdown forcing +// remaining Modules/Services to close immediately. func (n *Node) Stop(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, Timeout) defer cancel() @@ -143,8 +144,8 @@ func (n *Node) Stop(ctx context.Context) error { // newNode creates a new Node from given DI options. // DI options allow initializing the Node with a customized set of components and services. -// NOTE: newNode is currently meant to be used privately to create various custom Node types e.g. Light, unless we -// decide to give package users the ability to create custom node types themselves. +// NOTE: newNode is currently meant to be used privately to create various custom Node types e.g. +// Light, unless we decide to give package users the ability to create custom node types themselves. func newNode(opts ...fx.Option) (*Node, error) { node := new(Node) app := fx.New( diff --git a/nodebuilder/node/type.go b/nodebuilder/node/type.go index b6d8918edc..6ba304871e 100644 --- a/nodebuilder/node/type.go +++ b/nodebuilder/node/type.go @@ -5,11 +5,12 @@ package node type Type uint8 const ( - // Bridge is a Celestia Node that bridges the Celestia consensus network and data availability network. - // It maintains a trusted channel/connection to a Celestia Core node via the core.Client API. 
+ // Bridge is a Celestia Node that bridges the Celestia consensus network and data availability + // network. It maintains a trusted channel/connection to a Celestia Core node via the core.Client + // API. Bridge Type = iota + 1 - // Light is a stripped-down Celestia Node which aims to be lightweight while preserving the highest possible - // security guarantees. + // Light is a stripped-down Celestia Node which aims to be lightweight while preserving the highest + // possible security guarantees. Light // Full is a Celestia Node that stores blocks in their entirety. Full diff --git a/nodebuilder/node_bridge_test.go b/nodebuilder/node_bridge_test.go index cf2c389fe2..a50eb6c0bc 100644 --- a/nodebuilder/node_bridge_test.go +++ b/nodebuilder/node_bridge_test.go @@ -32,8 +32,8 @@ func TestBridge_WithMockedCoreClient(t *testing.T) { require.NoError(t, err) } -// TestBridge_HasStubDaser verifies that a bridge node implements a stub daser that returns an error and empty -// das.SamplingStats +// TestBridge_HasStubDaser verifies that a bridge node implements a stub daser that returns an +// error and empty das.SamplingStats func TestBridge_HasStubDaser(t *testing.T) { repo := MockStore(t, DefaultConfig(node.Bridge)) diff --git a/nodebuilder/p2p/config.go b/nodebuilder/p2p/config.go index fb6678b1cd..991eb0b046 100644 --- a/nodebuilder/p2p/config.go +++ b/nodebuilder/p2p/config.go @@ -16,11 +16,13 @@ type Config struct { ListenAddresses []string // AnnounceAddresses - Addresses to be announced/advertised for peers to connect to AnnounceAddresses []string - // NoAnnounceAddresses - Addresses the P2P subsystem may know about, but that should not be announced/advertised, - // as undialable from WAN + // NoAnnounceAddresses - Addresses the P2P subsystem may know about, but that should not be + // announced/advertised, as undialable from WAN NoAnnounceAddresses []string - // TODO(@Wondertan): This should be a build-time parameter. 
See https://github.com/celestiaorg/celestia-node/issues/63
-	// Bootstrapper is flag telling this node is a bootstrapper.
+	// TODO(@Wondertan): This should be a build-time parameter. See
+	// https://github.com/celestiaorg/celestia-node/issues/63
+	//
+	// Bootstrapper is flag telling this node is a bootstrapper.
 	Bootstrapper bool
 	// MutualPeers are peers which have a bidirectional peering agreement with the configured node.
 	// Connections with those peers are protected from being trimmed, dropped or negatively scored.
diff --git a/nodebuilder/p2p/host.go b/nodebuilder/p2p/host.go
index 32f5290e61..23dfce9845 100644
--- a/nodebuilder/p2p/host.go
+++ b/nodebuilder/p2p/host.go
@@ -41,7 +41,8 @@ func Host(cfg Config, params hostParams) (HostBase, error) {
 		libp2p.DefaultMuxers,
 	}
 
-	// TODO(@Wondertan): Other, non Celestia bootstrapper may also enable NATService to contribute the network.
+	// TODO(@Wondertan): Other, non Celestia bootstrapper may also enable NATService to contribute the
+	// network.
 	if cfg.Bootstrapper {
 		opts = append(opts, libp2p.EnableNATService())
 	}
diff --git a/nodebuilder/p2p/misc.go b/nodebuilder/p2p/misc.go
index af540c8706..fd8555d630 100644
--- a/nodebuilder/p2p/misc.go
+++ b/nodebuilder/p2p/misc.go
@@ -15,7 +15,8 @@ import (
 type ConnManagerConfig struct {
 	// Low and High are watermarks governing the number of connections that'll be maintained.
 	Low, High int
-	// GracePeriod is the amount of time a newly opened connection is given before it becomes subject to pruning.
+	// GracePeriod is the amount of time a newly opened connection is given before it becomes subject
+	// to pruning.
 	GracePeriod time.Duration
 }
 
diff --git a/nodebuilder/p2p/network.go b/nodebuilder/p2p/network.go
index 07d38e10ce..2289f6d635 100644
--- a/nodebuilder/p2p/network.go
+++ b/nodebuilder/p2p/network.go
@@ -46,7 +46,8 @@ var networksList = map[Network]struct{}{
 	Private: {},
 }
 
-// listProvidedNetworks provides a string listing all known long-standing networks for things like command hints.
+// listProvidedNetworks provides a string listing all known long-standing networks for things like
+// command hints.
 func listProvidedNetworks() string {
 	var networks string
 	for net := range networksList {
diff --git a/nodebuilder/p2p/pubsub.go b/nodebuilder/p2p/pubsub.go
index 01b4c61b9b..98947331cf 100644
--- a/nodebuilder/p2p/pubsub.go
+++ b/nodebuilder/p2p/pubsub.go
@@ -21,8 +21,8 @@ func PubSub(cfg Config, params pubSubParams) (*pubsub.PubSub, error) {
 	// * Hash-based MsgId function.
 	// * Validate default peer scoring params for our use-case.
 	// * Strict subscription filter
-	// * For different network types(mainnet/testnet/devnet) we should have different network topic names.
-	// * Hardcode positive score for bootstrap peers
+	// * For different network types(mainnet/testnet/devnet) we should have different topic names.
+	// * Hardcode positive score for bootstrap peers
 	// * Bootstrappers should only gossip and PX
 	// * Peers should trust boostrappers, so peerscore for them should always be high.
 	opts := []pubsub.Option{
diff --git a/nodebuilder/settings.go b/nodebuilder/settings.go
index 12bda0676f..e02be3d3dc 100644
--- a/nodebuilder/settings.go
+++ b/nodebuilder/settings.go
@@ -24,7 +24,8 @@ import (
 )
 
 // WithNetwork specifies the Network to which the Node should connect to.
-// WARNING: Use this option with caution and never run the Node with different networks over the same persisted Store.
+// WARNING: Use this option with caution and never run the Node with different networks over the
+// same persisted Store.
 func WithNetwork(net p2p.Network) fx.Option {
 	return fx.Replace(net)
 }
diff --git a/nodebuilder/share/service.go b/nodebuilder/share/service.go
index 643ab43f21..4933a50f32 100644
--- a/nodebuilder/share/service.go
+++ b/nodebuilder/share/service.go
@@ -49,7 +49,9 @@ func NewModule(lc fx.Lifecycle, bServ blockservice.BlockService, avail share.Ava
 // API is a wrapper around Module for the RPC.
 // TODO(@distractedm1nd): These structs need to be autogenerated.
 //
-//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module
+// Module
+//
+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module
 type API struct {
 	SharesAvailable           func(context.Context, *share.Root) error
 	ProbabilityOfAvailability func() float64
diff --git a/nodebuilder/state/keyring.go b/nodebuilder/state/keyring.go
index 7f6c197680..1c4ff9b1b4 100644
--- a/nodebuilder/state/keyring.go
+++ b/nodebuilder/state/keyring.go
@@ -19,7 +19,8 @@ func Keyring(cfg Config, ks keystore.Keystore, net p2p.Network) (*apptypes.Keyri
 	// TODO @renaynay: Include option for setting custom `userInput` parameter with
 	// implementation of https://github.com/celestiaorg/celestia-node/issues/415.
 	// TODO @renaynay @Wondertan: ensure that keyring backend from config is passed
-	// here instead of hardcoded `BackendTest`: https://github.com/celestiaorg/celestia-node/issues/603.
+	// here instead of hardcoded `BackendTest`:
+	// https://github.com/celestiaorg/celestia-node/issues/603.
 	encConf := encoding.MakeConfig(app.ModuleEncodingRegisters...)
 	ring, err := keyring.New(app.Name, keyring.BackendTest, ks.Path(), os.Stdin, encConf.Codec)
 	if err != nil {
diff --git a/nodebuilder/state/service.go b/nodebuilder/state/service.go
index 06dcc78756..e9d0ab39c9 100644
--- a/nodebuilder/state/service.go
+++ b/nodebuilder/state/service.go
@@ -31,7 +31,8 @@ type Module interface {
 	// `AppHash` is the result of applying the previous block's transaction list.
 	BalanceForAddress(ctx context.Context, addr state.Address) (*state.Balance, error)
 
-	// Transfer sends the given amount of coins from default wallet of the node to the given account address.
+	// Transfer sends the given amount of coins from default wallet of the node to the given account
+	// address.
 	Transfer(ctx context.Context, to state.AccAddress, amount math.Int, gasLimit uint64) (*state.TxResponse, error)
 	// SubmitTx submits the given transaction/message to the
 	// Celestia network and blocks until the tx is included in
@@ -76,7 +77,9 @@ type Module interface {
 // API is a wrapper around Module for the RPC.
 // TODO(@distractedm1nd): These structs need to be autogenerated.
 //
-//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module
+// Module
+//
+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/api.go -package=mocks . Module
 type API struct {
 	IsStopped func() bool
 	Balance   func(ctx context.Context) (*state.Balance, error)
diff --git a/nodebuilder/store.go b/nodebuilder/store.go
index 2f5ca418ac..ba75beaeb9 100644
--- a/nodebuilder/store.go
+++ b/nodebuilder/store.go
@@ -46,8 +46,8 @@ type Store interface {
 
 // OpenStore creates new FS Store under the given 'path'.
 // To be opened the Store must be initialized first, otherwise ErrNotInited is thrown.
-// OpenStore takes a file Lock on directory, hence only one Store can be opened at a time under the given 'path',
-// otherwise ErrOpened is thrown.
+// OpenStore takes a file Lock on directory, hence only one Store can be opened at a time under the
+// given 'path', otherwise ErrOpened is thrown.
 func OpenStore(path string) (Store, error) {
 	path, err := storePath(path)
 	if err != nil {
@@ -139,8 +139,8 @@ func (f *fsStore) Datastore() (_ datastore.Batching, err error) {
 	opts.TableLoadingMode = options.MemoryMap
 	// Truncate set to true will truncate corrupted data on start if there is any.
// If we don't truncate, the node will refuse to start and will beg for recovering, etc. - // If we truncate, the node will start with any uncorrupted data and reliably sync again what was corrupted - // in most cases. + // If we truncate, the node will start with any uncorrupted data and reliably sync again what was + // corrupted in most cases. opts.Truncate = true // MaxTableSize defines in memory and on disk size of LSM tree // Bigger values constantly takes more RAM diff --git a/nodebuilder/tests/swamp/swamp.go b/nodebuilder/tests/swamp/swamp.go index f156f395a3..6351c8f2c8 100644 --- a/nodebuilder/tests/swamp/swamp.go +++ b/nodebuilder/tests/swamp/swamp.go @@ -315,9 +315,9 @@ func (s *Swamp) Connect(t *testing.T, peerA, peerB peer.ID) { require.NoError(t, err) } -// Disconnect allows to break a connection between two peers without any possibility to re-establish it. -// Order is very important here. We have to unlink peers first, and only after that call disconnect. -// This is hard disconnect and peers will not be able to reconnect. +// Disconnect allows to break a connection between two peers without any possibility to +// re-establish it. Order is very important here. We have to unlink peers first, and only after +// that call disconnect. This is hard disconnect and peers will not be able to reconnect. // In order to reconnect peers again, please use swamp.Connect func (s *Swamp) Disconnect(t *testing.T, peerA, peerB peer.ID) { require.NoError(t, s.Network.UnlinkPeers(peerA, peerB)) diff --git a/share/add.go b/share/add.go index cddd992521..4e60eabb2c 100644 --- a/share/add.go +++ b/share/add.go @@ -13,7 +13,8 @@ import ( "github.com/celestiaorg/rsmt2d" ) -// AddShares erasures and extends shares to blockservice.BlockService using the provided ipld.NodeAdder. +// AddShares erasures and extends shares to blockservice.BlockService using the provided +// ipld.NodeAdder. 
func AddShares( ctx context.Context, shares []Share, @@ -42,7 +43,8 @@ func AddShares( return eds, batchAdder.Commit() } -// ImportShares imports flattend chunks of data into Extended Data square and saves it in blockservice.BlockService +// ImportShares imports flattend chunks of data into Extended Data square and saves it in +// blockservice.BlockService func ImportShares( ctx context.Context, shares [][]byte, diff --git a/share/availability.go b/share/availability.go index 5104dcb5bd..7bd52109ec 100644 --- a/share/availability.go +++ b/share/availability.go @@ -11,8 +11,8 @@ import ( // ErrNotAvailable is returned whenever DA sampling fails. var ErrNotAvailable = errors.New("share: data not available") -// AvailabilityTimeout specifies timeout for DA validation during which data have to be found on the network, -// otherwise ErrNotAvailable is fired. +// AvailabilityTimeout specifies timeout for DA validation during which data have to be found on +// the network, otherwise ErrNotAvailable is fired. // TODO: https://github.com/celestiaorg/celestia-node/issues/10 const AvailabilityTimeout = 20 * time.Minute @@ -22,7 +22,8 @@ type Root = da.DataAvailabilityHeader // Availability defines interface for validation of Shares' availability. type Availability interface { - // SharesAvailable subjectively validates if Shares committed to the given Root are available on the Network. + // SharesAvailable subjectively validates if Shares committed to the given Root are available on + // the Network. SharesAvailable(context.Context, *Root) error // ProbabilityOfAvailability calculates the probability of the data square // being available based on the number of samples collected. 
diff --git a/share/availability/cache/availability.go b/share/availability/cache/availability.go index 961dee7564..f8f3d8760c 100644 --- a/share/availability/cache/availability.go +++ b/share/availability/cache/availability.go @@ -19,8 +19,8 @@ var log = logging.Logger("share/cache") var ( // DefaultWriteBatchSize defines the size of the batched header write. // Headers are written in batches not to thrash the underlying Datastore with writes. - // TODO(@Wondertan, @renaynay): Those values must be configurable and proper defaults should be set for specific node - // type. (#709) + // TODO(@Wondertan, @renaynay): Those values must be configurable and proper defaults should be set + // for specific node type. (#709) DefaultWriteBatchSize = 2048 cacheAvailabilityPrefix = datastore.NewKey("sampling_result") diff --git a/share/availability/discovery/backoff.go b/share/availability/discovery/backoff.go index 9b0f2f6acc..9e5c627aa7 100644 --- a/share/availability/discovery/backoff.go +++ b/share/availability/discovery/backoff.go @@ -42,8 +42,8 @@ func newBackoffConnector(h host.Host, factory backoff.BackoffFactory) *backoffCo // Connect puts peer to the backoffCache and tries to establish a connection with it. func (b *backoffConnector) Connect(ctx context.Context, p peer.AddrInfo) error { - // we should lock the mutex before calling connectionData and not inside because otherwise it could be modified - // from another goroutine as it returns a pointer + // we should lock the mutex before calling connectionData and not inside because otherwise it could + // be modified from another goroutine as it returns a pointer b.cacheLk.Lock() cache := b.connectionData(p.ID) if time.Now().Before(cache.nexttry) { @@ -67,8 +67,9 @@ func (b *backoffConnector) connectionData(p peer.ID) *backoffData { return cache } -// RestartBackoff resets delay time between attempts and adds a delay for the next connection attempt to remote peer. 
-// It will mostly be called when host receives a notification that remote peer was disconnected. +// RestartBackoff resets delay time between attempts and adds a delay for the next connection +// attempt to remote peer. It will mostly be called when host receives a notification that remote +// peer was disconnected. func (b *backoffConnector) RestartBackoff(p peer.ID) { b.cacheLk.Lock() defer b.cacheLk.Unlock() diff --git a/share/availability/light/availability.go b/share/availability/light/availability.go index 6f8b227791..ac8c805df8 100644 --- a/share/availability/light/availability.go +++ b/share/availability/light/availability.go @@ -54,8 +54,8 @@ func (la *ShareAvailability) Stop(context.Context) error { return nil } -// SharesAvailable randomly samples DefaultSampleAmount amount of Shares committed to the given Root. -// This way SharesAvailable subjectively verifies that Shares are available. +// SharesAvailable randomly samples DefaultSampleAmount amount of Shares committed to the given +// Root. This way SharesAvailable subjectively verifies that Shares are available. func (la *ShareAvailability) SharesAvailable(ctx context.Context, dah *share.Root) error { log.Debugw("Validate availability", "root", dah.Hash()) // We assume the caller of this method has already performed basic validation on the diff --git a/share/availability/light/sample.go b/share/availability/light/sample.go index fa2719cd1f..12d8505397 100644 --- a/share/availability/light/sample.go +++ b/share/availability/light/sample.go @@ -6,7 +6,8 @@ import ( "math/big" ) -// DefaultSampleAmount sets the default amount of samples to be sampled from the network by ShareAvailability. +// DefaultSampleAmount sets the default amount of samples to be sampled from the network by +// ShareAvailability. var DefaultSampleAmount = 16 // Sample is a point in 2D space over square. 
diff --git a/share/availability/test/corrupt_data.go b/share/availability/test/corrupt_data.go index 6969c24728..1d7716cbb4 100644 --- a/share/availability/test/corrupt_data.go +++ b/share/availability/test/corrupt_data.go @@ -15,7 +15,8 @@ import ( var _ blockstore.Blockstore = (*FraudulentBlockstore)(nil) -// CorruptBlock is a block where the cid doesn't match the data. It fulfills the blocks.Block interface. +// CorruptBlock is a block where the cid doesn't match the data. It fulfills the blocks.Block +// interface. type CorruptBlock struct { cid cid.Cid data []byte @@ -46,8 +47,9 @@ func NewCorruptBlock(data []byte, fakeCID cid.Cid) *CorruptBlock { } } -// FraudulentBlockstore is a mock blockstore.Blockstore that saves both corrupted and original data for every block it -// receives. If FraudulentBlockstore.Attacking is true, it will serve the corrupted data on requests. +// FraudulentBlockstore is a mock blockstore.Blockstore that saves both corrupted and original data +// for every block it receives. If FraudulentBlockstore.Attacking is true, it will serve the +// corrupted data on requests. 
type FraudulentBlockstore struct { ds.Datastore Attacking bool @@ -85,7 +87,8 @@ func (fb FraudulentBlockstore) Put(ctx context.Context, block blocks.Block) erro return err } - // create data that doesn't match the CID with arbitrary lengths between 0 and len(block.RawData())*2 + // create data that doesn't match the CID with arbitrary lengths between 0 and + // len(block.RawData())*2 corrupted := make([]byte, mrand.Int()%(len(block.RawData())*2)) mrand.Read(corrupted) return fb.Datastore.Put(ctx, ds.NewKey("corrupt"+block.Cid().String()), corrupted) diff --git a/share/availability/test/testing.go b/share/availability/test/testing.go index 5cc78168e0..e9399ddad6 100644 --- a/share/availability/test/testing.go +++ b/share/availability/test/testing.go @@ -61,8 +61,8 @@ type TestDagNet struct { nodes []*TestNode } -// NewTestDAGNet creates a new testing swarm utility to spawn different nodes and test how they interact and/or exchange -// data. +// NewTestDAGNet creates a new testing swarm utility to spawn different nodes and test how they +// interact and/or exchange data. func NewTestDAGNet(ctx context.Context, t *testing.T) *TestDagNet { return &TestDagNet{ ctx: ctx, @@ -71,7 +71,8 @@ func NewTestDAGNet(ctx context.Context, t *testing.T) *TestDagNet { } } -// NewTestNodeWithBlockstore creates a new plain TestNode with the given blockstore that can serve and request data. +// NewTestNodeWithBlockstore creates a new plain TestNode with the given blockstore that can serve +// and request data. func (dn *TestDagNet) NewTestNodeWithBlockstore(dstore ds.Datastore, bstore blockstore.Blockstore) *TestNode { hst, err := dn.net.GenPeer() require.NoError(dn.T, err) @@ -121,8 +122,8 @@ func (dn *TestDagNet) Connect(peerA, peerB peer.ID) { } // Disconnect disconnects two peers. -// It does a hard disconnect, meaning that disconnected peers won't be able to reconnect on their own -// but only with DagNet.Connect or TestDagNet.ConnectAll. 
+// It does a hard disconnect, meaning that disconnected peers won't be able to reconnect on their +// own but only with DagNet.Connect or TestDagNet.ConnectAll. func (dn *TestDagNet) Disconnect(peerA, peerB peer.ID) { err := dn.net.UnlinkPeers(peerA, peerB) require.NoError(dn.T, err) diff --git a/share/eds/byzantine/bad_encoding.go b/share/eds/byzantine/bad_encoding.go index d957209be0..6632ac4972 100644 --- a/share/eds/byzantine/bad_encoding.go +++ b/share/eds/byzantine/bad_encoding.go @@ -31,8 +31,9 @@ type BadEncodingProof struct { Axis rsmt2d.Axis } -// CreateBadEncodingProof creates a new Bad Encoding Fraud Proof that should be propagated through network. -// The fraud proof will contain shares that did not pass verification and their relevant Merkle proofs. +// CreateBadEncodingProof creates a new Bad Encoding Fraud Proof that should be propagated through +// network. The fraud proof will contain shares that did not pass verification and their relevant +// Merkle proofs. func CreateBadEncodingProof( hash []byte, height uint64, diff --git a/share/eds/byzantine/share_proof.go b/share/eds/byzantine/share_proof.go index 4515b9c7ca..2d360f2aee 100644 --- a/share/eds/byzantine/share_proof.go +++ b/share/eds/byzantine/share_proof.go @@ -78,8 +78,8 @@ func GetProofsForShares( for index, share := range shares { if share != nil { proof := make([]cid.Cid, 0) - // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same tree. - // Add options that will control what data will be fetched. + // TODO(@vgonkivs): Combine GetLeafData and GetProof in one function as the are traversing the same + // tree. Add options that will control what data will be fetched. 
s, err := ipld.GetLeaf(ctx, bGetter, root, index, len(shares)) if err != nil { return nil, err diff --git a/share/eds/eds.go b/share/eds/eds.go index 23ab44e0a9..c86bab72d0 100644 --- a/share/eds/eds.go +++ b/share/eds/eds.go @@ -29,10 +29,12 @@ import ( var ErrEmptySquare = errors.New("share: importing empty data") -// writingSession contains the components needed to write an EDS to a CARv1 file with our custom node order. +// writingSession contains the components needed to write an EDS to a CARv1 file with our custom +// node order. type writingSession struct { eds *rsmt2d.ExtendedDataSquare - // store is an in-memory blockstore, used to cache the inner nodes (proofs) while we walk the nmt tree. + // store is an in-memory blockstore, used to cache the inner nodes (proofs) while we walk the nmt + // tree. store blockstore.Blockstore w io.Writer } @@ -137,9 +139,11 @@ func (w *writingSession) writeQuadrants() error { return nil } -// writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the CARv1 file. +// writeProofs iterates over the in-memory blockstore's keys and writes all inner nodes to the +// CARv1 file. func (w *writingSession) writeProofs(ctx context.Context) error { - // we only stored proofs to the store, so we can just iterate over them here without getting any leaves + // we only stored proofs to the store, so we can just iterate over them here without getting any + // leaves proofs, err := w.store.AllKeysChan(ctx) if err != nil { return fmt.Errorf("getting all keys from the blockstore: %w", err) @@ -161,8 +165,8 @@ func (w *writingSession) writeProofs(ctx context.Context) error { return nil } -// quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the respective namespace -// to the shares. +// quadrantOrder reorders the shares in the EDS to quadrant row-by-row order, prepending the +// respective namespace to the shares. // e.g. [ Q1 R1 | Q1 R2 | Q1 R3 | Q1 R4 | Q2 R1 | Q2 R2 .... 
] func quadrantOrder(eds *rsmt2d.ExtendedDataSquare) [][]byte { size := eds.Width() * eds.Width() @@ -223,7 +227,8 @@ func rootsToCids(eds *rsmt2d.ExtendedDataSquare) ([]cid.Cid, error) { // ReadEDS reads the first EDS quadrant (1/4) from an io.Reader CAR file. // Only the first quadrant will be read, which represents the original data. -// The returned EDS is guaranteed to be full and valid against the DataRoot, otherwise ReadEDS errors. +// The returned EDS is guaranteed to be full and valid against the DataRoot, otherwise ReadEDS +// errors. func ReadEDS(ctx context.Context, r io.Reader, root share.Root) (*rsmt2d.ExtendedDataSquare, error) { carReader, err := car.NewCarReader(r) if err != nil { diff --git a/share/eds/eds_test.go b/share/eds/eds_test.go index 303dd4b509..0e4db027cd 100644 --- a/share/eds/eds_test.go +++ b/share/eds/eds_test.go @@ -114,7 +114,8 @@ func TestWriteEDSInQuadrantOrder(t *testing.T) { } } -// TestInnerNodeBatchSize verifies that the number of unique inner nodes is equal to ipld.BatchSize - shareCount. +// TestInnerNodeBatchSize verifies that the number of unique inner nodes is equal to ipld.BatchSize +// - shareCount. func TestInnerNodeBatchSize(t *testing.T) { tests := []struct { name string @@ -177,8 +178,9 @@ func TestReadEDSContentIntegrityMismatch(t *testing.T) { require.ErrorContains(t, err, "share: content integrity mismatch: imported root") } -// BenchmarkReadWriteEDS benchmarks the time it takes to write and read an EDS from disk. The benchmark is run with a -// 4x4 ODS to a 64x64 ODS - a higher value can be used, but it will run for much longer. +// BenchmarkReadWriteEDS benchmarks the time it takes to write and read an EDS from disk. The +// benchmark is run with a 4x4 ODS to a 64x64 ODS - a higher value can be used, but it will run for +// much longer. 
func BenchmarkReadWriteEDS(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) b.Cleanup(cancel) diff --git a/share/eds/retriever.go b/share/eds/retriever.go index 6de60300fd..a7b9ab16f3 100644 --- a/share/eds/retriever.go +++ b/share/eds/retriever.go @@ -53,10 +53,10 @@ func NewRetriever(bServ blockservice.BlockService) *Retriever { // Retrieve retrieves all the data committed to DataAvailabilityHeader. // -// If not available locally, it aims to request from the network only one quadrant (1/4) of the data square -// and reconstructs the other three quadrants (3/4). If the requested quadrant is not available within -// RetrieveQuadrantTimeout, it starts requesting another quadrant until either the data is -// reconstructed, context is canceled or ErrByzantine is generated. +// If not available locally, it aims to request from the network only one quadrant (1/4) of the +// data square and reconstructs the other three quadrants (3/4). If the requested quadrant is not +// available within RetrieveQuadrantTimeout, it starts requesting another quadrant until either the +// data is reconstructed, context is canceled or ErrByzantine is generated. func (r *Retriever) Retrieve(ctx context.Context, dah *da.DataAvailabilityHeader) (*rsmt2d.ExtendedDataSquare, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() // cancels all the ongoing requests if reconstruction succeeds early @@ -178,8 +178,8 @@ func (rs *retrievalSession) Reconstruct(ctx context.Context) (*rsmt2d.ExtendedDa // TODO(@Wondertan): This is bad! // * We should not reimport the square multiple times - // * We should set shares into imported square via SetShare(https://github.com/celestiaorg/rsmt2d/issues/83) - // to accomplish the above point. + // * We should set shares into imported square via + // SetShare(https://github.com/celestiaorg/rsmt2d/issues/83) to accomplish the above point. 
{ squareImported, err := rsmt2d.ImportExtendedDataSquare(rs.square, rs.codec, rs.treeFn) if err != nil { diff --git a/share/eds/retriever_quadrant.go b/share/eds/retriever_quadrant.go index e223be36f8..9a637a27be 100644 --- a/share/eds/retriever_quadrant.go +++ b/share/eds/retriever_quadrant.go @@ -100,7 +100,8 @@ func newQuadrants(dah *da.DataAvailabilityHeader) []*quadrant { // - Goal to make formula generic for both rows and cols // - While data square is flattened by rows only // -// TODO(@Wondertan): This can be simplified by making rsmt2d working over 3D byte slice(not flattened) +// TODO(@Wondertan): This can be simplified by making rsmt2d working over 3D byte slice(not +// flattened) func (q *quadrant) index(rootIdx, cellIdx int) int { size := len(q.roots) // half square offsets, e.g. share is from Q3, diff --git a/share/get.go b/share/get.go index b6734562e7..d452378926 100644 --- a/share/get.go +++ b/share/get.go @@ -40,8 +40,8 @@ func GetShares(ctx context.Context, bGetter blockservice.BlockGetter, root cid.C ipld.GetLeaves(ctx, bGetter, root, shares, putNode) } -// GetSharesByNamespace walks the tree of a given root and returns its shares within the given namespace.ID. -// If a share could not be retrieved, err is not nil, and the returned array +// GetSharesByNamespace walks the tree of a given root and returns its shares within the given +// namespace.ID. If a share could not be retrieved, err is not nil, and the returned array // contains nil shares in place of the shares it was unable to retrieve. func GetSharesByNamespace( ctx context.Context, @@ -70,6 +70,7 @@ func GetSharesByNamespace( // leafToShare converts an NMT leaf into a Share. 
func leafToShare(nd format.Node) Share { - // * Additional namespace is prepended so that parity data can be identified with a parity namespace, which we cut off + // * Additional namespace is prepended so that parity data can be identified with a parity + // namespace, which we cut off return nd.RawData()[NamespaceSize:] } diff --git a/share/ipld/corrupted_data_test.go b/share/ipld/corrupted_data_test.go index 59ee922adf..8b14f6d2aa 100644 --- a/share/ipld/corrupted_data_test.go +++ b/share/ipld/corrupted_data_test.go @@ -13,12 +13,12 @@ import ( "github.com/celestiaorg/celestia-node/share/service" ) -// sharesAvailableTimeout is an arbitrarily picked interval of time in which a TestNode is expected to be able to -// complete a SharesAvailable request from a connected peer in a TestDagNet. +// sharesAvailableTimeout is an arbitrarily picked interval of time in which a TestNode is expected +// to be able to complete a SharesAvailable request from a connected peer in a TestDagNet. const sharesAvailableTimeout = 2 * time.Second -// TestNamespaceHasher_CorruptedData is an integration test that verifies that the NamespaceHasher of a recipient of -// corrupted data will not panic, and will throw away the corrupted data. +// TestNamespaceHasher_CorruptedData is an integration test that verifies that the NamespaceHasher +// of a recipient of corrupted data will not panic, and will throw away the corrupted data. func TestNamespaceHasher_CorruptedData(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -29,8 +29,8 @@ func TestNamespaceHasher_CorruptedData(t *testing.T) { provider.ShareService = service.NewShareService(provider.BlockService, full.TestAvailability(provider.BlockService)) net.ConnectAll() - // before the provider starts attacking, we should be able to retrieve successfully. We pass a size 16 block, but - // this is not important to the test and any valid block size behaves the same. 
+ // before the provider starts attacking, we should be able to retrieve successfully. We pass a size + // 16 block, but this is not important to the test and any valid block size behaves the same. root := availability_test.RandFillBS(t, 16, provider.BlockService) getCtx, cancelGet := context.WithTimeout(ctx, sharesAvailableTimeout) t.Cleanup(cancelGet) diff --git a/share/ipld/get.go b/share/ipld/get.go index 79ca64a342..e0590de7d5 100644 --- a/share/ipld/get.go +++ b/share/ipld/get.go @@ -170,10 +170,10 @@ func GetLeaves(ctx context.Context, wg.Wait() } -// GetLeavesByNamespace returns as many leaves from the given root with the given namespace.ID as it can retrieve. -// If no shares are found, it returns both data and error as nil. -// A non-nil error means that only partial data is returned, because at least one share retrieval failed -// The following implementation is based on `GetShares`. +// GetLeavesByNamespace returns as many leaves from the given root with the given namespace.ID as +// it can retrieve. If no shares are found, it returns both data and error as nil. +// A non-nil error means that only partial data is returned, because at least one share retrieval +// failed The following implementation is based on `GetShares`. 
func GetLeavesByNamespace( ctx context.Context, bGetter blockservice.BlockGetter, @@ -367,7 +367,8 @@ type fetchedBounds struct { func (b *fetchedBounds) update(index int64) { lowest := atomic.LoadInt64(&b.lowest) // try to write index to the lower bound if appropriate, and retry until the atomic op is successful - // CAS ensures that we don't overwrite if the bound has been updated in another goroutine after the comparison here + // CAS ensures that we don't overwrite if the bound has been updated in another goroutine after the + // comparison here for index < lowest && !atomic.CompareAndSwapInt64(&b.lowest, lowest, index) { lowest = atomic.LoadInt64(&b.lowest) } diff --git a/share/ipld/nmt.go b/share/ipld/nmt.go index 371ee9f091..cd881b10cd 100644 --- a/share/ipld/nmt.go +++ b/share/ipld/nmt.go @@ -35,7 +35,8 @@ const ( // that contain an NMT node (inner and leaf nodes). sha256Namespace8Flagged = 0x7701 - // MaxSquareSize is currently the maximum size supported for unerasured data in rsmt2d.ExtendedDataSquare. + // MaxSquareSize is currently the maximum size supported for unerasured data in + // rsmt2d.ExtendedDataSquare. MaxSquareSize = appconsts.MaxSquareSize // NamespaceSize is a system-wide size for NMT namespaces. diff --git a/share/service/service.go b/share/service/service.go index 199fa83967..6688159c2b 100644 --- a/share/service/service.go +++ b/share/service/service.go @@ -20,8 +20,8 @@ type ShareService struct { share.Availability rtrv *eds.Retriever bServ blockservice.BlockService - // session is blockservice sub-session that applies optimization for fetching/loading related nodes, like shares - // prefer session over blockservice for fetching nodes. + // session is blockservice sub-session that applies optimization for fetching/loading related + // nodes, like shares prefer session over blockservice for fetching nodes. 
session blockservice.BlockGetter cancel context.CancelFunc } @@ -40,8 +40,8 @@ func (s *ShareService) Start(context.Context) error { return fmt.Errorf("share: service already started") } - // NOTE: The ctx given as param is used to control Start flow and only needed when Start is blocking, - // but this one is not. + // NOTE: The ctx given as param is used to control Start flow and only needed when Start is + // blocking, but this one is not. // // The newer context here is created to control lifecycle of the session and peer discovery. ctx, cancel := context.WithCancel(context.Background()) @@ -91,7 +91,8 @@ func (s *ShareService) GetShares(ctx context.Context, root *share.Root) ([][]sha return shares, nil } -// GetSharesByNamespace iterates over a square's row roots and accumulates the found shares in the given namespace.ID. +// GetSharesByNamespace iterates over a square's row roots and accumulates the found shares in the +// given namespace.ID. func (s *ShareService) GetSharesByNamespace( ctx context.Context, root *share.Root, @@ -127,9 +128,9 @@ func (s *ShareService) GetSharesByNamespace( } // we don't know the amount of shares in the namespace, so we cannot preallocate properly - // TODO(@Wondertan): Consider improving encoding schema for data in the shares that will also include metadata - // with the amount of shares. If we are talking about plenty of data here, proper preallocation would make a - // difference + // TODO(@Wondertan): Consider improving encoding schema for data in the shares that will also + // include metadata with the amount of shares. If we are talking about plenty of data here, proper + // preallocation would make a difference var out []share.Share for i := 0; i < len(rowRootCIDs); i++ { out = append(out, shares[i]...) 
diff --git a/share/share.go b/share/share.go index 8b05791e02..8923a65dd9 100644 --- a/share/share.go +++ b/share/share.go @@ -15,7 +15,8 @@ var ( ) const ( - // MaxSquareSize is currently the maximum size supported for unerasured data in rsmt2d.ExtendedDataSquare. + // MaxSquareSize is currently the maximum size supported for unerasured data in + // rsmt2d.ExtendedDataSquare. MaxSquareSize = appconsts.MaxSquareSize // NamespaceSize is a system-wide size for NMT namespaces. NamespaceSize = appconsts.NamespaceSize @@ -24,8 +25,9 @@ const ( ) // Share contains the raw share data without the corresponding namespace. -// NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. Ideally, we should define -// reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely on it. +// NOTE: Alias for the byte is chosen to keep maximal compatibility, especially with rsmt2d. +// Ideally, we should define reusable type elsewhere and make everyone(Core, rsmt2d, ipld) to rely +// on it. type Share = []byte // ID gets the namespace ID from the share. diff --git a/share/test_helpers.go b/share/test_helpers.go index f2b12a2076..1475ff099b 100644 --- a/share/test_helpers.go +++ b/share/test_helpers.go @@ -13,7 +13,8 @@ import ( // EqualEDS check whether two given EDSes are equal. // TODO(Wondertan): Move to rsmt2d -// TODO(Wondertan): Propose use of int by default instead of uint for the sake convenience and Golang practices +// TODO(Wondertan): Propose use of int by default instead of uint for the sake convenience and +// Golang practices func EqualEDS(a *rsmt2d.ExtendedDataSquare, b *rsmt2d.ExtendedDataSquare) bool { if a.Width() != b.Width() { return false @@ -31,8 +32,8 @@ func EqualEDS(a *rsmt2d.ExtendedDataSquare, b *rsmt2d.ExtendedDataSquare) bool { return true } -// RandEDS generates EDS filled with the random data with the given size for original square. 
It uses require.TestingT -// to be able to take both a *testing.T and a *testing.B. +// RandEDS generates EDS filled with the random data with the given size for original square. It +// uses require.TestingT to be able to take both a *testing.T and a *testing.B. func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { shares := RandShares(t, size*size) // recompute the eds @@ -41,8 +42,8 @@ func RandEDS(t require.TestingT, size int) *rsmt2d.ExtendedDataSquare { return eds } -// RandShares generate 'total' amount of shares filled with random data. It uses require.TestingT to be able to take -// both a *testing.T and a *testing.B. +// RandShares generate 'total' amount of shares filled with random data. It uses require.TestingT +// to be able to take both a *testing.T and a *testing.B. func RandShares(t require.TestingT, total int) []Share { if total&(total-1) != 0 { t.Errorf("Namespace total must be power of 2: %d", total) diff --git a/state/core_access.go b/state/core_access.go index bd8123468d..a7fdfe5b3f 100644 --- a/state/core_access.go +++ b/state/core_access.go @@ -184,7 +184,8 @@ func (ca *CoreAccessor) BalanceForAddress(ctx context.Context, addr Address) (*B // construct an ABCI query for the height at head-1 because // the AppHash contained in the head is actually the state root // after applying the transactions contained in the previous block. - // TODO @renaynay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use this method instead + // TODO @renaynay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use this method + // instead prefixedAccountKey := append(banktypes.CreateAccountBalancesPrefix(addr.Bytes()), []byte(app.BondDenom)...) 
abciReq := abci.RequestQuery{ // TODO @renayay: once https://github.com/cosmos/cosmos-sdk/pull/12674 is merged, use const instead From 4e222cab8e805617bb58b062ab6e5aa247886cbc Mon Sep 17 00:00:00 2001 From: Ryan Date: Wed, 9 Nov 2022 11:33:26 +0100 Subject: [PATCH 2/2] adding cfmt to make fmt --- Makefile | 1 + header/p2p/exchange.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 41f19095f5..cb03b81be6 100644 --- a/Makefile +++ b/Makefile @@ -82,6 +82,7 @@ fmt: @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s @find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/celestiaorg @go mod tidy -compat=1.17 + @cfmt -w -m=100 ./... @markdownlint --fix --quiet --config .markdownlint.yaml . .PHONY: fmt diff --git a/header/p2p/exchange.go b/header/p2p/exchange.go index 773ac575bf..f7fd4f996d 100644 --- a/header/p2p/exchange.go +++ b/header/p2p/exchange.go @@ -220,7 +220,6 @@ func (ex *Exchange) request( if err = stream.Close(); err != nil { log.Errorw("closing stream", "err", err) } - // ensure at least one header was retrieved if len(headers) == 0 { return nil, header.ErrNotFound }