Skip to content

Commit 3e7ff30

Browse files
Refactor full node constructor (cosmos#1227)
<!-- Please read and fill out this form before submitting your PR. Please make sure you have reviewed our contributors guide before submitting your first PR. --> ## Overview Closes: cosmos#1229 <!-- Please provide an explanation of the PR, including the appropriate context, background, goal, and rationale. If there is an issue with this information, please provide a tl;dr and link the issue. --> ## Checklist <!-- Please complete the checklist to ensure that the PR is ready to be reviewed. IMPORTANT: PRs should be left in Draft until the below checklist is completed. --> - [x] New and updated code has appropriate documentation - [x] New and updated code has new and/or updated testing - [x] Required CI checks are passing - [ ] Visual proof for any user facing features like CLI or documentation updates - [x] Linked issues closed with keywords
1 parent 6c4a346 commit 3e7ff30

File tree

5 files changed

+122
-76
lines changed

5 files changed

+122
-76
lines changed

block/manager.go

+1-2
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,6 @@ func NewManager(
115115
dalc da.DataAvailabilityLayerClient,
116116
eventBus *cmtypes.EventBus,
117117
logger log.Logger,
118-
doneBuildingCh chan struct{},
119118
blockStore *goheaderstore.Store[*types.Block],
120119
) (*Manager, error) {
121120
s, err := getInitialState(store, genesis)
@@ -182,7 +181,7 @@ func NewManager(
182181
retrieveCh: make(chan struct{}, 1),
183182
logger: logger,
184183
txsAvailable: txsAvailableCh,
185-
doneBuildingBlock: doneBuildingCh,
184+
doneBuildingBlock: make(chan struct{}),
186185
buildingBlock: false,
187186
pendingBlocks: NewPendingBlocks(),
188187
}

block/manager_test.go

+1-2
Original file line numberDiff line numberDiff line change
@@ -84,8 +84,7 @@ func TestInitialState(t *testing.T) {
8484
defer func() {
8585
require.NoError(t, dalc.Stop())
8686
}()
87-
dumbChan := make(chan struct{})
88-
agg, err := NewManager(key, conf, c.genesis, c.store, nil, nil, dalc, nil, logger, dumbChan, nil)
87+
agg, err := NewManager(key, conf, c.genesis, c.store, nil, nil, dalc, nil, logger, nil)
8988
assert.NoError(err)
9089
assert.NotNil(agg)
9190
agg.lastStateMtx.RLock()

node/full.go

+112-64
Original file line numberDiff line numberDiff line change
@@ -53,115 +53,95 @@ var _ Node = &FullNode{}
5353
// It connects all the components and orchestrates their work.
5454
type FullNode struct {
5555
service.BaseService
56-
eventBus *cmtypes.EventBus
57-
proxyApp proxy.AppConns
5856

5957
genesis *cmtypes.GenesisDoc
6058
// cache of chunked genesis data.
6159
genChunks []string
6260

63-
conf config.NodeConfig
64-
P2P *p2p.Client
61+
nodeConfig config.NodeConfig
6562

63+
proxyApp proxy.AppConns
64+
eventBus *cmtypes.EventBus
65+
dalc da.DataAvailabilityLayerClient
66+
p2pClient *p2p.Client
67+
hExService *block.HeaderExchangeService
68+
bExService *block.BlockExchangeService
6669
// TODO(tzdybal): consider extracting "mempool reactor"
67-
Mempool mempool.Mempool
68-
mempoolIDs *mempoolIDs
69-
70+
Mempool mempool.Mempool
71+
mempoolIDs *mempoolIDs
7072
Store store.Store
7173
blockManager *block.Manager
72-
dalc da.DataAvailabilityLayerClient
7374

7475
// Preserves cometBFT compatibility
7576
TxIndexer txindex.TxIndexer
7677
BlockIndexer indexer.BlockIndexer
7778
IndexerService *txindex.IndexerService
7879

79-
hExService *block.HeaderExchangeService
80-
bExService *block.BlockExchangeService
81-
8280
// keep context here only because of API compatibility
8381
// - it's used in `OnStart` (defined in service.Service interface)
84-
ctx context.Context
85-
82+
ctx context.Context
8683
cancel context.CancelFunc
8784
}
8885

8986
// newFullNode creates a new Rollkit full node.
9087
func newFullNode(
9188
ctx context.Context,
92-
conf config.NodeConfig,
89+
nodeConfig config.NodeConfig,
9390
p2pKey crypto.PrivKey,
9491
signingKey crypto.PrivKey,
9592
clientCreator proxy.ClientCreator,
9693
genesis *cmtypes.GenesisDoc,
9794
logger log.Logger,
9895
) (*FullNode, error) {
99-
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
100-
proxyApp.SetLogger(logger.With("module", "proxy"))
101-
if err := proxyApp.Start(); err != nil {
102-
return nil, fmt.Errorf("error starting proxy app connections: %v", err)
96+
proxyApp, err := initProxyApp(clientCreator, logger)
97+
if err != nil {
98+
return nil, err
10399
}
104100

105-
eventBus := cmtypes.NewEventBus()
106-
eventBus.SetLogger(logger.With("module", "events"))
107-
if err := eventBus.Start(); err != nil {
101+
eventBus, err := initEventBus(logger)
102+
if err != nil {
108103
return nil, err
109104
}
110105

111-
var err error
112-
var baseKV ds.TxnDatastore
113-
if conf.RootDir == "" && conf.DBPath == "" { // this is used for testing
114-
logger.Info("WARNING: working in in-memory mode")
115-
baseKV, err = store.NewDefaultInMemoryKVStore()
116-
} else {
117-
baseKV, err = store.NewDefaultKVStore(conf.RootDir, conf.DBPath, "rollkit")
118-
}
106+
baseKV, err := initBaseKV(nodeConfig, logger)
119107
if err != nil {
120108
return nil, err
121109
}
122110

123-
mainKV := newPrefixKV(baseKV, mainPrefix)
124111
dalcKV := newPrefixKV(baseKV, dalcPrefix)
125-
indexerKV := newPrefixKV(baseKV, indexerPrefix)
126-
127-
client, err := p2p.NewClient(conf.P2P, p2pKey, genesis.ChainID, baseKV, logger.With("module", "p2p"))
112+
dalc, err := initDALC(nodeConfig, dalcKV, logger)
128113
if err != nil {
129114
return nil, err
130115
}
131-
s := store.New(ctx, mainKV)
132116

133-
dalc := registry.GetClient(conf.DALayer)
134-
if dalc == nil {
135-
return nil, fmt.Errorf("couldn't get data availability client named '%s'", conf.DALayer)
136-
}
137-
err = dalc.Init(conf.NamespaceID, []byte(conf.DAConfig), dalcKV, logger.With("module", "da_client"))
117+
p2pClient, err := p2p.NewClient(nodeConfig.P2P, p2pKey, genesis.ChainID, baseKV, logger.With("module", "p2p"))
138118
if err != nil {
139-
return nil, fmt.Errorf("data availability layer client initialization error: %w", err)
119+
return nil, err
140120
}
141121

142-
indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(ctx, conf, indexerKV, eventBus, logger)
122+
mainKV := newPrefixKV(baseKV, mainPrefix)
123+
headerExchangeService, err := initHeaderExchangeService(ctx, mainKV, nodeConfig, genesis, p2pClient, logger)
143124
if err != nil {
144125
return nil, err
145126
}
146127

147-
mp := mempoolv1.NewTxMempool(logger, llcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0)
148-
mpIDs := newMempoolIDs()
149-
mp.EnableTxsAvailable()
150-
151-
headerExchangeService, err := block.NewHeaderExchangeService(ctx, mainKV, conf, genesis, client, logger.With("module", "HeaderExchangeService"))
128+
blockExchangeService, err := initBlockExchangeService(ctx, mainKV, nodeConfig, genesis, p2pClient, logger)
152129
if err != nil {
153-
return nil, fmt.Errorf("HeaderExchangeService initialization error: %w", err)
130+
return nil, err
154131
}
155132

156-
blockExchangeService, err := block.NewBlockExchangeService(ctx, mainKV, conf, genesis, client, logger.With("module", "BlockExchangeService"))
133+
mempool := initMempool(logger, proxyApp)
134+
135+
store := store.New(ctx, mainKV)
136+
blockManager, err := initBlockManager(signingKey, nodeConfig, genesis, store, mempool, proxyApp, dalc, eventBus, logger, blockExchangeService)
157137
if err != nil {
158-
return nil, fmt.Errorf("BlockExchangeService initialization error: %w", err)
138+
return nil, err
159139
}
160140

161-
doneBuildingChannel := make(chan struct{})
162-
blockManager, err := block.NewManager(signingKey, conf.BlockManagerConfig, genesis, s, mp, proxyApp.Consensus(), dalc, eventBus, logger.With("module", "BlockManager"), doneBuildingChannel, blockExchangeService.BlockStore())
141+
indexerKV := newPrefixKV(baseKV, indexerPrefix)
142+
indexerService, txIndexer, blockIndexer, err := createAndStartIndexerService(ctx, nodeConfig, indexerKV, eventBus, logger)
163143
if err != nil {
164-
return nil, fmt.Errorf("BlockManager initialization error: %w", err)
144+
return nil, err
165145
}
166146

167147
ctx, cancel := context.WithCancel(ctx)
@@ -170,13 +150,13 @@ func newFullNode(
170150
proxyApp: proxyApp,
171151
eventBus: eventBus,
172152
genesis: genesis,
173-
conf: conf,
174-
P2P: client,
153+
nodeConfig: nodeConfig,
154+
p2pClient: p2pClient,
175155
blockManager: blockManager,
176156
dalc: dalc,
177-
Mempool: mp,
178-
mempoolIDs: mpIDs,
179-
Store: s,
157+
Mempool: mempool,
158+
mempoolIDs: newMempoolIDs(),
159+
Store: store,
180160
TxIndexer: txIndexer,
181161
IndexerService: indexerService,
182162
BlockIndexer: blockIndexer,
@@ -187,12 +167,80 @@ func newFullNode(
187167
}
188168

189169
node.BaseService = *service.NewBaseService(logger, "Node", node)
190-
191-
node.P2P.SetTxValidator(node.newTxValidator())
170+
node.p2pClient.SetTxValidator(node.newTxValidator())
192171

193172
return node, nil
194173
}
195174

175+
func initProxyApp(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
176+
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
177+
proxyApp.SetLogger(logger.With("module", "proxy"))
178+
if err := proxyApp.Start(); err != nil {
179+
return nil, fmt.Errorf("error starting proxy app connections: %v", err)
180+
}
181+
return proxyApp, nil
182+
}
183+
184+
func initEventBus(logger log.Logger) (*cmtypes.EventBus, error) {
185+
eventBus := cmtypes.NewEventBus()
186+
eventBus.SetLogger(logger.With("module", "events"))
187+
if err := eventBus.Start(); err != nil {
188+
return nil, err
189+
}
190+
return eventBus, nil
191+
}
192+
193+
// initBaseKV initializes the base key-value store.
194+
func initBaseKV(nodeConfig config.NodeConfig, logger log.Logger) (ds.TxnDatastore, error) {
195+
if nodeConfig.RootDir == "" && nodeConfig.DBPath == "" { // this is used for testing
196+
logger.Info("WARNING: working in in-memory mode")
197+
return store.NewDefaultInMemoryKVStore()
198+
}
199+
return store.NewDefaultKVStore(nodeConfig.RootDir, nodeConfig.DBPath, "rollkit")
200+
}
201+
202+
func initDALC(nodeConfig config.NodeConfig, dalcKV ds.TxnDatastore, logger log.Logger) (da.DataAvailabilityLayerClient, error) {
203+
dalc := registry.GetClient(nodeConfig.DALayer)
204+
if dalc == nil {
205+
return nil, fmt.Errorf("couldn't get data availability client named '%s'", nodeConfig.DALayer)
206+
}
207+
err := dalc.Init(nodeConfig.NamespaceID, []byte(nodeConfig.DAConfig), dalcKV, logger.With("module", "da_client"))
208+
if err != nil {
209+
return nil, fmt.Errorf("data availability layer client initialization error: %w", err)
210+
}
211+
return dalc, nil
212+
}
213+
214+
func initMempool(logger log.Logger, proxyApp proxy.AppConns) *mempoolv1.TxMempool {
215+
mempool := mempoolv1.NewTxMempool(logger, llcfg.DefaultMempoolConfig(), proxyApp.Mempool(), 0)
216+
mempool.EnableTxsAvailable()
217+
return mempool
218+
}
219+
220+
func initHeaderExchangeService(ctx context.Context, mainKV ds.TxnDatastore, nodeConfig config.NodeConfig, genesis *cmtypes.GenesisDoc, p2pClient *p2p.Client, logger log.Logger) (*block.HeaderExchangeService, error) {
221+
headerExchangeService, err := block.NewHeaderExchangeService(ctx, mainKV, nodeConfig, genesis, p2pClient, logger.With("module", "HeaderExchangeService"))
222+
if err != nil {
223+
return nil, fmt.Errorf("HeaderExchangeService initialization error: %w", err)
224+
}
225+
return headerExchangeService, nil
226+
}
227+
228+
func initBlockExchangeService(ctx context.Context, mainKV ds.TxnDatastore, nodeConfig config.NodeConfig, genesis *cmtypes.GenesisDoc, p2pClient *p2p.Client, logger log.Logger) (*block.BlockExchangeService, error) {
229+
blockExchangeService, err := block.NewBlockExchangeService(ctx, mainKV, nodeConfig, genesis, p2pClient, logger.With("module", "BlockExchangeService"))
230+
if err != nil {
231+
return nil, fmt.Errorf("BlockExchangeService initialization error: %w", err)
232+
}
233+
return blockExchangeService, nil
234+
}
235+
236+
func initBlockManager(signingKey crypto.PrivKey, nodeConfig config.NodeConfig, genesis *cmtypes.GenesisDoc, store store.Store, mempool mempool.Mempool, proxyApp proxy.AppConns, dalc da.DataAvailabilityLayerClient, eventBus *cmtypes.EventBus, logger log.Logger, blockExchangeService *block.BlockExchangeService) (*block.Manager, error) {
237+
blockManager, err := block.NewManager(signingKey, nodeConfig.BlockManagerConfig, genesis, store, mempool, proxyApp.Consensus(), dalc, eventBus, logger.With("module", "BlockManager"), blockExchangeService.BlockStore())
238+
if err != nil {
239+
return nil, fmt.Errorf("BlockManager initialization error: %w", err)
240+
}
241+
return blockManager, nil
242+
}
243+
196244
// initGenesisChunks creates a chunked format of the genesis document to make it easier to
197245
// iterate through larger genesis structures.
198246
func (n *FullNode) initGenesisChunks() error {
@@ -258,7 +306,7 @@ func (n *FullNode) blockPublishLoop(ctx context.Context) {
258306
func (n *FullNode) OnStart() error {
259307

260308
n.Logger.Info("starting P2P client")
261-
err := n.P2P.Start(n.ctx)
309+
err := n.p2pClient.Start(n.ctx)
262310
if err != nil {
263311
return fmt.Errorf("error while starting P2P client: %w", err)
264312
}
@@ -275,9 +323,9 @@ func (n *FullNode) OnStart() error {
275323
return fmt.Errorf("error while starting data availability layer client: %w", err)
276324
}
277325

278-
if n.conf.Aggregator {
279-
n.Logger.Info("working in aggregator mode", "block time", n.conf.BlockTime)
280-
go n.blockManager.AggregationLoop(n.ctx, n.conf.LazyAggregator)
326+
if n.nodeConfig.Aggregator {
327+
n.Logger.Info("working in aggregator mode", "block time", n.nodeConfig.BlockTime)
328+
go n.blockManager.AggregationLoop(n.ctx, n.nodeConfig.LazyAggregator)
281329
go n.blockManager.BlockSubmissionLoop(n.ctx)
282330
go n.headerPublishLoop(n.ctx)
283331
go n.blockPublishLoop(n.ctx)
@@ -307,7 +355,7 @@ func (n *FullNode) OnStop() {
307355
n.Logger.Info("halting full node...")
308356
n.cancel()
309357
err := n.dalc.Stop()
310-
err = multierr.Append(err, n.P2P.Close())
358+
err = multierr.Append(err, n.p2pClient.Close())
311359
err = multierr.Append(err, n.hExService.Stop())
312360
err = multierr.Append(err, n.bExService.Stop())
313361
n.Logger.Error("errors while stopping node:", "errors", err)

node/full_client.go

+6-6
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ func (c *FullClient) BroadcastTxCommit(ctx context.Context, tx cmtypes.Tx) (*cty
143143
}
144144

145145
// broadcast tx
146-
err = c.node.P2P.GossipTx(ctx, tx)
146+
err = c.node.p2pClient.GossipTx(ctx, tx)
147147
if err != nil {
148148
return nil, fmt.Errorf("tx added to local mempool but failure to broadcast: %w", err)
149149
}
@@ -192,7 +192,7 @@ func (c *FullClient) BroadcastTxAsync(ctx context.Context, tx cmtypes.Tx) (*ctyp
192192
return nil, err
193193
}
194194
// gossipTx optimistically
195-
err = c.node.P2P.GossipTx(ctx, tx)
195+
err = c.node.p2pClient.GossipTx(ctx, tx)
196196
if err != nil {
197197
return nil, fmt.Errorf("tx added to local mempool but failed to gossip: %w", err)
198198
}
@@ -217,7 +217,7 @@ func (c *FullClient) BroadcastTxSync(ctx context.Context, tx cmtypes.Tx) (*ctype
217217
// Note: we have to do this here because, unlike the tendermint mempool reactor, there
218218
// is no routine that gossips transactions after they enter the pool
219219
if r.Code == abci.CodeTypeOK {
220-
err = c.node.P2P.GossipTx(ctx, tx)
220+
err = c.node.p2pClient.GossipTx(ctx, tx)
221221
if err != nil {
222222
// the transaction must be removed from the mempool if it cannot be gossiped.
223223
// if this does not occur, then the user will not be able to try again using
@@ -348,10 +348,10 @@ func (c *FullClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error)
348348
res := ctypes.ResultNetInfo{
349349
Listening: true,
350350
}
351-
for _, ma := range c.node.P2P.Addrs() {
351+
for _, ma := range c.node.p2pClient.Addrs() {
352352
res.Listeners = append(res.Listeners, ma.String())
353353
}
354-
peers := c.node.P2P.Peers()
354+
peers := c.node.p2pClient.Peers()
355355
res.NPeers = len(peers)
356356
for _, peer := range peers {
357357
res.Peers = append(res.Peers, ctypes.Peer{
@@ -716,7 +716,7 @@ func (c *FullClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) {
716716
state.Version.Consensus.Block,
717717
state.Version.Consensus.App,
718718
)
719-
id, addr, network, err := c.node.P2P.Info()
719+
id, addr, network, err := c.node.p2pClient.Info()
720720
if err != nil {
721721
return nil, fmt.Errorf("failed to load node p2p info: %w", err)
722722
}

node/full_node_integration_test.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -355,7 +355,7 @@ func testSingleAggregatorSingleFullNodeTrustedHash(t *testing.T, source Source)
355355
// Get the trusted hash from node1 and pass it to node2 config
356356
trustedHash, err := node1.hExService.HeaderStore().GetByHeight(aggCtx, 1)
357357
require.NoError(err)
358-
node2.conf.TrustedHash = trustedHash.Hash().String()
358+
node2.nodeConfig.TrustedHash = trustedHash.Hash().String()
359359
require.NoError(node2.Start())
360360
defer func() {
361361
require.NoError(node2.Stop())
@@ -447,7 +447,7 @@ func startNodes(nodes []*FullNode, apps []*mocks.Application, t *testing.T) {
447447

448448
for i := 1; i < len(nodes); i++ {
449449
data := strconv.Itoa(i) + time.Now().String()
450-
require.NoError(t, nodes[i].P2P.GossipTx(context.TODO(), []byte(data)))
450+
require.NoError(t, nodes[i].p2pClient.GossipTx(context.TODO(), []byte(data)))
451451
}
452452

453453
timeout := time.NewTimer(time.Second * 30)

0 commit comments

Comments
 (0)