From 6ff5c66103e3e084a588095e7ede90d558ac3a2a Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 29 Aug 2023 12:39:17 -0400 Subject: [PATCH 01/87] Update dependent costs in schema --- crates/client/assets/schema.sdl | 10 +++--- crates/client/src/client/schema/chain.rs | 11 +++--- crates/client/src/client/types/gas_costs.rs | 11 +++--- crates/fuel-core/src/schema/chain.rs | 40 ++++++++++----------- 4 files changed, 37 insertions(+), 35 deletions(-) diff --git a/crates/client/assets/schema.sdl b/crates/client/assets/schema.sdl index af4228b4f30..b234edd1ad3 100644 --- a/crates/client/assets/schema.sdl +++ b/crates/client/assets/schema.sdl @@ -316,12 +316,10 @@ type GasCosts { jnzb: U64! jnef: U64! jneb: U64! - k256: U64! lb: U64! log: U64! lt: U64! lw: U64! - mcpi: U64! mint: U64! mlog: U64! modOp: U64! @@ -342,9 +340,7 @@ type GasCosts { pshl: U64! ret: U64! rvrt: U64! - s256: U64! sb: U64! - scwq: U64! sll: U64! slli: U64! srl: U64! @@ -354,7 +350,6 @@ type GasCosts { subi: U64! sw: U64! sww: U64! - swwq: U64! time: U64! tr: U64! tro: U64! @@ -377,15 +372,20 @@ type GasCosts { call: DependentCost! ccp: DependentCost! csiz: DependentCost! + k256: DependentCost! ldc: DependentCost! logd: DependentCost! mcl: DependentCost! mcli: DependentCost! mcp: DependentCost! + mcpi: DependentCost! meq: DependentCost! retd: DependentCost! + s256: DependentCost! + scwq: DependentCost! smo: DependentCost! srwq: DependentCost! + swwq: DependentCost! } type Genesis { diff --git a/crates/client/src/client/schema/chain.rs b/crates/client/src/client/schema/chain.rs index 18c3f857607..cb7c9ed9736 100644 --- a/crates/client/src/client/schema/chain.rs +++ b/crates/client/src/client/schema/chain.rs @@ -164,12 +164,10 @@ include_from_impls_and_cynic! { pub jnzb: U64, pub jnef: U64, pub jneb: U64, - pub k256: U64, pub lb: U64, pub log: U64, pub lt: U64, pub lw: U64, - pub mcpi: U64, pub mint: U64, pub mlog: U64, pub mod_op: U64, @@ -190,9 +188,7 @@ include_from_impls_and_cynic! { pub pshl: U64, pub ret: U64, pub rvrt: U64, - pub s256: U64, pub sb: U64, - pub scwq: U64, pub sll: U64, pub slli: U64, pub srl: U64, @@ -202,7 +198,6 @@ include_from_impls_and_cynic! { pub subi: U64, pub sw: U64, pub sww: U64, - pub swwq: U64, pub time: U64, pub tr: U64, pub tro: U64, @@ -226,15 +221,21 @@ include_from_impls_and_cynic! { pub call: DependentCost, pub ccp: DependentCost, pub csiz: DependentCost, + pub k256: DependentCost, pub ldc: DependentCost, pub logd: DependentCost, pub mcl: DependentCost, pub mcli: DependentCost, pub mcp: DependentCost, + pub mcpi: DependentCost, pub meq: DependentCost, pub retd: DependentCost, + pub s256: DependentCost, + pub scwq: DependentCost, pub smo: DependentCost, pub srwq: DependentCost, + pub swwq: DependentCost, + } } diff --git a/crates/client/src/client/types/gas_costs.rs b/crates/client/src/client/types/gas_costs.rs index c23321ca380..2c15aad901c 100644 --- a/crates/client/src/client/types/gas_costs.rs +++ b/crates/client/src/client/types/gas_costs.rs @@ -68,12 +68,10 @@ include_from_impls! { pub jnzb: u64, pub jnef: u64, pub jneb: u64, - pub k256: u64, pub lb: u64, pub log: u64, pub lt: u64, pub lw: u64, - pub mcpi: u64, pub mint: u64, pub mlog: u64, pub mod_op: u64, @@ -94,9 +92,7 @@ include_from_impls! { pub pshl: u64, pub ret: u64, pub rvrt: u64, - pub s256: u64, pub sb: u64, - pub scwq: u64, pub sll: u64, pub slli: u64, pub srl: u64, @@ -106,7 +102,6 @@ include_from_impls! 
{ pub subi: u64, pub sw: u64, pub sww: u64, - pub swwq: u64, pub time: u64, pub tr: u64, pub tro: u64, @@ -130,15 +125,21 @@ include_from_impls! { pub call: DependentCost, pub ccp: DependentCost, pub csiz: DependentCost, + pub k256: DependentCost, pub ldc: DependentCost, pub logd: DependentCost, pub mcl: DependentCost, pub mcli: DependentCost, pub mcp: DependentCost, + pub mcpi: DependentCost, pub meq: DependentCost, pub retd: DependentCost, + pub s256: DependentCost, + pub scwq: DependentCost, pub smo: DependentCost, pub srwq: DependentCost, + pub swwq: DependentCost, + } } diff --git a/crates/fuel-core/src/schema/chain.rs b/crates/fuel-core/src/schema/chain.rs index 0d015b61fa4..e61e7f8ddeb 100644 --- a/crates/fuel-core/src/schema/chain.rs +++ b/crates/fuel-core/src/schema/chain.rs @@ -322,10 +322,6 @@ impl GasCosts { self.0.jneb.into() } - async fn k256(&self) -> U64 { - self.0.k256.into() - } - async fn lb(&self) -> U64 { self.0.lb.into() } @@ -342,10 +338,6 @@ impl GasCosts { self.0.lw.into() } - async fn mcpi(&self) -> U64 { - self.0.mcpi.into() - } - async fn mint(&self) -> U64 { self.0.mint.into() } @@ -426,18 +418,10 @@ impl GasCosts { self.0.rvrt.into() } - async fn s256(&self) -> U64 { - self.0.s256.into() - } - async fn sb(&self) -> U64 { self.0.sb.into() } - async fn scwq(&self) -> U64 { - self.0.scwq.into() - } - async fn sll(&self) -> U64 { self.0.sll.into() } @@ -474,10 +458,6 @@ impl GasCosts { self.0.sww.into() } - async fn swwq(&self) -> U64 { - self.0.swwq.into() - } - async fn time(&self) -> U64 { self.0.time.into() } @@ -566,6 +546,10 @@ impl GasCosts { self.0.csiz.into() } + async fn k256(&self) -> DependentCost { + self.0.k256.into() + } + async fn ldc(&self) -> DependentCost { self.0.ldc.into() } @@ -586,6 +570,10 @@ impl GasCosts { self.0.mcp.into() } + async fn mcpi(&self) -> DependentCost { + self.0.mcpi.into() + } + async fn meq(&self) -> DependentCost { self.0.meq.into() } @@ -594,6 +582,14 @@ impl GasCosts { self.0.retd.into() } + async fn s256(&self) -> DependentCost { + self.0.s256.into() + } + + async fn scwq(&self) -> DependentCost { + self.0.scwq.into() + } + async fn smo(&self) -> DependentCost { self.0.smo.into() } @@ -601,6 +597,10 @@ impl GasCosts { async fn srwq(&self) -> DependentCost { self.0.srwq.into() } + + async fn swwq(&self) -> DependentCost { + self.0.swwq.into() + } } #[Object] From fc2808836f122eed44763e2efff6909a87176292 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 29 Aug 2023 12:40:42 -0400 Subject: [PATCH 02/87] Fix whitespace --- crates/client/src/client/schema/chain.rs | 1 - crates/client/src/client/types/gas_costs.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/crates/client/src/client/schema/chain.rs b/crates/client/src/client/schema/chain.rs index cb7c9ed9736..566ab3f56be 100644 --- a/crates/client/src/client/schema/chain.rs +++ b/crates/client/src/client/schema/chain.rs @@ -235,7 +235,6 @@ include_from_impls_and_cynic! { pub smo: DependentCost, pub srwq: DependentCost, pub swwq: DependentCost, - } } diff --git a/crates/client/src/client/types/gas_costs.rs b/crates/client/src/client/types/gas_costs.rs index 2c15aad901c..92dce12da78 100644 --- a/crates/client/src/client/types/gas_costs.rs +++ b/crates/client/src/client/types/gas_costs.rs @@ -139,7 +139,6 @@ include_from_impls! 
{ pub smo: DependentCost, pub srwq: DependentCost, pub swwq: DependentCost, - } } From 1801d8a69d00e3a431b716b80738e44c375d8af2 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 29 Aug 2023 12:54:58 -0400 Subject: [PATCH 03/87] Update CHANGELOG.md --- CHANGELOG.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f98e6a5c3d..56ec1e2d919 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,5 +38,4 @@ Description of the upcoming release here. - Some fix here 2 #### Breaking -- Some breaking fix here 3 -- Some breaking fix here 4 +- [#1338](https://github.com/FuelLabs/fuel-core/pull/1338): Updated GraphQL client to use `DependentCost` for `k256`, `mcpi`, `s256`, `scwq`, `swwq` opcodes. From d5908e02748a3019e2a26e50f5f16115c1b4c668 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 31 Aug 2023 18:35:48 -0400 Subject: [PATCH 04/87] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20eedefba13..533add48dfb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Description of the upcoming release here. ### Added +- [1339](https://github.com/FuelLabs/fuel-core/pull/1339): Adds `baseAssetId` to `FeeParameters` in the GraphQL API. - [#1309](https://github.com/FuelLabs/fuel-core/pull/1309): Add documentation for running debug builds with CLion and Visual Studio Code. - [#1308](https://github.com/FuelLabs/fuel-core/pull/1308): Add support for loading .env files when compiling with the `env` feature. This allows users to conveniently supply CLI arguments in a secure and IDE-agnostic way. - [#1304](https://github.com/FuelLabs/fuel-core/pull/1304): Implemented `submit_and_await_commit_with_receipts` method for `FuelClient`. @@ -19,6 +20,7 @@ Description of the upcoming release here. #### Breaking +- [1339](https://github.com/FuelLabs/fuel-core/pull/1339): Added a new required field called `base_asset_id` to the `FeeParameters` definition in `ConsensusParameters`, as well as default values for `base_asset_id` in the `beta` and `dev` chainspecs. - [#1322](https://github.com/FuelLabs/fuel-core/pull/1322): The `debug` flag is added to the CLI. The flag should be used for local development only. Enabling debug mode: - Allows GraphQL Endpoints to arbitrarily advance blocks. From a3cd09c781b33e606795f1e7be47132f9b7c8196 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 31 Aug 2023 18:36:12 -0400 Subject: [PATCH 05/87] Revert "Update CHANGELOG.md" This reverts commit d5908e02748a3019e2a26e50f5f16115c1b4c668. --- CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 533add48dfb..20eedefba13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,6 @@ Description of the upcoming release here. ### Added -- [1339](https://github.com/FuelLabs/fuel-core/pull/1339): Adds `baseAssetId` to `FeeParameters` in the GraphQL API. - [#1309](https://github.com/FuelLabs/fuel-core/pull/1309): Add documentation for running debug builds with CLion and Visual Studio Code. - [#1308](https://github.com/FuelLabs/fuel-core/pull/1308): Add support for loading .env files when compiling with the `env` feature. This allows users to conveniently supply CLI arguments in a secure and IDE-agnostic way. - [#1304](https://github.com/FuelLabs/fuel-core/pull/1304): Implemented `submit_and_await_commit_with_receipts` method for `FuelClient`. @@ -20,7 +19,6 @@ Description of the upcoming release here. 
#### Breaking -- [1339](https://github.com/FuelLabs/fuel-core/pull/1339): Added a new required field called `base_asset_id` to the `FeeParameters` definition in `ConsensusParameters`, as well as default values for `base_asset_id` in the `beta` and `dev` chainspecs. - [#1322](https://github.com/FuelLabs/fuel-core/pull/1322): The `debug` flag is added to the CLI. The flag should be used for local development only. Enabling debug mode: - Allows GraphQL Endpoints to arbitrarily advance blocks. From 1ce767a791a66664399f2bc9cf09273a9ebe4baf Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 5 Sep 2023 16:45:51 -0400 Subject: [PATCH 06/87] Refactoring --- crates/fuel-core/src/service/adapters/sync.rs | 7 + crates/services/sync/src/import.rs | 147 ++++++++++-------- .../test_helpers/pressure_peer_to_peer.rs | 7 + crates/services/sync/src/ports.rs | 7 + crates/types/src/services/p2p.rs | 13 ++ 5 files changed, 119 insertions(+), 62 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 5482eec1191..60ca0e95df7 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -82,6 +82,13 @@ impl PeerToPeerPort for P2PAdapter { } } + async fn get_transactions_2( + &self, + _block_id: SourcePeer>, + ) -> anyhow::Result>> { + todo!() + } + async fn report_peer( &self, peer: PeerId, diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 895542efa70..c2404197091 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -4,7 +4,7 @@ use std::{ future::Future, - iter, + // iter, ops::RangeInclusive, sync::Arc, }; @@ -14,18 +14,16 @@ use fuel_core_services::{ SharedMutex, StateWatcher, }; +use fuel_core_types::blockchain::consensus::Sealed; use fuel_core_types::{ blockchain::{ block::Block, - consensus::Sealed, - primitives::BlockId, + // consensus::Sealed, + header::BlockHeader, SealedBlock, SealedBlockHeader, }, - services::p2p::{ - PeerId, - SourcePeer, - }, + services::p2p::SourcePeer, }; use futures::{ stream::StreamExt, @@ -224,9 +222,11 @@ where let state = state.clone(); let executor = executor.clone(); async move { - let (peer_id, block) = res?; - - let res = execute_and_commit(executor.as_ref(), &state, block).await; + let SourcePeer { + peer_id, + data: sealed_block + } = res?; + let res = execute_and_commit(executor.as_ref(), &state, sealed_block).await; match &res { Ok(_) => { let _ = p2p.report_peer(peer_id.clone(), PeerReportReason::SuccessfulBlockImport) @@ -274,16 +274,26 @@ fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream>>> +) -> impl Stream>>>> { + // Currently: + // 1. Gets all headers in a flattened stream + // 2. For each header, maps it to a sealed block + + // Todo: + // Current: Request 1 block * transactions + // Goal: Request n blocks * transactions + get_header_stream(range, params, p2p.clone()).map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); - move |batch| { + // Todo: Accept a vector of headers + move |sealed_header| { { let p2p = p2p.clone(); let consensus_port = consensus_port.clone(); - get_sealed_blocks(batch, p2p.clone(), consensus_port.clone()) + // Todo: Pass a vector of headers + get_sealed_blocks(sealed_header, p2p.clone(), consensus_port.clone()) } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() @@ -291,6 +301,8 @@ fn get_block_stream< }) } +// Todo: +// Return a stream, where the item is a Vector of sealed block headers fn get_header_stream( range: RangeInclusive, params: &Config, @@ -300,16 +312,17 @@ fn get_header_stream( header_batch_size, .. } = params; let ranges = range_chunks(range, *header_batch_size); - let p2p_gen = iter::repeat_with(move || p2p.clone()); - let iter = ranges.zip(p2p_gen); - futures::stream::iter(iter) - .then(move |(range, p2p)| async { - tracing::debug!( - "getting header range from {} to {} inclusive", - range.start(), - range.end() - ); - get_headers_batch(range, p2p).await + futures::stream::iter(ranges) + .then(move |range| { + let p2p = p2p.clone(); + async { + tracing::debug!( + "getting header range from {} to {} inclusive", + range.start(), + range.end() + ); + get_headers_batch(range, p2p).await + } }) .flatten() .into_scan_none_or_err() @@ -327,6 +340,7 @@ fn range_chunks( }) } +// todo: Accept a vector of headers async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, @@ -334,24 +348,19 @@ async fn get_sealed_blocks< result: anyhow::Result>, p2p: Arc
<P>
, consensus_port: Arc, -) -> anyhow::Result> { +) -> anyhow::Result>> { let header = match result { Ok(h) => h, Err(e) => return Err(e), }; let SourcePeer { peer_id, - data: header, - } = header; - let id = header.entity.id(); - let block_id = SourcePeer { - peer_id: peer_id.clone(), - data: id, - }; + data: sealed_header, + } = &header; // Check the consensus is valid on this header. if !consensus_port - .check_sealed_header(&header) + .check_sealed_header(sealed_header) .trace_err("Failed to check consensus on header")? { let _ = p2p @@ -369,10 +378,20 @@ async fn get_sealed_blocks< // Wait for the da to be at least the da height on the header. consensus_port - .await_da_height(&header.entity.da_height) + .await_da_height(&header.data.entity.da_height) .await?; - get_transactions_on_block(p2p.as_ref(), block_id, header, &peer_id).await + // todo: pass a vector of headers + let consensus = sealed_header.consensus.clone(); + let header = header.map(|header| header.entity); + let block = get_block(p2p.as_ref(), header).await?; + let block = block.map(|peer_block| { + peer_block.map(|block| Sealed { + entity: block, + consensus: consensus.clone(), + }) + }); + Ok(block) } /// Waits for a notify or shutdown signal. @@ -473,24 +492,26 @@ async fn get_headers_batch( #[tracing::instrument( skip(p2p, header), fields( - height = **header.entity.height(), - id = %header.entity.consensus.generated.application_hash + height = **header.data.height(), + id = %header.data.consensus.generated.application_hash ), err )] -async fn get_transactions_on_block
<P>
( +async fn get_block
<P>
( p2p: &P, - block_id: SourcePeer, - header: SealedBlockHeader, - peer_id: &PeerId, -) -> anyhow::Result> + header: SourcePeer, +) -> anyhow::Result>> where P: PeerToPeerPort + Send + Sync + 'static, { - let Sealed { - entity: header, - consensus, + let SourcePeer { + peer_id, + data: block_header, } = header; + let block_id = SourcePeer { + peer_id: peer_id.clone(), + data: block_header.id(), + }; // Request the transactions for this block. let maybe_txs = p2p @@ -513,25 +534,27 @@ where Ok(None) } Some(transactions) => { - match Block::try_from_executed(header, transactions) { - Some(block) => Ok(Some(( - peer_id.clone(), - SealedBlock { - entity: block, - consensus, - }, - ))), - None => { - tracing::error!( - "Failed to created block from header and transactions" - ); - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::InvalidTransactions) - .await - .map_err(|e| tracing::error!("Failed to report invalid transaction from peer {:?}: {:?}", peer_id, e)); - Ok(None) - } + let block = + Block::try_from_executed(block_header, transactions).map(|block| { + SourcePeer { + peer_id: peer_id.clone(), + data: block, + } + }); + if block.is_none() { + tracing::error!("Failed to created block from header and transactions"); + let _ = p2p + .report_peer(peer_id.clone(), PeerReportReason::InvalidTransactions) + .await + .map_err(|e| { + tracing::error!( + "Failed to report invalid transaction from peer {:?}: {:?}", + peer_id, + e + ) + }); } + Ok(block) } } } diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 108efed4ab8..0e6d49f66b9 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -62,6 +62,13 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.get_transactions(block_id).await } + async fn get_transactions_2( + &self, + _block_id: SourcePeer>, + ) -> anyhow::Result>> { + todo!() + } + async fn report_peer( &self, _peer: PeerId, diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 1d270a70ed8..66713e0c313 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -57,6 +57,13 @@ pub trait PeerToPeerPort { block_id: SourcePeer, ) -> anyhow::Result>>; + /// Request transactions from the network for the given block + /// and source peer. + async fn get_transactions_2( + &self, + block_id: SourcePeer>, + ) -> anyhow::Result>>; + /// Report a peer for some reason to modify their reputation. async fn report_peer( &self, diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index d0f8e604e59..f10c3b1f289 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -57,6 +57,19 @@ pub struct SourcePeer { pub data: T, } +impl SourcePeer { + /// Maps a `SourcePeer` to `SourcePeer` by applying a function to the + /// contained data. The internal `peer_id` is maintained. 
+ pub fn map(self, mut f: F) -> SourcePeer + where + F: FnMut(T) -> U, + { + let peer_id = self.peer_id; + let data = f(self.data); + SourcePeer:: { peer_id, data } + } +} + impl GossipData { /// Construct a new gossip message pub fn new( From 0869dec640de45e468aba773599eed1820bfa48d Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 5 Sep 2023 20:12:04 -0400 Subject: [PATCH 07/87] WIP --- crates/services/p2p/src/ports.rs | 5 + .../p2p/src/request_response/messages.rs | 4 +- crates/services/p2p/src/service.rs | 25 ++ crates/services/sync/src/import.rs | 235 ++++++++++-------- crates/types/src/blockchain/consensus.rs | 16 ++ crates/types/src/services/p2p.rs | 16 +- 6 files changed, 200 insertions(+), 101 deletions(-) diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index fe326ada5b8..0c14057a042 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -31,6 +31,11 @@ pub trait P2pDb: Send + Sync { &self, block_id: &BlockId, ) -> StorageResult>>; + + fn get_transactions_2( + &self, + block_ids: Vec<&BlockId>, + ) -> StorageResult>>; } pub trait BlockHeightImporter: Send + Sync { diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index d466f16cfa5..03049a1f3cd 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -33,7 +33,7 @@ pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::( // This `OutboundResponse` gets prepared to be sent over the wire in `NetworkResponse` format. // The Peer that requested the message receives the response over the wire in `NetworkResponse` format. // It then unpacks it into `ResponseMessage`. -// `ResponseChannelItem` is used to forward the data within `ResponseMessage` to the receving channel. +// `ResponseChannelItem` is used to forward the data within `ResponseMessage` to the receiving channel. 
// Client Peer: `RequestMessage` (send request) // Server Peer: `RequestMessage` (receive request) -> `OutboundResponse` -> `NetworkResponse` (send response) // Client Peer: `NetworkResponse` (receive response) -> `ResponseMessage(data)` -> `ResponseChannelItem(channel, data)` (handle response) @@ -44,6 +44,7 @@ pub enum RequestMessage { Block(BlockHeight), SealedHeaders(Range), Transactions(#[serde_as(as = "FromInto<[u8; 32]>")] BlockId), + Transactions2(Vec), } /// Final Response Message that p2p service sends to the Orchestrator @@ -78,6 +79,7 @@ pub enum OutboundResponse { Block(Option>), SealedHeaders(Option>), Transactions(Option>>), + Transactions2(Option>>), } #[derive(Debug)] diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 1f97cb05690..9aec7b877c8 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -92,6 +92,11 @@ enum TaskRequest { from_peer: PeerId, channel: oneshot::Sender>>, }, + GetTransactions2 { + block_ids: Vec, + from_peer: PeerId, + channel: oneshot::Sender>>, + }, // Responds back to the p2p network RespondWithGossipsubMessageReport((GossipsubMessageInfo, GossipsubMessageAcceptance)), RespondWithPeerReport { @@ -243,6 +248,11 @@ where let channel_item = ResponseChannelItem::Transactions(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } + Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { + let request_msg = RequestMessage::Transactions2(block_ids); + let channel_item = ResponseChannelItem::Transactions(channel); + let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); + } Some(TaskRequest::RespondWithGossipsubMessageReport((message, acceptance))) => { report_message(&mut self.p2p_service, message, acceptance); } @@ -314,6 +324,21 @@ where } } } + RequestMessage::Transactions2(block_ids) => { + // match self.db.get_transactions(&block_id) { + // Ok(maybe_transactions) => { + // let response = maybe_transactions.map(Arc::new); + // let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); + // }, + // Err(e) => { + // tracing::error!("Failed to get transactions for block {:?}: {:?}", block_id, e); + // let response = None; + // let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); + // return Err(e.into()) + // } + // } + todo!() + } RequestMessage::SealedHeaders(range) => { let max_len = self.max_headers_per_request.try_into().expect("u32 should always fit into usize"); if range.len() > max_len { diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index c2404197091..15ddf31fe7d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -284,21 +284,22 @@ fn get_block_stream< // Current: Request 1 block * transactions // Goal: Request n blocks * transactions - get_header_stream(range, params, p2p.clone()).map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - // Todo: Accept a vector of headers - move |sealed_header| { - { - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - // Todo: Pass a vector of headers - get_sealed_blocks(sealed_header, p2p.clone(), consensus_port.clone()) + get_header_stream(range, params, p2p.clone()) + .chunks(10) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + move |sealed_headers| { + { + let result = sealed_headers.into_iter().collect(); + 
let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + get_sealed_blocks(result, p2p.clone(), consensus_port.clone()) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - } - }) + }) } // Todo: @@ -345,53 +346,59 @@ async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - result: anyhow::Result>, + result: anyhow::Result>>, p2p: Arc
<P>
, consensus_port: Arc, -) -> anyhow::Result>> { - let header = match result { +) -> anyhow::Result>> { + let headers = match result { Ok(h) => h, Err(e) => return Err(e), }; - let SourcePeer { - peer_id, - data: sealed_header, - } = &header; - // Check the consensus is valid on this header. - if !consensus_port - .check_sealed_header(sealed_header) - .trace_err("Failed to check consensus on header")? - { - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader) - .await - .map_err(|e| { - tracing::error!( - "Failed to report bad block header from peer {:?}: {:?}", - peer_id, - e - ) - }); - return Ok(None) - } + for header in headers.clone() { + // Check the consensus is valid on this header. + let SourcePeer { + peer_id, + data: sealed_header, + } = header; + if !consensus_port + .check_sealed_header(&sealed_header) + .trace_err("Failed to check consensus on header")? + { + let _ = p2p + .report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader) + .await + .map_err(|e| { + tracing::error!( + "Failed to report bad block header from peer {:?}: {:?}", + peer_id.clone(), + e + ) + }); - // Wait for the da to be at least the da height on the header. - consensus_port - .await_da_height(&header.data.entity.da_height) - .await?; + return Ok(vec![]) + } + + // Wait for the da to be at least the da height on the header. + consensus_port + .await_da_height(&sealed_header.entity.da_height) + .await?; + } // todo: pass a vector of headers - let consensus = sealed_header.consensus.clone(); - let header = header.map(|header| header.entity); - let block = get_block(p2p.as_ref(), header).await?; - let block = block.map(|peer_block| { - peer_block.map(|block| Sealed { - entity: block, - consensus: consensus.clone(), - }) + let consensus = headers.consensus.clone(); + let headers = sealed_headers.entity; + let source_headers = SourcePeer { + peer_id, + data: headers, + }; + let blocks = get_blocks(p2p.as_ref(), source_headers).await?; + let blocks = blocks.map(|blocks| Sealed { + entity: blocks, + consensus: consensus.clone(), }); - Ok(block) + + Ok(blocks) } /// Waits for a notify or shutdown signal. @@ -490,73 +497,103 @@ async fn get_headers_batch( } #[tracing::instrument( - skip(p2p, header), - fields( - height = **header.data.height(), - id = %header.data.consensus.generated.application_hash - ), + skip(p2p, headers), + // fields( + // height = **header.data.height(), + // id = %header.data.consensus.generated.application_hash + // ), err )] -async fn get_block
<P>
( +async fn get_blocks
<P>
( p2p: &P, - header: SourcePeer, -) -> anyhow::Result>> + headers: SourcePeer>, +) -> anyhow::Result>> where P: PeerToPeerPort + Send + Sync + 'static, { - let SourcePeer { - peer_id, - data: block_header, - } = header; - let block_id = SourcePeer { - peer_id: peer_id.clone(), - data: block_header.id(), - }; + // let SourcePeer { + // peer_id, + // data: block_headers, + // } = headers; + // let block_ids = block_headers + // .into_iter() + // .map(|block_header| block_header.id()) + // .collect::>(); + // let block_ids = SourcePeer { + // peer_id: peer_id.clone(), + // data: block_ids, + // }; // Request the transactions for this block. - let maybe_txs = p2p - .get_transactions(block_id) - .await - .trace_err("Failed to get transactions")? - .trace_none_warn("Could not find transactions for header"); - match maybe_txs { - None => { - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::MissingTransactions) - .await - .map_err(|e| { - tracing::error!( - "Failed to report missing transactions from peer {:?}: {:?}", - peer_id, - e - ) - }); - Ok(None) - } - Some(transactions) => { - let block = - Block::try_from_executed(block_header, transactions).map(|block| { - SourcePeer { - peer_id: peer_id.clone(), - data: block, - } - }); - if block.is_none() { - tracing::error!("Failed to created block from header and transactions"); + + // Need vec of (block_header, block_id) + // Map vec of (block_header, block_id) -> vec(block_header, transactions) + + let peer_id = headers.peer_id.clone(); + let hs = headers.data.clone(); + let block_ids = hs.into_iter().map(|h| h.id()); + let z = headers.map(|headers| headers.into_iter().zip(block_ids)); + + for (block_header, block_ids) in z { + let maybe_txs = p2p + .get_transactions_2(block_ids) + .await + .trace_err("Failed to get transactions")? + .trace_none_warn("Could not find transactions for header"); + match maybe_txs { + None => { let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::InvalidTransactions) + .report_peer(peer_id.clone(), PeerReportReason::MissingTransactions) .await .map_err(|e| { tracing::error!( + "Failed to report missing transactions from peer {:?}: {:?}", + peer_id.clone(), + e + ) + }); + let response = SourcePeer { + peer_id, + data: vec![], + }; + Ok(response) + } + Some(transactions) => { + let block = + Block::try_from_executed(block_header, transactions).map(|block| { + SourcePeer { + peer_id: peer_id.clone(), + data: block, + } + }); + if block.is_none() { + tracing::error!( + "Failed to created block from header and transactions" + ); + let _ = p2p + .report_peer( + peer_id.clone(), + PeerReportReason::InvalidTransactions, + ) + .await + .map_err(|e| { + tracing::error!( "Failed to report invalid transaction from peer {:?}: {:?}", peer_id, e ) - }); + }); + } + Ok(block) } - Ok(block) } } + + let r = SourcePeer { + peer_id, + data: vec![], + }; + Ok(r) } #[tracing::instrument( diff --git a/crates/types/src/blockchain/consensus.rs b/crates/types/src/blockchain/consensus.rs index 905124f1a50..7ac502a388d 100644 --- a/crates/types/src/blockchain/consensus.rs +++ b/crates/types/src/blockchain/consensus.rs @@ -67,6 +67,22 @@ pub struct Sealed { pub consensus: Consensus, } +impl Sealed> { + /// Transpose a Sealed Vector of `Entity` into a Vector of Sealed `Entity` + pub fn transpose(self) -> Vec> { + let consensus = self.consensus; + let entities = self + .entity + .into_iter() + .map(|e| Sealed { + entity: e, + consensus: consensus.clone(), + }) + .collect(); + entities + } +} + /// A vote from a validator. 
/// /// This is a dummy placeholder for the Vote Struct in fuel-bft diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index f10c3b1f289..077f6a05078 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -4,7 +4,11 @@ use crate::{ fuel_tx::Transaction, fuel_types::BlockHeight, }; -use std::fmt::Debug; +use std::{ + fmt::Debug, + vec, +}; + /// Contains types and logic for Peer Reputation pub mod peer_reputation; @@ -70,6 +74,16 @@ impl SourcePeer { } } +impl FromIterator> for SourcePeer> { + fn from_iter>>(iter: U) -> Self { + let mut c = Vec::new(); + for i in iter { + c.push(i); + } + c + } +} + impl GossipData { /// Construct a new gossip message pub fn new( From 250e19cd984b615bc523d0bec04aa981384c13d4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 5 Sep 2023 22:07:20 -0400 Subject: [PATCH 08/87] Migrate to canonical serialization --- crates/fuel-core/src/schema/tx.rs | 2 +- crates/fuel-core/src/schema/tx/types.rs | 2 +- crates/types/Cargo.toml | 2 +- crates/types/src/blockchain/header.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/fuel-core/src/schema/tx.rs b/crates/fuel-core/src/schema/tx.rs index 8754af77d16..b8a6b4d151e 100644 --- a/crates/fuel-core/src/schema/tx.rs +++ b/crates/fuel-core/src/schema/tx.rs @@ -48,7 +48,7 @@ use fuel_core_types::{ UniqueIdentifier, }, fuel_types, - fuel_types::bytes::Deserializable, + fuel_types::canonical::Deserialize, fuel_vm::checked_transaction::EstimatePredicates, services::txpool, }; diff --git a/crates/fuel-core/src/schema/tx/types.rs b/crates/fuel-core/src/schema/tx/types.rs index f2e92ae8b26..0d409576c18 100644 --- a/crates/fuel-core/src/schema/tx/types.rs +++ b/crates/fuel-core/src/schema/tx/types.rs @@ -60,7 +60,7 @@ use fuel_core_types::{ Chargeable, Executable, }, - fuel_types::bytes::SerializableVec, + fuel_types::canonical::SerializedSize, fuel_vm::ProgramState as VmProgramState, services::{ txpool, diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 3c2a8dbe8d1..d488b3e51eb 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -19,7 +19,7 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } derive_more = { version = "0.99" } -fuel-vm-private = { workspace = true, features = ["debug"] } +fuel-vm-private = { workspace = true } secrecy = "0.8" serde = { workspace = true, features = ["derive"], optional = true } tai64 = { version = "4.0", features = ["serde"] } diff --git a/crates/types/src/blockchain/header.rs b/crates/types/src/blockchain/header.rs index 30bfa112cb1..b449359afd4 100644 --- a/crates/types/src/blockchain/header.rs +++ b/crates/types/src/blockchain/header.rs @@ -12,7 +12,7 @@ use crate::{ fuel_merkle, fuel_tx::Transaction, fuel_types::{ - bytes::SerializableVec, + canonical::SerializedSize, BlockHeight, Bytes32, MessageId, From 5c4b85bf1c288f237888d8df831ed8d57cc618c2 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 6 Sep 2023 15:43:05 -0400 Subject: [PATCH 09/87] Migrate to canonical serialization --- bin/e2e-test-client/src/test_context.rs | 4 ++-- bin/e2e-test-client/src/tests/script.rs | 2 +- crates/client/src/client.rs | 2 +- crates/client/src/client/schema.rs | 9 ++++++--- crates/client/src/client/schema/tx.rs | 8 ++++---- crates/client/src/client/types.rs | 10 +++------- crates/fuel-core/src/executor.rs | 3 ++- tests/tests/contract.rs | 2 +- 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bin/e2e-test-client/src/test_context.rs 
b/bin/e2e-test-client/src/test_context.rs index ac97fbff8b4..a2c14e8dd4d 100644 --- a/bin/e2e-test-client/src/test_context.rs +++ b/bin/e2e-test-client/src/test_context.rs @@ -32,7 +32,7 @@ use fuel_core_types::{ UtxoId, }, fuel_types::{ - bytes::SizedBytes, + canonical::SerializedSize, Address, AssetId, }, @@ -253,7 +253,7 @@ impl Wallet { }); let tx = tx.finalize(); - println!("The size of the transaction is {}", tx.serialized_size()); + println!("The size of the transaction is {}", tx.size()); let status = self .client diff --git a/bin/e2e-test-client/src/tests/script.rs b/bin/e2e-test-client/src/tests/script.rs index 0440d3cdee1..128c0761686 100644 --- a/bin/e2e-test-client/src/tests/script.rs +++ b/bin/e2e-test-client/src/tests/script.rs @@ -9,7 +9,7 @@ use fuel_core_types::{ ScriptExecutionResult, Transaction, }, - fuel_types::bytes::Deserializable, + fuel_types::canonical::Deserialize, }; use libtest_mimic::Failed; use std::time::Duration; diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 7867a5020fe..7fa4eae42f8 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -42,7 +42,7 @@ use fuel_core_types::{ }, fuel_types, fuel_types::{ - bytes::SerializableVec, + canonical::SerializedSize, BlockHeight, MessageId, Nonce, diff --git a/crates/client/src/client/schema.rs b/crates/client/src/client/schema.rs index 3dac319fc2c..2ede5eaf7a6 100644 --- a/crates/client/src/client/schema.rs +++ b/crates/client/src/client/schema.rs @@ -4,7 +4,10 @@ pub mod schema { cynic::use_schema!("./assets/schema.sdl"); } -use fuel_core_types::fuel_tx; +use fuel_core_types::{ + fuel_tx, + fuel_types::canonical, +}; use hex::FromHexError; use std::{ array::TryFromSliceError, @@ -289,9 +292,9 @@ pub enum ConversionError { #[error("failed integer conversion")] IntegerConversion, #[error("failed to deserialize transaction from bytes {0}")] - TransactionFromBytesError(std::io::Error), + TransactionFromBytesError(canonical::Error), #[error("failed to deserialize receipt from bytes {0}")] - ReceiptFromBytesError(std::io::Error), + ReceiptFromBytesError(canonical::Error), #[error("failed to convert from bytes due to unexpected length")] BytesLength, #[error("Unknown variant of the {0} enum")] diff --git a/crates/client/src/client/schema/tx.rs b/crates/client/src/client/schema/tx.rs index ec5d80f15f6..35d14d441d6 100644 --- a/crates/client/src/client/schema/tx.rs +++ b/crates/client/src/client/schema/tx.rs @@ -19,7 +19,7 @@ use crate::client::{ use fuel_core_types::{ fuel_tx, fuel_types::{ - bytes::Deserializable, + canonical::Deserialize, Bytes32, }, fuel_vm, @@ -319,7 +319,7 @@ pub struct AllReceipts { pub mod tests { use super::*; use crate::client::schema::Bytes; - use fuel_core_types::fuel_types::bytes::SerializableVec; + use fuel_core_types::fuel_types::canonical::SerializedSize; #[test] fn transparent_transaction_by_id_query_gql_output() { @@ -368,7 +368,7 @@ pub mod tests { #[test] fn dry_run_tx_gql_output() { use cynic::MutationBuilder; - let mut tx = fuel_tx::Transaction::default_test_tx(); + let tx = fuel_tx::Transaction::default_test_tx(); let query = DryRun::build(DryRunArg { tx: HexString(Bytes(tx.to_bytes())), utxo_validation: None, @@ -379,7 +379,7 @@ pub mod tests { #[test] fn submit_tx_gql_output() { use cynic::MutationBuilder; - let mut tx = fuel_tx::Transaction::default_test_tx(); + let tx = fuel_tx::Transaction::default_test_tx(); let query = Submit::build(TxArg { tx: HexString(Bytes(tx.to_bytes())), }); diff --git 
a/crates/client/src/client/types.rs b/crates/client/src/client/types.rs index 77971dca695..471b7db9e54 100644 --- a/crates/client/src/client/types.rs +++ b/crates/client/src/client/types.rs @@ -43,13 +43,9 @@ use crate::client::schema::{ }; use fuel_core_types::{ fuel_tx::Transaction, - fuel_types::bytes::Deserializable, + fuel_types::canonical::Deserialize, fuel_vm::ProgramState, }; -use serde::{ - Deserialize, - Serialize, -}; use tai64::Tai64; pub mod primitives { @@ -79,13 +75,13 @@ pub mod primitives { pub type TransactionId = Bytes32; } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub struct TransactionResponse { pub transaction: Transaction, pub status: TransactionStatus, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum TransactionStatus { Submitted { submitted_at: Tai64, diff --git a/crates/fuel-core/src/executor.rs b/crates/fuel-core/src/executor.rs index f25a50e49f7..7b82763e64a 100644 --- a/crates/fuel-core/src/executor.rs +++ b/crates/fuel-core/src/executor.rs @@ -26,6 +26,7 @@ use fuel_core_storage::{ StorageAsRef, StorageInspect, }; +#[allow(unused_imports)] use fuel_core_types::{ blockchain::{ block::{ @@ -78,6 +79,7 @@ use fuel_core_types::{ UtxoId, }, fuel_types::{ + canonical::SerializedSize, BlockHeight, MessageId, }, @@ -1719,7 +1721,6 @@ mod tests { TransactionBuilder, }, fuel_types::{ - bytes::SerializableVec, ChainId, ContractId, Salt, diff --git a/tests/tests/contract.rs b/tests/tests/contract.rs index 1c467409afa..f204ee5e598 100644 --- a/tests/tests/contract.rs +++ b/tests/tests/contract.rs @@ -18,7 +18,7 @@ use fuel_core_types::{ fuel_asm::*, fuel_tx::*, fuel_types::{ - bytes::*, + canonical::SerializedSize, ChainId, }, fuel_vm::*, From 0f2b81f409448ce966857d70fd68e28ca9ec82b1 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 6 Sep 2023 18:47:54 -0400 Subject: [PATCH 10/87] Update snapshots --- ...s__snapshot_configurable_block_height.snap | 28 ++++++++++++++----- ...ests__snapshot_contract_with_balances.snap | 28 ++++++++++++++----- ...__tests__snapshot_contract_with_state.snap | 28 ++++++++++++++----- ...ts__snapshot_contract_with_tx_pointer.snap | 28 ++++++++++++++----- ...tests__snapshot_contract_with_utxo_id.snap | 28 ++++++++++++++----- ..._tests__snapshot_local_testnet_config.snap | 28 ++++++++++++++----- ...ig__tests__snapshot_simple_coin_state.snap | 28 ++++++++++++++----- ...nfig__tests__snapshot_simple_contract.snap | 28 ++++++++++++++----- ..._tests__snapshot_simple_message_state.snap | 28 ++++++++++++++----- ..._chain__tests__chain_gql_query_output.snap | 26 +++++++++++++---- 10 files changed, 209 insertions(+), 69 deletions(-) diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_configurable_block_height.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_configurable_block_height.snap index c858551034d..27c1a66e077 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_configurable_block_height.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_configurable_block_height.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 91 expression: json --- { @@ -72,12 +71,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, 
@@ -98,9 +95,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -109,8 +104,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -142,6 +136,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -162,6 +160,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -170,6 +172,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -177,6 +187,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_balances.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_balances.snap index dd5e976dd17..4e928552394 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_balances.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_balances.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 146 expression: json --- { @@ -84,12 +83,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -110,9 +107,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -121,8 +116,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -154,6 +148,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -174,6 +172,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -182,6 +184,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -189,6 +199,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_state.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_state.snap index 8e5ed0dd16a..801dc7a1949 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_state.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_state.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 130 expression: json --- { @@ -84,12 +83,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -110,9 +107,7 @@ expression: json "pshl": 
2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -121,8 +116,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -154,6 +148,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -174,6 +172,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -182,6 +184,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -189,6 +199,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_tx_pointer.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_tx_pointer.snap index 000f92c612b..9388fd21566 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_tx_pointer.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_tx_pointer.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 178 expression: json --- { @@ -80,12 +79,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -106,9 +103,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -117,8 +112,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -150,6 +144,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -170,6 +168,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -178,6 +180,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -185,6 +195,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_utxo_id.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_utxo_id.snap index f9f85c10a0f..80ef5945a98 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_utxo_id.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_contract_with_utxo_id.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 162 expression: json --- { @@ -80,12 +79,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -106,9 +103,7 @@ expression: json "pshl": 2, "ret_contract": 13, 
"rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -117,8 +112,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -150,6 +144,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -170,6 +168,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -178,6 +180,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -185,6 +195,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_local_testnet_config.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_local_testnet_config.snap index 1a99665f2d7..8a09093cf07 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_local_testnet_config.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_local_testnet_config.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 68 expression: json --- { @@ -98,12 +97,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -124,9 +121,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -135,8 +130,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -168,6 +162,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -188,6 +186,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -196,6 +198,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -203,6 +213,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_coin_state.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_coin_state.snap index 837665bd876..81a38fd5110 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_coin_state.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_coin_state.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 194 expression: json --- { @@ -83,12 +82,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -109,9 +106,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, 
"sll": 1, "slli": 1, "srl": 1, @@ -120,8 +115,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -153,6 +147,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -173,6 +171,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -181,6 +183,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -188,6 +198,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_contract.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_contract.snap index 122afd88a46..0dd254ffd0d 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_contract.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_contract.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 114 expression: json --- { @@ -78,12 +77,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -104,9 +101,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -115,8 +110,7 @@ expression: json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -148,6 +142,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -168,6 +166,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -176,6 +178,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -183,6 +193,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_message_state.snap b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_message_state.snap index d77a57dee6e..7378954ebe7 100644 --- a/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_message_state.snap +++ b/crates/chain-config/src/snapshots/fuel_core_chain_config__config__tests__snapshot_simple_message_state.snap @@ -1,6 +1,5 @@ --- source: crates/chain-config/src/config.rs -assertion_line: 210 expression: json --- { @@ -81,12 +80,10 @@ expression: json "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -107,9 +104,7 @@ expression: json "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -118,8 +113,7 @@ expression: 
json "sub": 1, "subi": 1, "sw": 1, - "sww": 43, - "swwq": 44, + "sww": 67, "time": 1, "tr": 105, "tro": 60, @@ -151,6 +145,10 @@ expression: json "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 11, + "dep_per_unit": 214 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -171,6 +169,10 @@ expression: json "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 3, + "dep_per_unit": 2000 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -179,6 +181,14 @@ expression: json "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 2, + "dep_per_unit": 214 + }, + "scwq": { + "base": 13, + "dep_per_unit": 5 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -186,6 +196,10 @@ expression: json "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 44, + "dep_per_unit": 5 } } }, diff --git a/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__chain__tests__chain_gql_query_output.snap b/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__chain__tests__chain_gql_query_output.snap index 3e7451a622b..cb9d72df04f 100644 --- a/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__chain__tests__chain_gql_query_output.snap +++ b/crates/client/src/client/schema/snapshots/fuel_core_client__client__schema__chain__tests__chain_gql_query_output.snap @@ -1,6 +1,5 @@ --- source: crates/client/src/client/schema/chain.rs -assertion_line: 295 expression: operation.query --- query { @@ -101,12 +100,10 @@ query { jnzb jnef jneb - k256 lb log lt lw - mcpi mint mlog modOp @@ -127,9 +124,7 @@ query { pshl ret rvrt - s256 sb - scwq sll slli srl @@ -139,7 +134,6 @@ query { subi sw sww - swwq time tr tro @@ -171,6 +165,10 @@ query { base depPerUnit } + k256 { + base + depPerUnit + } ldc { base depPerUnit @@ -191,6 +189,10 @@ query { base depPerUnit } + mcpi { + base + depPerUnit + } meq { base depPerUnit @@ -199,6 +201,14 @@ query { base depPerUnit } + s256 { + base + depPerUnit + } + scwq { + base + depPerUnit + } smo { base depPerUnit @@ -207,6 +217,10 @@ query { base depPerUnit } + swwq { + base + depPerUnit + } } } } From d6fcfacff9b3fb84837653904a74e7a9eb326954 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 6 Sep 2023 18:48:08 -0400 Subject: [PATCH 11/87] Update chainspecs with dummy costs --- .../scripts/chainspec/beta_chainspec.json | 25 +++++++++++++++---- .../scripts/chainspec/dev_chainspec.json | 25 +++++++++++++++---- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/deployment/scripts/chainspec/beta_chainspec.json b/deployment/scripts/chainspec/beta_chainspec.json index dcde979a818..a1e3c9480cd 100644 --- a/deployment/scripts/chainspec/beta_chainspec.json +++ b/deployment/scripts/chainspec/beta_chainspec.json @@ -103,12 +103,10 @@ "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -129,9 +127,7 @@ "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -141,7 +137,6 @@ "subi": 1, "sw": 1, "sww": 43, - "swwq": 44, "time": 1, "tr": 105, "tro": 60, @@ -173,6 +168,10 @@ "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 0, + "dep_per_unit": 0 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -193,6 +192,10 @@ "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 0, + "dep_per_unit": 0 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -201,6 +204,14 @@ "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 0, + "dep_per_unit": 0 + }, + "scwq": { + "base": 
0, + "dep_per_unit": 0 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -208,6 +219,10 @@ "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 0, + "dep_per_unit": 0 } } }, diff --git a/deployment/scripts/chainspec/dev_chainspec.json b/deployment/scripts/chainspec/dev_chainspec.json index 35491b9221c..d3de30062fa 100644 --- a/deployment/scripts/chainspec/dev_chainspec.json +++ b/deployment/scripts/chainspec/dev_chainspec.json @@ -73,12 +73,10 @@ "jnzb": 1, "jnef": 1, "jneb": 1, - "k256": 11, "lb": 1, "log": 9, "lt": 1, "lw": 1, - "mcpi": 33, "mint": 135, "mlog": 1, "mod": 1, @@ -99,9 +97,7 @@ "pshl": 2, "ret_contract": 13, "rvrt_contract": 13, - "s256": 2, "sb": 1, - "scwq": 13, "sll": 1, "slli": 1, "srl": 1, @@ -111,7 +107,6 @@ "subi": 1, "sw": 1, "sww": 43, - "swwq": 44, "time": 1, "tr": 105, "tro": 60, @@ -143,6 +138,10 @@ "base": 17, "dep_per_unit": 790 }, + "k256": { + "base": 0, + "dep_per_unit": 0 + }, "ldc": { "base": 15, "dep_per_unit": 272 @@ -163,6 +162,10 @@ "base": 1, "dep_per_unit": 2000 }, + "mcpi": { + "base": 0, + "dep_per_unit": 0 + }, "meq": { "base": 1, "dep_per_unit": 2500 @@ -171,6 +174,14 @@ "base": 29, "dep_per_unit": 62 }, + "s256": { + "base": 0, + "dep_per_unit": 0 + }, + "scwq": { + "base": 0, + "dep_per_unit": 0 + }, "smo": { "base": 209, "dep_per_unit": 55 @@ -178,6 +189,10 @@ "srwq": { "base": 47, "dep_per_unit": 5 + }, + "swwq": { + "base": 0, + "dep_per_unit": 0 } } }, From 5edbf531e6f3e5e6413aaaecac74a36feb93b080 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 13 Sep 2023 14:54:48 -0400 Subject: [PATCH 12/87] WIP --- crates/services/sync/src/import.rs | 44 ++++++++---------------------- crates/types/src/services/p2p.rs | 34 ++++++++++++++--------- 2 files changed, 32 insertions(+), 46 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 15ddf31fe7d..0b2dfb5de53 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -274,7 +274,7 @@ fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream>>>> +) -> impl Stream>>>> { // Currently: // 1. Gets all headers in a flattened stream @@ -286,19 +286,13 @@ fn get_block_stream< get_header_stream(range, params, p2p.clone()) .chunks(10) + .map({ move |sealed_headers| sealed_headers.into_iter().collect() }) + .into_scan_err() + .scan_err() .map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); - move |sealed_headers| { - { - let result = sealed_headers.into_iter().collect(); - let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); - get_sealed_blocks(result, p2p.clone(), consensus_port.clone()) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - } + move |x| get_sealed_blocks(x, p2p.clone(), consensus_port.clone()) }) } @@ -346,14 +340,14 @@ async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - result: anyhow::Result>>, + headers: Vec>, p2p: Arc
<P>
, consensus_port: Arc, ) -> anyhow::Result>> { - let headers = match result { - Ok(h) => h, - Err(e) => return Err(e), - }; + // let headers = match result { + // Ok(h) => h, + // Err(e) => return Err(e), + // }; for header in headers.clone() { // Check the consensus is valid on this header. @@ -511,19 +505,6 @@ async fn get_blocks
<P>
( where P: PeerToPeerPort + Send + Sync + 'static, { - // let SourcePeer { - // peer_id, - // data: block_headers, - // } = headers; - // let block_ids = block_headers - // .into_iter() - // .map(|block_header| block_header.id()) - // .collect::>(); - // let block_ids = SourcePeer { - // peer_id: peer_id.clone(), - // data: block_ids, - // }; - // Request the transactions for this block. // Need vec of (block_header, block_id) @@ -552,10 +533,7 @@ where e ) }); - let response = SourcePeer { - peer_id, - data: vec![], - }; + let response = peer_id.bind(vec![]); Ok(response) } Some(transactions) => { diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 077f6a05078..eb169be9247 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -4,10 +4,7 @@ use crate::{ fuel_tx::Transaction, fuel_types::BlockHeight, }; -use std::{ - fmt::Debug, - vec, -}; +use std::fmt::Debug; /// Contains types and logic for Peer Reputation pub mod peer_reputation; @@ -74,15 +71,15 @@ impl SourcePeer { } } -impl FromIterator> for SourcePeer> { - fn from_iter>>(iter: U) -> Self { - let mut c = Vec::new(); - for i in iter { - c.push(i); - } - c - } -} +// impl FromIterator> for SourcePeer> { +// fn from_iter>>(iter: U) -> Self { +// let mut c = Vec::new(); +// for i in iter { +// c.push(i); +// } +// c +// } +// } impl GossipData { /// Construct a new gossip message @@ -141,3 +138,14 @@ impl From for Vec { peer_id.0 } } + +impl PeerId { + /// Bind the PeerId and given data of type T together to generate a + /// SourcePeer + pub fn bind(self, data: T) -> SourcePeer { + SourcePeer { + peer_id: self, + data, + } + } +} From e33c07058bef97215cc6b7f0a3e8198af8297741 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 13 Sep 2023 17:32:03 -0400 Subject: [PATCH 13/87] WIP --- crates/services/p2p/src/service.rs | 19 ++++++++++ crates/services/sync/src/import.rs | 58 +++++++++++++++++++++++------- crates/services/sync/src/ports.rs | 2 +- 3 files changed, 65 insertions(+), 14 deletions(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 9aec7b877c8..68ac898c291 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -477,6 +477,25 @@ impl SharedState { receiver.await.map_err(|e| anyhow!("{}", e)) } + pub async fn get_transactions2_from_peer( + &self, + peer_id: Vec, + block_ids: Vec, + ) -> anyhow::Result>> { + let (sender, receiver) = oneshot::channel(); + let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); + + self.request_sender + .send(TaskRequest::GetTransactions2 { + block_ids, + from_peer, + channel: sender, + }) + .await?; + + receiver.await.map_err(|e| anyhow!("{}", e)) + } + pub fn broadcast_vote(&self, vote: Arc) -> anyhow::Result<()> { self.request_sender .try_send(TaskRequest::BroadcastVote(vote))?; diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0b2dfb5de53..65b537c8654 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -14,7 +14,13 @@ use fuel_core_services::{ SharedMutex, StateWatcher, }; -use fuel_core_types::blockchain::consensus::Sealed; +use fuel_core_types::{ + blockchain::consensus::Sealed, + services::{ + block_importer::Source, + p2p::PeerId, + }, +}; use fuel_core_types::{ blockchain::{ block::Block, @@ -306,8 +312,13 @@ fn get_header_stream( let Config { header_batch_size, .. 
} = params; + let mut peer = None; let ranges = range_chunks(range, *header_batch_size); futures::stream::iter(ranges) + // For each range: + // - Asynchronously get the batch of headers from the range + // - Flatten the batches of headers into a single stream + // - Stop at None/Err .then(move |range| { let p2p = p2p.clone(); async { @@ -316,7 +327,17 @@ fn get_header_stream( range.start(), range.end() ); - get_headers_batch(range, p2p).await + + let (peer_id, stream) = get_headers_batch(range, p2p).await; + let same = if let Some(peer_id) = peer_id { + if peer.is_none() { + peer = Some(peer_id); + } + peer.map(|peer| peer == peer_id).unwrap_or(false) + } else { + false + }; + } }) .flatten() @@ -413,10 +434,25 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } +// TODO: +// This function streams items of SourcePeers. +// However, internally, it uses +// p2p.get_sealed_block_headers(start..end) +// which returns a SourcePeer> +// This means that it is guaranteed that all block headers are sourced from the +// same peer. +// This means that the stream item is attaching the same SourcePeer to +// each SealedBlockHeader, but there is no visible guarantee to the caller that +// each SourcePeer is the same. +// Instead, it may be superior to return a single SourcePeer separately so that +// the caller knows all headers come from a single source async fn get_headers_batch( mut range: RangeInclusive, p2p: Arc, -) -> impl Stream>>> { +) -> ( + Option, + impl Stream>>, +) { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), @@ -428,13 +464,12 @@ async fn get_headers_batch( .get_sealed_block_headers(start..end) .await .trace_err("Failed to get headers"); - let sorted_headers = match res { + let (peer_id, sorted_headers) = match res { Ok(sourced_headers) => { let SourcePeer { peer_id, data: maybe_headers, } = sourced_headers; - let cloned_peer_id = peer_id.clone(); let headers = match maybe_headers { None => { tracing::error!( @@ -450,11 +485,7 @@ async fn get_headers_batch( .map(move |header| { let header = range.next().and_then(|height| { if *(header.entity.height()) == height.into() { - let sourced_header = SourcePeer { - peer_id: cloned_peer_id.clone(), - data: header, - }; - Some(sourced_header) + Some(header) } else { None } @@ -482,12 +513,13 @@ async fn get_headers_batch( }); } } - headers + (Some(peer_id), headers) } - Err(e) => vec![Err(e)], + Err(e) => (None, vec![Err(e)]), }; - futures::stream::iter(sorted_headers) + let stream = futures::stream::iter(sorted_headers); + (peer_id, stream) } #[tracing::instrument( diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 66713e0c313..8c233ec4ed8 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -61,7 +61,7 @@ pub trait PeerToPeerPort { /// and source peer. async fn get_transactions_2( &self, - block_id: SourcePeer>, + block_ids: SourcePeer>, ) -> anyhow::Result>>; /// Report a peer for some reason to modify their reputation. 
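The TODO above `get_headers_batch` argues that, because `p2p.get_sealed_block_headers` already returns every header from one peer, the peer should be attached once to the whole batch rather than to each individual header. Below is a minimal illustrative sketch of that return shape, using the `SourcePeer` type and the `PeerId::bind` helper added earlier in this series; the function name `attach_source` is hypothetical and the snippet is not part of any patch.

use fuel_core_types::{
    blockchain::SealedBlockHeader,
    services::p2p::{PeerId, SourcePeer},
};

/// Wrap a whole batch of headers in a single `SourcePeer`, so callers get one
/// visible guarantee that every header came from the same peer, instead of a
/// separate `SourcePeer<SealedBlockHeader>` per item.
fn attach_source(
    peer_id: PeerId,
    headers: Vec<SealedBlockHeader>,
) -> SourcePeer<Vec<SealedBlockHeader>> {
    // `bind` pairs the peer id with arbitrary data to build a `SourcePeer<T>`.
    peer_id.bind(headers)
}

Later patches in this series adopt the same pattern, for example building the header request as a `SourcePeer<Range<u32>>` with `peer.bind(start..end)` before calling `get_sealed_block_headers`.
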
From e47349cf7730049c4f76de9d45010142002f8335 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 14 Sep 2023 12:37:16 -0400 Subject: [PATCH 14/87] WIP --- crates/fuel-core/src/service/adapters/sync.rs | 37 ++- crates/services/p2p/src/service.rs | 53 +++-- crates/services/sync/src/import.rs | 220 ++++++++++-------- crates/services/sync/src/ports.rs | 8 +- 4 files changed, 196 insertions(+), 122 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 60ca0e95df7..12d97d8e6a5 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -48,13 +48,30 @@ impl PeerToPeerPort for P2PAdapter { } } + async fn select_peer( + &self, + block_height: BlockHeight, + ) -> anyhow::Result> { + if let Some(service) = &self.service { + let peer_id = service.select_peer(block_height).await?; + Ok(peer_id) + } else { + Err(anyhow::anyhow!("No P2P service available")) + } + } + async fn get_sealed_block_headers( &self, - block_range_height: Range, + block_height_range: SourcePeer>, ) -> anyhow::Result>>> { + let SourcePeer { + peer_id, + data: block_height_range, + } = block_height_range; if let Some(service) = &self.service { - let (peer_id, headers) = - service.get_sealed_block_headers(block_range_height).await?; + let headers = service + .get_sealed_block_headers(peer_id.into(), block_height_range) + .await?; let sourced_headers = SourcePeer { peer_id: peer_id.into(), data: headers, @@ -84,9 +101,19 @@ impl PeerToPeerPort for P2PAdapter { async fn get_transactions_2( &self, - _block_id: SourcePeer>, + block_ids: SourcePeer>, ) -> anyhow::Result>> { - todo!() + let SourcePeer { + peer_id, + data: blocks, + } = block_ids; + if let Some(service) = &self.service { + service + .get_transactions_2_from_peer(peer_id.into(), blocks) + .await + } else { + Err(anyhow::anyhow!("No P2P service available")) + } } async fn report_peer( diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 68ac898c291..bff21dafd94 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -50,6 +50,7 @@ use fuel_core_types::{ GossipsubMessageAcceptance, GossipsubMessageInfo, PeerId as FuelPeerId, + PeerId, TransactionGossipData, }, }; @@ -85,7 +86,8 @@ enum TaskRequest { }, GetSealedHeaders { block_height_range: Range, - channel: oneshot::Sender<(PeerId, Option>)>, + from_peer: PeerId, + channel: oneshot::Sender>>, }, GetTransactions { block_id: BlockId, @@ -104,6 +106,10 @@ enum TaskRequest { score: AppScore, reporting_service: &'static str, }, + SelectPeer { + block_height: BlockHeight, + channel: oneshot::Sender>, + }, } impl Debug for TaskRequest { @@ -232,23 +238,17 @@ where let peer = self.p2p_service.peer_manager().get_peer_id_with_height(&height); let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } - Some(TaskRequest::GetSealedHeaders { block_height_range, channel: response}) => { + Some(TaskRequest::GetSealedHeaders { block_height_range, from_peer, channel: response}) => { let request_msg = RequestMessage::SealedHeaders(block_height_range.clone()); let channel_item = ResponseChannelItem::SealedHeaders(response); - - // Note: this range has already been check for - // validity in `SharedState::get_sealed_block_headers`. 
- let block_height = BlockHeight::from(block_height_range.end - 1); - let peer = self.p2p_service.peer_manager() - .get_peer_id_with_height(&block_height); - let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); + let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } Some(TaskRequest::GetTransactions { block_id, from_peer, channel }) => { let request_msg = RequestMessage::Transactions(block_id); let channel_item = ResponseChannelItem::Transactions(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } - Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { + Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { let request_msg = RequestMessage::Transactions2(block_ids); let channel_item = ResponseChannelItem::Transactions(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); @@ -259,6 +259,11 @@ where Some(TaskRequest::RespondWithPeerReport { peer_id, score, reporting_service }) => { self.p2p_service.report_peer(peer_id, score, reporting_service) } + Some(TaskRequest::SelectPeer { block_height, channel }) => { + let peer = self.p2p_service.peer_manager() + .get_peer_id_with_height(&block_height); + let _ = channel.send(peer); + } None => { unreachable!("The `Task` is holder of the `Sender`, so it should not be possible"); } @@ -433,11 +438,29 @@ impl SharedState { receiver.await.map_err(|e| anyhow!("{}", e)) } + pub async fn select_peer( + &self, + block_height: BlockHeight, + ) -> anyhow::Result> { + let (sender, receiver) = oneshot::channel(); + + self.request_sender + .send(TaskRequest::SelectPeer { + block_height, + channel: sender, + }) + .await?; + + receiver.await.map_err(|e| anyhow!("{}", e)) + } + pub async fn get_sealed_block_headers( &self, + peer_id: Vec, block_height_range: Range, - ) -> anyhow::Result<(Vec, Option>)> { + ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); + let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); if block_height_range.is_empty() { return Err(anyhow!( @@ -448,14 +471,12 @@ impl SharedState { self.request_sender .send(TaskRequest::GetSealedHeaders { block_height_range, + from_peer, channel: sender, }) .await?; - receiver - .await - .map(|(peer_id, headers)| (peer_id.to_bytes(), headers)) - .map_err(|e| anyhow!("{}", e)) + receiver.await.map_err(|e| anyhow!("{}", e)) } pub async fn get_transactions_from_peer( @@ -477,7 +498,7 @@ impl SharedState { receiver.await.map_err(|e| anyhow!("{}", e)) } - pub async fn get_transactions2_from_peer( + pub async fn get_transactions_2_from_peer( &self, peer_id: Vec, block_ids: Vec, diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 65b537c8654..f53baca8085 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -15,11 +15,10 @@ use fuel_core_services::{ StateWatcher, }; use fuel_core_types::{ + self, blockchain::consensus::Sealed, - services::{ - block_importer::Source, - p2p::PeerId, - }, + fuel_types::BlockHeight, + services::p2p::PeerId, }; use fuel_core_types::{ blockchain::{ @@ -287,38 +286,42 @@ fn get_block_stream< // 2. 
For each header, maps it to a sealed block // Todo: - // Current: Request 1 block * transactions - // Goal: Request n blocks * transactions + // Current: Request 1 block * transactions from a given peer + // Goal: Request n blocks * transactions from x given peers - get_header_stream(range, params, p2p.clone()) - .chunks(10) - .map({ move |sealed_headers| sealed_headers.into_iter().collect() }) - .into_scan_err() - .scan_err() - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - move |x| get_sealed_blocks(x, p2p.clone(), consensus_port.clone()) + // Headers from single peer + get_header_batch_streams(range, params, p2p.clone()) + .then({ + move |header_batch_stream| async { + let batch = header_batch_stream + .into_scan_err() + .scan_err() + .collect::>() + .await; + let iter = get_sealed_blocks(batch, p2p.clone(), consensus.clone()).await; + iter + } }) + .flatten() } +type HeaderStreamStreamItem = anyhow::Result>; + // Todo: // Return a stream, where the item is a Vector of sealed block headers -fn get_header_stream( +fn get_header_batch_streams( range: RangeInclusive, params: &Config, p2p: Arc

, -) -> impl Stream>> { + // ) -> impl Stream>> { +) -> impl Stream> { let Config { header_batch_size, .. } = params; - let mut peer = None; let ranges = range_chunks(range, *header_batch_size); futures::stream::iter(ranges) // For each range: - // - Asynchronously get the batch of headers from the range - // - Flatten the batches of headers into a single stream - // - Stop at None/Err + // - Asynchronously get a batch of headers from the range .then(move |range| { let p2p = p2p.clone(); async { @@ -327,22 +330,9 @@ fn get_header_stream( range.start(), range.end() ); - - let (peer_id, stream) = get_headers_batch(range, p2p).await; - let same = if let Some(peer_id) = peer_id { - if peer.is_none() { - peer = Some(peer_id); - } - peer.map(|peer| peer == peer_id).unwrap_or(false) - } else { - false - }; - + get_headers_batch(range, p2p).await } }) - .flatten() - .into_scan_none_or_err() - .scan_none_or_err() } fn range_chunks( @@ -356,58 +346,75 @@ fn range_chunks( }) } +async fn check_sealed_header< + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, +>( + header: &SealedBlockHeader, + peer_id: PeerId, + p2p: Arc

, + consensus_port: Arc, +) -> anyhow::Result { + let validity = consensus_port + .check_sealed_header(header) + .trace_err("Failed to check consensus on header")?; + if !validity { + let _ = p2p + .report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader) + .await + .map_err(|e| { + tracing::error!( + "Failed to report bad block header from peer {:?}: {:?}", + peer_id.clone(), + e + ) + }); + } + Ok(validity) +} + // todo: Accept a vector of headers async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - headers: Vec>, + headers: Vec>>, p2p: Arc

, consensus_port: Arc, ) -> anyhow::Result>> { - // let headers = match result { - // Ok(h) => h, - // Err(e) => return Err(e), - // }; - + let headers = match headers { + Ok(h) => h, + Err(e) => return Err(e), + }; + let SourcePeer { + peer_id, + data: headers, + } = headers; for header in headers.clone() { // Check the consensus is valid on this header. - let SourcePeer { - peer_id, - data: sealed_header, - } = header; - if !consensus_port - .check_sealed_header(&sealed_header) - .trace_err("Failed to check consensus on header")? - { - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader) - .await - .map_err(|e| { - tracing::error!( - "Failed to report bad block header from peer {:?}: {:?}", - peer_id.clone(), - e - ) - }); - - return Ok(vec![]) + if !check_sealed_header( + &header, + peer_id.clone(), + p2p.clone(), + consensus_port.clone(), + )? { + return Ok(peer_id.bind(vec![])) } // Wait for the da to be at least the da height on the header. consensus_port - .await_da_height(&sealed_header.entity.da_height) + .await_da_height(&header.entity.da_height) .await?; } // todo: pass a vector of headers - let consensus = headers.consensus.clone(); - let headers = sealed_headers.entity; - let source_headers = SourcePeer { - peer_id, - data: headers, - }; - let blocks = get_blocks(p2p.as_ref(), source_headers).await?; + // let consensus = headers.consensus.clone(); + // let headers = sealed_headers.entity; + // let source_headers = SourcePeer { + // peer_id, + // data: headers, + // }; + let blocks = get_blocks(p2p.as_ref(), headers).await?; let blocks = blocks.map(|blocks| Sealed { entity: blocks, consensus: consensus.clone(), @@ -434,13 +441,15 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } +type HeadersBatchItem = anyhow::Result>>; + // TODO: -// This function streams items of SourcePeers. +// This function streams items of Result>>. // However, internally, it uses // p2p.get_sealed_block_headers(start..end) -// which returns a SourcePeer> -// This means that it is guaranteed that all block headers are sourced from the -// same peer. +// which returns a Result>>> +// Because the SourcePeer wraps the vector, it implies a guarantee that all +// block headers are sourced from the same peer. // This means that the stream item is attaching the same SourcePeer to // each SealedBlockHeader, but there is no visible guarantee to the caller that // each SourcePeer is the same. @@ -449,22 +458,29 @@ async fn wait_for_notify_or_shutdown( async fn get_headers_batch( mut range: RangeInclusive, p2p: Arc, -) -> ( - Option, - impl Stream>>, -) { +) -> SourcePeer>>> { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), range.end() ); + let start = *range.start(); let end = *range.end() + 1; + + let block_height = BlockHeight::from(range.end()); + let peer = p2p + .select_peer(block_height) + .await + .map_err(|e| vec![Err(e)])? 
+ .ok_or_else(|| vec![Err(anyhow!("No peer"))]) + .trace_err("Failed to select a peer")?; + let res = p2p - .get_sealed_block_headers(start..end) + .get_sealed_block_headers(peer.bind(start..end)) .await .trace_err("Failed to get headers"); - let (peer_id, sorted_headers) = match res { + match res { Ok(sourced_headers) => { let SourcePeer { peer_id, @@ -485,7 +501,8 @@ async fn get_headers_batch( .map(move |header| { let header = range.next().and_then(|height| { if *(header.entity.height()) == height.into() { - Some(header) + let sourced = peer_id.bind(header); + Some(sourced) } else { None } @@ -498,30 +515,34 @@ async fn get_headers_batch( if headers.len() != expected_len as usize || headers.iter().any(|h| h.is_err()) { - let _ = p2p - .report_peer( - peer_id.clone(), - PeerReportReason::MissingBlockHeaders, - ) - .await - .map_err(|e| { - tracing::error!( - "Failed to report missing block header from peer {:?}: {:?}", - peer_id, - e - ) - }); + report_peer( + p2p.clone(), + peer_id.clone(), + PeerReportReason::MissingBlockHeaders, + ) + .await; } } - (Some(peer_id), headers) + let stream = futures::stream::iter(headers); + peer_id.bind(stream) } - Err(e) => (None, vec![Err(e)]), - }; - let stream = futures::stream::iter(sorted_headers); - (peer_id, stream) + Err(e) => vec![Err(e)], + } +} + +async fn report_peer( + p2p: Arc, + peer_id: PeerId, + reason: PeerReportReason, +) { + let _ = p2p + .report_peer(peer_id.clone(), reason) + .await + .map_err(|e| tracing::error!("Failed to report peer {:?}: {:?}", peer_id, e)); } +// Get blocks correlating to the headers from a specific peer #[tracing::instrument( skip(p2p, headers), // fields( @@ -542,8 +563,7 @@ where // Need vec of (block_header, block_id) // Map vec of (block_header, block_id) -> vec(block_header, transactions) - let peer_id = headers.peer_id.clone(); - let hs = headers.data.clone(); + let SourcePeer { peer_id, data: hs } = headers; let block_ids = hs.into_iter().map(|h| h.id()); let z = headers.map(|headers| headers.into_iter().zip(block_ids)); diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 8c233ec4ed8..40a28590119 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -44,10 +44,16 @@ pub trait PeerToPeerPort { /// Stream of newly observed block heights. fn height_stream(&self) -> BoxStream; + /// Get a peer based on the block height + async fn select_peer( + &self, + block_height: BlockHeight, + ) -> anyhow::Result>; + /// Request a range of sealed block headers from the network. 
async fn get_sealed_block_headers( &self, - block_height_range: Range, + block_height_range: SourcePeer>, ) -> anyhow::Result>>>; /// Request transactions from the network for the given block From 17226990b22cdd309d7b2fb830463546e20ee7bf Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 14 Sep 2023 18:23:47 -0400 Subject: [PATCH 15/87] WIP --- crates/fuel-core/src/service/adapters/sync.rs | 3 +- crates/services/p2p/src/codecs/postcard.rs | 9 + crates/services/p2p/src/p2p_service.rs | 4 +- .../p2p/src/request_response/messages.rs | 5 +- crates/services/p2p/src/service.rs | 10 +- crates/services/sync/src/import.rs | 277 ++++++++---------- crates/services/sync/src/ports.rs | 3 +- crates/types/src/services/p2p.rs | 17 ++ 8 files changed, 165 insertions(+), 163 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 12d97d8e6a5..aa73366762f 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -28,6 +28,7 @@ use fuel_core_types::{ }, PeerId, SourcePeer, + TransactionData, }, }; use std::ops::Range; @@ -102,7 +103,7 @@ impl PeerToPeerPort for P2PAdapter { async fn get_transactions_2( &self, block_ids: SourcePeer>, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { let SourcePeer { peer_id, data: blocks, diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 50f6e699eb0..0305836e738 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -246,6 +246,15 @@ impl RequestResponseConverter for PostcardCodec { Ok(NetworkResponse::Transactions(response)) } + OutboundResponse::Transactions2(transactions) => { + let response = if let Some(transactions) = transactions { + Some(self.serialize(transactions.as_ref())?) + } else { + None + }; + + Ok(NetworkResponse::Transactions(response)) + } OutboundResponse::SealedHeaders(maybe_headers) => { let response = maybe_headers .as_ref() diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 967cd09f1c1..ca247a4cd9e 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -566,7 +566,7 @@ impl FuelP2PService { } } FuelBehaviourEvent::RequestResponse(req_res_event) => match req_res_event { - RequestResponseEvent::Message { peer, message } => match message { + RequestResponseEvent::Message { message, .. 
} => match message { RequestResponseMessage::Request { request, channel, @@ -613,7 +613,7 @@ impl FuelP2PService { Some(ResponseChannelItem::SealedHeaders(channel)), Ok(ResponseMessage::SealedHeaders(headers)), ) => { - if channel.send((peer, headers)).is_err() { + if channel.send(headers).is_err() { debug!( "Failed to send through the channel for {:?}", request_id diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 03049a1f3cd..33a2a79c02b 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -11,8 +11,8 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, + services::p2p::TransactionData, }; -use libp2p::PeerId; use serde::{ Deserialize, Serialize, @@ -59,8 +59,9 @@ pub enum ResponseMessage { #[derive(Debug)] pub enum ResponseChannelItem { Block(oneshot::Sender>), - SealedHeaders(oneshot::Sender<(PeerId, Option>)>), + SealedHeaders(oneshot::Sender>>), Transactions(oneshot::Sender>>), + Transactions2(oneshot::Sender>>), } /// Response that is sent over the wire diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index bff21dafd94..2415a6daaf9 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -50,7 +50,7 @@ use fuel_core_types::{ GossipsubMessageAcceptance, GossipsubMessageInfo, PeerId as FuelPeerId, - PeerId, + TransactionData, TransactionGossipData, }, }; @@ -97,7 +97,7 @@ enum TaskRequest { GetTransactions2 { block_ids: Vec, from_peer: PeerId, - channel: oneshot::Sender>>, + channel: oneshot::Sender>>, }, // Responds back to the p2p network RespondWithGossipsubMessageReport((GossipsubMessageInfo, GossipsubMessageAcceptance)), @@ -250,7 +250,7 @@ where } Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { let request_msg = RequestMessage::Transactions2(block_ids); - let channel_item = ResponseChannelItem::Transactions(channel); + let channel_item = ResponseChannelItem::Transactions2(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } Some(TaskRequest::RespondWithGossipsubMessageReport((message, acceptance))) => { @@ -329,7 +329,7 @@ where } } } - RequestMessage::Transactions2(block_ids) => { + RequestMessage::Transactions2(_block_ids) => { // match self.db.get_transactions(&block_id) { // Ok(maybe_transactions) => { // let response = maybe_transactions.map(Arc::new); @@ -502,7 +502,7 @@ impl SharedState { &self, peer_id: Vec, block_ids: Vec, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index f53baca8085..4afea830e66 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -16,15 +16,18 @@ use fuel_core_services::{ }; use fuel_core_types::{ self, - blockchain::consensus::Sealed, + // blockchain::consensus::Sealed, fuel_types::BlockHeight, - services::p2p::PeerId, + services::p2p::{ + PeerId, + TransactionData, + }, }; use fuel_core_types::{ blockchain::{ block::Block, // consensus::Sealed, - header::BlockHeader, + // header::BlockHeader, SealedBlock, SealedBlockHeader, }, @@ -32,7 +35,7 @@ use fuel_core_types::{ }; use futures::{ stream::StreamExt, - FutureExt, + // FutureExt, Stream, }; use tokio::sync::Notify; @@ -185,8 +188,27 @@ where 
let shutdown_signal = shutdown.clone(); let (shutdown_guard, mut shutdown_guard_recv) = tokio::sync::mpsc::channel::<()>(1); + + let block_height = BlockHeight::from(*range.end()); + let peer = p2p.select_peer(block_height).await; + + if let Err(err) = peer { + let err = Err(err); + return (0, err) + } + + let peer = peer.expect("Checked"); + + if let None = peer { + let err = Err(anyhow!("Expected peer")); + return (0, err) + } + + let peer = peer.expect("Checked"); + let block_stream = - get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()); + get_block_stream(peer, range.clone(), params, p2p.clone(), consensus.clone()) + .await; let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); @@ -271,57 +293,52 @@ where } } -fn get_block_stream< +async fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( + peer: PeerId, range: RangeInclusive, params: &Config, p2p: Arc

, consensus: Arc, -) -> impl Stream>>>> -{ - // Currently: - // 1. Gets all headers in a flattened stream - // 2. For each header, maps it to a sealed block - - // Todo: - // Current: Request 1 block * transactions from a given peer - // Goal: Request n blocks * transactions from x given peers - - // Headers from single peer - get_header_batch_streams(range, params, p2p.clone()) - .then({ - move |header_batch_stream| async { - let batch = header_batch_stream - .into_scan_err() - .scan_err() - .collect::>() - .await; - let iter = get_sealed_blocks(batch, p2p.clone(), consensus.clone()).await; - iter +) -> impl Stream>>>> { + get_header_stream(peer.clone(), range, params, p2p.clone()) + .chunks(10) + .map({ + let p2p = p2p.clone(); + let consensus_port = consensus.clone(); + let peer = peer.clone(); + move |batch| { + { + let p2p = p2p.clone(); + let consensus_port = consensus_port.clone(); + let peer = peer.clone(); + let batch = + batch.into_iter().filter_map(|header| header.ok()).collect(); + let headers = peer.bind(batch); + get_sealed_blocks(headers, p2p.clone(), consensus_port.clone()) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() } }) - .flatten() } -type HeaderStreamStreamItem = anyhow::Result>; +type HeaderStreamItem = anyhow::Result; -// Todo: -// Return a stream, where the item is a Vector of sealed block headers -fn get_header_batch_streams( +fn get_header_stream( + peer: PeerId, range: RangeInclusive, params: &Config, p2p: Arc

, // ) -> impl Stream>> { -) -> impl Stream> { +) -> impl Stream { let Config { header_batch_size, .. } = params; let ranges = range_chunks(range, *header_batch_size); futures::stream::iter(ranges) - // For each range: - // - Asynchronously get a batch of headers from the range .then(move |range| { let p2p = p2p.clone(); async { @@ -330,9 +347,12 @@ fn get_header_batch_streams( range.start(), range.end() ); - get_headers_batch(range, p2p).await + get_headers_batch(peer, range, p2p).await } }) + .flatten() + .into_scan_none_or_err() + .scan_none_or_err() } fn range_chunks( @@ -359,68 +379,37 @@ async fn check_sealed_header< .check_sealed_header(header) .trace_err("Failed to check consensus on header")?; if !validity { - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader) - .await - .map_err(|e| { - tracing::error!( - "Failed to report bad block header from peer {:?}: {:?}", - peer_id.clone(), - e - ) - }); + report_peer( + p2p.clone(), + peer_id.clone(), + PeerReportReason::BadBlockHeader, + ) + .await; } Ok(validity) } -// todo: Accept a vector of headers async fn get_sealed_blocks< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - headers: Vec>>, + headers: SourcePeer>, p2p: Arc

, - consensus_port: Arc, -) -> anyhow::Result>> { - let headers = match headers { - Ok(h) => h, - Err(e) => return Err(e), - }; - let SourcePeer { - peer_id, - data: headers, - } = headers; - for header in headers.clone() { + consensus: Arc, +) -> anyhow::Result>> { + let SourcePeer { peer_id, data } = &headers; + for header in data { // Check the consensus is valid on this header. - if !check_sealed_header( - &header, - peer_id.clone(), - p2p.clone(), - consensus_port.clone(), - )? { - return Ok(peer_id.bind(vec![])) + if !check_sealed_header(header, peer_id.clone(), p2p.clone(), consensus.clone()) + .await? + { + return Ok(vec![]) } // Wait for the da to be at least the da height on the header. - consensus_port - .await_da_height(&header.entity.da_height) - .await?; + consensus.await_da_height(&header.entity.da_height).await?; } - - // todo: pass a vector of headers - // let consensus = headers.consensus.clone(); - // let headers = sealed_headers.entity; - // let source_headers = SourcePeer { - // peer_id, - // data: headers, - // }; - let blocks = get_blocks(p2p.as_ref(), headers).await?; - let blocks = blocks.map(|blocks| Sealed { - entity: blocks, - consensus: consensus.clone(), - }); - - Ok(blocks) + get_blocks(p2p.as_ref(), headers).await } /// Waits for a notify or shutdown signal. @@ -456,9 +445,10 @@ type HeadersBatchItem = anyhow::Result>>; // Instead, it may be superior to return a single SourcePeer separately so that // the caller knows all headers come from a single source async fn get_headers_batch( + peer: PeerId, mut range: RangeInclusive, p2p: Arc, -) -> SourcePeer>>> { +) -> impl Stream>> { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), @@ -468,14 +458,6 @@ async fn get_headers_batch( let start = *range.start(); let end = *range.end() + 1; - let block_height = BlockHeight::from(range.end()); - let peer = p2p - .select_peer(block_height) - .await - .map_err(|e| vec![Err(e)])? - .ok_or_else(|| vec![Err(anyhow!("No peer"))]) - .trace_err("Failed to select a peer")?; - let res = p2p .get_sealed_block_headers(peer.bind(start..end)) .await @@ -524,7 +506,7 @@ async fn get_headers_batch( } } let stream = futures::stream::iter(headers); - peer_id.bind(stream) + stream } Err(e) => vec![Err(e)], @@ -553,77 +535,68 @@ async fn report_peer( )] async fn get_blocks

( p2p: &P, - headers: SourcePeer>, -) -> anyhow::Result>> + headers: SourcePeer>, +) -> anyhow::Result>> where P: PeerToPeerPort + Send + Sync + 'static, { // Request the transactions for this block. - // Need vec of (block_header, block_id) - // Map vec of (block_header, block_id) -> vec(block_header, transactions) - - let SourcePeer { peer_id, data: hs } = headers; - let block_ids = hs.into_iter().map(|h| h.id()); - let z = headers.map(|headers| headers.into_iter().zip(block_ids)); - - for (block_header, block_ids) in z { - let maybe_txs = p2p - .get_transactions_2(block_ids) - .await - .trace_err("Failed to get transactions")? - .trace_none_warn("Could not find transactions for header"); - match maybe_txs { - None => { - let _ = p2p - .report_peer(peer_id.clone(), PeerReportReason::MissingTransactions) - .await - .map_err(|e| { - tracing::error!( - "Failed to report missing transactions from peer {:?}: {:?}", - peer_id.clone(), - e - ) - }); - let response = peer_id.bind(vec![]); - Ok(response) - } - Some(transactions) => { - let block = - Block::try_from_executed(block_header, transactions).map(|block| { - SourcePeer { - peer_id: peer_id.clone(), - data: block, - } - }); + let block_ids = headers.as_ref().map(|headers| { + headers + .iter() + .map(|header| header.entity.id()) + .collect::>() + }); + let peer_id = block_ids.peer_id.clone(); + let maybe_txs = p2p + .get_transactions_2(block_ids) + .await + .trace_err("Failed to get transactions")? + .trace_none_warn("Could not find transactions for header"); + match maybe_txs { + None => { + report_peer( + p2p.clone(), + peer_id.clone(), + PeerReportReason::MissingTransactions, + ) + .await; + Ok(vec![]) + } + Some(transaction_data) => { + let headers = headers.data; + let mut blocks = vec![]; + let iter = headers.into_iter().zip(transaction_data); + for (block_header, td) in iter { + let SealedBlockHeader { + consensus, + entity: header, + } = block_header; + let TransactionData { transactions, .. } = td; + let block = Block::try_from_executed(header, transactions).map(|block| { + SealedBlock { + entity: block, + consensus, + } + }); if block.is_none() { tracing::error!( "Failed to created block from header and transactions" ); - let _ = p2p - .report_peer( - peer_id.clone(), - PeerReportReason::InvalidTransactions, - ) - .await - .map_err(|e| { - tracing::error!( - "Failed to report invalid transaction from peer {:?}: {:?}", - peer_id, - e - ) - }); + report_peer( + p2p.clone(), + peer_id.clone(), + PeerReportReason::InvalidTransactions, + ) + .await; } - Ok(block) + blocks.push(block); } + + Ok(blocks) } } - - let r = SourcePeer { - peer_id, - data: vec![], - }; - Ok(r) } #[tracing::instrument( diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 40a28590119..9b631c141e7 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -15,6 +15,7 @@ use fuel_core_types::{ services::p2p::{ PeerId, SourcePeer, + TransactionData, }, }; use std::ops::Range; @@ -68,7 +69,7 @@ pub trait PeerToPeerPort { async fn get_transactions_2( &self, block_ids: SourcePeer>, - ) -> anyhow::Result>>; + ) -> anyhow::Result>>; /// Report a peer for some reason to modify their reputation. async fn report_peer( diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index eb169be9247..1917c1b7bfd 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -1,6 +1,7 @@ //! 
Contains types related to P2P data use crate::{ + blockchain::primitives::BlockId, fuel_tx::Transaction, fuel_types::BlockHeight, }; @@ -9,6 +10,14 @@ use std::fmt::Debug; /// Contains types and logic for Peer Reputation pub mod peer_reputation; +/// Maps BlockId to its transactions +#[derive(Debug)] +pub struct TransactionData { + /// Block id + pub block_id: BlockId, + /// transactions + pub transactions: Vec, +} /// Lightweight representation of gossipped data that only includes IDs #[derive(Debug, Clone, Hash, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] @@ -69,6 +78,14 @@ impl SourcePeer { let data = f(self.data); SourcePeer:: { peer_id, data } } + + /// Asref + pub fn as_ref(&self) -> SourcePeer<&T> { + SourcePeer { + peer_id: self.peer_id.clone(), + data: &self.data, + } + } } // impl FromIterator> for SourcePeer> { From f96ea14306cde211b4dfc8aee9d709548825d521 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 14:50:23 -0400 Subject: [PATCH 16/87] WIP --- Cargo.lock | 1 + crates/fuel-core/src/service/adapters/p2p.rs | 7 + crates/fuel-core/src/service/adapters/sync.rs | 7 +- crates/services/p2p/src/service.rs | 7 +- crates/services/sync/Cargo.toml | 1 + crates/services/sync/src/import.rs | 149 +++++++++--------- .../test_helpers/pressure_peer_to_peer.rs | 37 +++-- 7 files changed, 114 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd9ac84ee2c..ca6314ec8b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3122,6 +3122,7 @@ dependencies = [ "fuel-core-types", "futures", "mockall", + "rand 0.8.5", "test-case", "tokio", "tracing", diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 11f23d7be4f..cd712b62e48 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -45,6 +45,13 @@ impl P2pDb for Database { ) -> StorageResult>> { self.get_transactions_on_block(block_id) } + + fn get_transactions_2( + &self, + _block_ids: Vec<&BlockId>, + ) -> StorageResult>> { + todo!() + } } impl BlockHeightImporter for BlockImporterAdapter { diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index aa73366762f..22664cde2bd 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -71,12 +71,9 @@ impl PeerToPeerPort for P2PAdapter { } = block_height_range; if let Some(service) = &self.service { let headers = service - .get_sealed_block_headers(peer_id.into(), block_height_range) + .get_sealed_block_headers(peer_id.clone().into(), block_height_range) .await?; - let sourced_headers = SourcePeer { - peer_id: peer_id.into(), - data: headers, - }; + let sourced_headers = peer_id.bind(headers); Ok(sourced_headers) } else { Err(anyhow::anyhow!("No P2P service available")) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 2415a6daaf9..d6e9b1fe888 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -441,7 +441,7 @@ impl SharedState { pub async fn select_peer( &self, block_height: BlockHeight, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let (sender, receiver) = oneshot::channel(); self.request_sender @@ -451,7 +451,10 @@ impl SharedState { }) .await?; - receiver.await.map_err(|e| anyhow!("{}", e)) + receiver + .await + .map(|peer_id| peer_id.map(|peer_id| peer_id.to_bytes().into())) + .map_err(|e| anyhow!("{}", e)) } pub 
async fn get_sealed_block_headers( diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index 13f40c3b8fd..f83ba4e90ab 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -16,6 +16,7 @@ fuel-core-services = { workspace = true } fuel-core-types = { workspace = true } futures = { workspace = true } mockall = { workspace = true, optional = true } +rand = { workspace = true } tokio = { workspace = true, features = ["full"] } tracing = { workspace = true } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 4afea830e66..a020e2f421e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -2,6 +2,10 @@ //! This module contains the import task which is responsible for //! importing blocks from the network into the local blockchain. +use futures::{ + FutureExt, + TryStreamExt, +}; use std::{ future::Future, // iter, @@ -127,6 +131,7 @@ impl Import { self.notify.notify_one() } } + impl Import where P: PeerToPeerPort + Send + Sync + 'static, @@ -206,9 +211,14 @@ where let peer = peer.expect("Checked"); - let block_stream = - get_block_stream(peer, range.clone(), params, p2p.clone(), consensus.clone()) - .await; + let block_stream = get_block_stream( + peer.clone(), + range.clone(), + params, + p2p.clone(), + consensus.clone(), + ) + .await; let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); @@ -222,7 +232,7 @@ where blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response - _ = shutdown_signal.while_started() => Ok(None) + _ = shutdown_signal.while_started() => Ok(vec![]) } }).then(|task| async { task.map_err(|e| anyhow!(e))? }) }) @@ -230,8 +240,8 @@ where .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. // Note the error will be returned but the stream will close. - .into_scan_none_or_err() - .scan_none_or_err() + .into_scan_err() + .scan_err() // Continue the stream until the shutdown signal is received. .take_until({ let mut s = shutdown.clone(); @@ -240,30 +250,33 @@ where tracing::info!("In progress import stream shutting down"); } }) + // Then execute and commit the block .then({ let state = state.clone(); let executor = executor.clone(); let p2p = p2p.clone(); + let peer = peer.clone(); move |res| { - let p2p = p2p.clone(); let state = state.clone(); let executor = executor.clone(); + let p2p = p2p.clone(); + let peer = peer.clone(); async move { - let SourcePeer { - peer_id, - data: sealed_block - } = res?; - let res = execute_and_commit(executor.as_ref(), &state, sealed_block).await; + let executor = executor.clone(); + let sealed_blocks = res?; + let iter = futures::stream::iter(sealed_blocks); + let res = iter.then(|sealed_block| async { + let executor = executor.clone(); + execute_and_commit(executor.as_ref(), &state, sealed_block).await + }).try_collect::>().await; match &res { Ok(_) => { - let _ = p2p.report_peer(peer_id.clone(), PeerReportReason::SuccessfulBlockImport) - .await - .map_err(|e| tracing::error!("Failed to report successful block import for peer {:?}: {:?}", peer_id, e)); + report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; }, Err(e) => { // If this fails, then it means that consensus has approved a block that is invalid. // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. 
- tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer_id, e); + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); }, } res @@ -302,7 +315,7 @@ async fn get_block_stream< params: &Config, p2p: Arc

, consensus: Arc, -) -> impl Stream>>>> { +) -> impl Stream>>> { get_header_stream(peer.clone(), range, params, p2p.clone()) .chunks(10) .map({ @@ -319,21 +332,19 @@ async fn get_block_stream< let headers = peer.bind(batch); get_sealed_blocks(headers, p2p.clone(), consensus_port.clone()) } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() + // .instrument(tracing::debug_span!("consensus_and_transactions")) + // .in_current_span() } }) } -type HeaderStreamItem = anyhow::Result; - fn get_header_stream( peer: PeerId, range: RangeInclusive, params: &Config, p2p: Arc

, // ) -> impl Stream>> { -) -> impl Stream { +) -> impl Stream> { let Config { header_batch_size, .. } = params; @@ -341,13 +352,14 @@ fn get_header_stream( futures::stream::iter(ranges) .then(move |range| { let p2p = p2p.clone(); - async { + let peer = peer.clone(); + async move { tracing::debug!( "getting header range from {} to {} inclusive", range.start(), range.end() ); - get_headers_batch(peer, range, p2p).await + get_headers_batch(peer, range, p2p.as_ref()).await } }) .flatten() @@ -380,7 +392,7 @@ async fn check_sealed_header< .trace_err("Failed to check consensus on header")?; if !validity { report_peer( - p2p.clone(), + p2p.as_ref(), peer_id.clone(), PeerReportReason::BadBlockHeader, ) @@ -396,7 +408,7 @@ async fn get_sealed_blocks< headers: SourcePeer>, p2p: Arc

, consensus: Arc, -) -> anyhow::Result>> { +) -> anyhow::Result> { let SourcePeer { peer_id, data } = &headers; for header in data { // Check the consensus is valid on this header. @@ -407,7 +419,7 @@ async fn get_sealed_blocks< } // Wait for the da to be at least the da height on the header. - consensus.await_da_height(&header.entity.da_height).await?; + consensus.await_da_height(&header.entity.da_height).await? } get_blocks(p2p.as_ref(), headers).await } @@ -430,25 +442,14 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } -type HeadersBatchItem = anyhow::Result>>; - -// TODO: -// This function streams items of Result>>. -// However, internally, it uses -// p2p.get_sealed_block_headers(start..end) -// which returns a Result>>> -// Because the SourcePeer wraps the vector, it implies a guarantee that all -// block headers are sourced from the same peer. -// This means that the stream item is attaching the same SourcePeer to -// each SealedBlockHeader, but there is no visible guarantee to the caller that -// each SourcePeer is the same. -// Instead, it may be superior to return a single SourcePeer separately so that -// the caller knows all headers come from a single source -async fn get_headers_batch( +async fn get_headers_batch

( peer: PeerId, mut range: RangeInclusive, - p2p: Arc, -) -> impl Stream>> { + p2p: &P, +) -> impl Stream>> +where + P: PeerToPeerPort + Send + Sync + 'static, +{ tracing::debug!( "getting header range from {} to {} inclusive", range.start(), @@ -462,7 +463,7 @@ async fn get_headers_batch( .get_sealed_block_headers(peer.bind(start..end)) .await .trace_err("Failed to get headers"); - match res { + let headers = match res { Ok(sourced_headers) => { let SourcePeer { peer_id, @@ -483,8 +484,7 @@ async fn get_headers_batch( .map(move |header| { let header = range.next().and_then(|height| { if *(header.entity.height()) == height.into() { - let sourced = peer_id.bind(header); - Some(sourced) + Some(header) } else { None } @@ -498,26 +498,26 @@ async fn get_headers_batch( || headers.iter().any(|h| h.is_err()) { report_peer( - p2p.clone(), + p2p, peer_id.clone(), PeerReportReason::MissingBlockHeaders, ) .await; } } - let stream = futures::stream::iter(headers); - stream + headers } Err(e) => vec![Err(e)], - } + }; + futures::stream::iter(headers) } -async fn report_peer( - p2p: Arc, - peer_id: PeerId, - reason: PeerReportReason, -) { +async fn report_peer

(p2p: &P, peer_id: PeerId, reason: PeerReportReason) +where + P: PeerToPeerPort + Send + Sync + 'static, +{ + // Failure to report a peer is a non-fatal error; ignore the error let _ = p2p .report_peer(peer_id.clone(), reason) .await @@ -536,7 +536,7 @@ async fn report_peer( async fn get_blocks

( p2p: &P, headers: SourcePeer>, -) -> anyhow::Result>> +) -> anyhow::Result> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -554,38 +554,36 @@ where .await .trace_err("Failed to get transactions")? .trace_none_warn("Could not find transactions for header"); - match maybe_txs { + let blocks = match maybe_txs { None => { - report_peer( - p2p.clone(), - peer_id.clone(), - PeerReportReason::MissingTransactions, - ) - .await; - Ok(vec![]) + report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions) + .await; + vec![] } Some(transaction_data) => { let headers = headers.data; - let mut blocks = vec![]; let iter = headers.into_iter().zip(transaction_data); + let mut blocks = vec![]; for (block_header, td) in iter { let SealedBlockHeader { consensus, entity: header, } = block_header; let TransactionData { transactions, .. } = td; - let block = Block::try_from_executed(header, transactions).map(|block| { - SealedBlock { - entity: block, - consensus, + let block = Block::try_from_executed(header, transactions) + .ok_or(anyhow!("Failed to create a new block: Transactions do not match the header.")) + .map(|block| { + SealedBlock { + entity: block, + consensus, } }); - if block.is_none() { + if block.is_err() { tracing::error!( "Failed to created block from header and transactions" ); report_peer( - p2p.clone(), + p2p, peer_id.clone(), PeerReportReason::InvalidTransactions, ) @@ -593,10 +591,11 @@ where } blocks.push(block); } - - Ok(blocks) + blocks } - } + }; + let result = blocks.into_iter().collect(); + result } #[tracing::instrument( diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 0e6d49f66b9..1cd373b3278 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -20,8 +20,14 @@ use fuel_core_types::{ services::p2p::{ PeerId, SourcePeer, + TransactionData, }, }; +use rand::{ + prelude::StdRng, + Rng, + SeedableRng, +}; use std::{ ops::Range, time::Duration, @@ -39,14 +45,23 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.height_stream() } + async fn select_peer( + &self, + _block_height: BlockHeight, + ) -> anyhow::Result> { + let mut rng = StdRng::seed_from_u64(0xF00DF00D); + let peer_id = PeerId::from(rng.gen::<[u8; 32]>().to_vec()); + Ok(Some(peer_id)) + } + async fn get_sealed_block_headers( &self, - block_height_range: Range, + block_height_range: SourcePeer>, ) -> anyhow::Result>>> { self.counts.apply(|c| c.inc_headers()); tokio::time::sleep(self.durations[0]).await; self.counts.apply(|c| c.dec_headers()); - for _ in block_height_range.clone() { + for _ in block_height_range.data.clone() { self.counts.apply(|c| c.inc_blocks()); } self.p2p.get_sealed_block_headers(block_height_range).await @@ -65,7 +80,7 @@ impl PeerToPeerPort for PressurePeerToPeer { async fn get_transactions_2( &self, _block_id: SourcePeer>, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { todo!() } @@ -82,19 +97,15 @@ impl PressurePeerToPeer { pub fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_headers().returning(|range| { - let headers = Some( - range + let headers = range.map(|range| { + let range = range .clone() .map(BlockHeight::from) .map(empty_header) - .collect(), - ); - let peer_id = vec![].into(); - let source_peer_data = SourcePeer { - peer_id, - data: headers, - }; - 
Ok(source_peer_data) + .collect(); + Some(range) + }); + Ok(headers) }); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); From f5e695fef0b6caba1cf738ccfdec95c3130f850b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 18:09:40 -0400 Subject: [PATCH 17/87] WIP --- crates/fuel-core/src/database/sealed_block.rs | 17 +++ crates/fuel-core/src/service/adapters/p2p.rs | 7 +- crates/services/p2p/src/codecs/postcard.rs | 9 ++ crates/services/p2p/src/p2p_service.rs | 22 +++- crates/services/p2p/src/ports.rs | 5 +- .../p2p/src/request_response/messages.rs | 4 +- crates/services/p2p/src/service.rs | 36 ++++--- crates/services/sync/src/import.rs | 9 +- .../test_helpers/pressure_peer_to_peer.rs | 3 +- crates/services/sync/src/import/tests.rs | 101 ++++++++++++++---- crates/services/sync/src/service/tests.rs | 15 ++- crates/types/src/services/p2p.rs | 18 ++-- 12 files changed, 181 insertions(+), 65 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index fa1d4f07b4d..f89cb358d77 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -25,6 +25,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, + services::p2p::TransactionData, }; use std::ops::Range; @@ -135,4 +136,20 @@ impl Database { .get_sealed_block_by_id(block_id)? .map(|Sealed { entity: block, .. }| block.into_inner().1)) } + + pub fn get_transactions_on_blocks( + &self, + block_ids: &Vec, + ) -> StorageResult>> { + let transactions = block_ids + .iter() + .map(|block_id| { + let transactions = self + .get_sealed_block_by_id(block_id)? + .map(|Sealed { entity: block, .. }| block.into_inner().1); + Ok(transactions) + }) + .collect::>()?; + Ok(transactions) + } } diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index cd712b62e48..18330b9b4ba 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -14,6 +14,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, + services::p2p::TransactionData, }; use std::ops::Range; @@ -48,9 +49,9 @@ impl P2pDb for Database { fn get_transactions_2( &self, - _block_ids: Vec<&BlockId>, - ) -> StorageResult>> { - todo!() + block_ids: &Vec, + ) -> StorageResult>> { + self.get_transactions_on_blocks(block_ids) } } diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 0305836e738..c1b533f7a7a 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -213,6 +213,15 @@ impl RequestResponseConverter for PostcardCodec { Ok(ResponseMessage::Transactions(response)) } + NetworkResponse::Transactions2(tx_bytes) => { + let response = if let Some(tx_bytes) = tx_bytes { + Some(self.deserialize(tx_bytes)?) 
+ } else { + None + }; + + Ok(ResponseMessage::Transactions2(response)) + } NetworkResponse::Headers(headers_bytes) => { let response = headers_bytes .as_ref() diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index ca247a4cd9e..614fa47d725 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1595,7 +1595,7 @@ mod tests { let expected = arbitrary_headers_for_range(range.clone()); - if let Ok((_, sealed_headers)) = response_message { + if let Ok(sealed_headers) = response_message { let check = expected.iter().zip(sealed_headers.unwrap().iter()).all(|(a, b)| eq_except_metadata(a, b)); let _ = tx_test_end.send(check).await; } else { @@ -1609,6 +1609,22 @@ mod tests { assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions(tx_orchestrator)).is_ok()); let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { + let response_message = rx_orchestrator.await; + + if let Ok(Some(transactions)) = response_message { + let _ = tx_test_end.send(transactions.len() == 5).await; + } else { + tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); + let _ = tx_test_end.send(false).await; + } + }); + } + RequestMessage::Transactions2(_) => { + let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions2(tx_orchestrator)).is_ok()); + let tx_test_end = tx_test_end.clone(); + tokio::spawn(async move { let response_message = rx_orchestrator.await; @@ -1650,6 +1666,10 @@ mod tests { let transactions = (0..5).map(|_| Transaction::default_test_tx()).collect(); let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); } + RequestMessage::Transactions2(_) => { + let transactions = (0..5).map(|_| Transaction::default_test_tx()).collect(); + let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); + } } } diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 0c14057a042..d715fe05137 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -8,6 +8,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, + services::p2p::TransactionData, }; use std::ops::Range; @@ -34,8 +35,8 @@ pub trait P2pDb: Send + Sync { fn get_transactions_2( &self, - block_ids: Vec<&BlockId>, - ) -> StorageResult>>; + block_ids: &Vec, + ) -> StorageResult>>; } pub trait BlockHeightImporter: Send + Sync { diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 33a2a79c02b..545359497f2 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -53,6 +53,7 @@ pub enum ResponseMessage { SealedBlock(Box>), SealedHeaders(Option>), Transactions(Option>), + Transactions2(Option>), } /// Holds oneshot channels for specific responses @@ -71,6 +72,7 @@ pub enum NetworkResponse { Block(Option>), Headers(Option>), Transactions(Option>), + Transactions2(Option>), } /// Initial state of the `ResponseMessage` prior to having its inner value serialized @@ -80,7 +82,7 @@ pub enum OutboundResponse { Block(Option>), SealedHeaders(Option>), Transactions(Option>>), - Transactions2(Option>>), + Transactions2(Option>>), } #[derive(Debug)] diff --git a/crates/services/p2p/src/service.rs 
b/crates/services/p2p/src/service.rs index d6e9b1fe888..c2a5775d812 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -329,20 +329,19 @@ where } } } - RequestMessage::Transactions2(_block_ids) => { - // match self.db.get_transactions(&block_id) { - // Ok(maybe_transactions) => { - // let response = maybe_transactions.map(Arc::new); - // let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); - // }, - // Err(e) => { - // tracing::error!("Failed to get transactions for block {:?}: {:?}", block_id, e); - // let response = None; - // let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); - // return Err(e.into()) - // } - // } - todo!() + RequestMessage::Transactions2(block_ids) => { + match self.db.get_transactions_2(&block_ids) { + Ok(maybe_transactions) => { + let response = maybe_transactions.map(Arc::new); + let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); + }, + Err(e) => { + tracing::error!("Failed to get transactions for blocks {:?}: {:?}", block_ids, e); + let response = None; + let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); + return Err(e.into()) + } + } } RequestMessage::SealedHeaders(range) => { let max_len = self.max_headers_per_request.try_into().expect("u32 should always fit into usize"); @@ -674,10 +673,17 @@ pub mod tests { fn get_transactions( &self, - _block_id: &fuel_core_types::blockchain::primitives::BlockId, + _block_id: &BlockId, ) -> StorageResult>> { unimplemented!() } + + fn get_transactions_2( + &self, + _block_ids: &Vec, + ) -> StorageResult>> { + unimplemented!() + } } #[derive(Clone, Debug)] diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index a020e2f421e..9e6653edf93 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -22,10 +22,7 @@ use fuel_core_types::{ self, // blockchain::consensus::Sealed, fuel_types::BlockHeight, - services::p2p::{ - PeerId, - TransactionData, - }, + services::p2p::PeerId, }; use fuel_core_types::{ blockchain::{ @@ -550,6 +547,7 @@ where }); let peer_id = block_ids.peer_id.clone(); let maybe_txs = p2p + // flattened vec of all transactions Vec of all transactions .get_transactions_2(block_ids) .await .trace_err("Failed to get transactions")? @@ -564,12 +562,11 @@ where let headers = headers.data; let iter = headers.into_iter().zip(transaction_data); let mut blocks = vec![]; - for (block_header, td) in iter { + for (block_header, transactions) in iter { let SealedBlockHeader { consensus, entity: header, } = block_header; - let TransactionData { transactions, .. 
} = td; let block = Block::try_from_executed(header, transactions) .ok_or(anyhow!("Failed to create a new block: Transactions do not match the header.")) .map(|block| { diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 1cd373b3278..73159bdd970 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -50,7 +50,8 @@ impl PeerToPeerPort for PressurePeerToPeer { _block_height: BlockHeight, ) -> anyhow::Result> { let mut rng = StdRng::seed_from_u64(0xF00DF00D); - let peer_id = PeerId::from(rng.gen::<[u8; 32]>().to_vec()); + let bytes = rng.gen::<[u8; 32]>().to_vec(); + let peer_id = PeerId::from(bytes); Ok(Some(peer_id)) } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 38f7c57f120..8a42101d067 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -14,18 +14,29 @@ use crate::{ }, }; use fuel_core_types::fuel_tx::Transaction; -use test_case::test_case; +// use test_case::test_case; use super::*; -#[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] -#[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] +// #[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] +// #[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] +// #[tokio::test] +// async fn test_import(state: State, mocks: Mocks) -> (State, bool) { +// let state = SharedMutex::new(state); +// test_import_inner(state, mocks, None).await +// } + #[tokio::test] -async fn test_import(state: State, mocks: Mocks) -> (State, bool) { +async fn test_import_3_to_5() { + let state = State::new(3, 5); + let mocks = Mocks::times([2]); let state = SharedMutex::new(state); - test_import_inner(state, mocks, None).await + let v = test_import_inner(state, mocks, None).await; + let expected = (State::new(5, None), true); + assert_eq!(v, expected); } +#[ignore] #[tokio::test] async fn import__signature_fails_on_header_5_only() { // given @@ -53,6 +64,7 @@ async fn import__signature_fails_on_header_5_only() { assert_eq!((State::new(4, None), true), res); } +#[ignore] #[tokio::test] async fn import__signature_fails_on_header_4_only() { // given @@ -80,6 +92,7 @@ async fn import__signature_fails_on_header_4_only() { assert_eq!((State::new(3, None), true), res); } +#[ignore] #[tokio::test] async fn import__header_not_found() { // given @@ -102,6 +115,7 @@ async fn import__header_not_found() { assert_eq!((State::new(3, None), true), res); } +#[ignore] #[tokio::test] async fn import__header_response_incomplete() { // given @@ -124,6 +138,7 @@ async fn import__header_response_incomplete() { assert_eq!((State::new(3, None), false), res); } +#[ignore] #[tokio::test] async fn import__header_5_not_found() { // given @@ -149,6 +164,7 @@ async fn import__header_5_not_found() { assert_eq!((State::new(4, None), true), res); } +#[ignore] #[tokio::test] async fn import__header_4_not_found() { // given @@ -174,6 +190,7 @@ async fn import__header_4_not_found() { assert_eq!((State::new(3, None), true), res); } +#[ignore] #[tokio::test] async fn import__transactions_not_found() { // given @@ -204,6 +221,7 @@ async fn import__transactions_not_found() { assert_eq!((State::new(3, 
None), true), res); } +#[ignore] #[tokio::test] async fn import__transactions_not_found_for_header_4() { // given @@ -240,6 +258,7 @@ async fn import__transactions_not_found_for_header_4() { assert_eq!((State::new(3, None), true), res); } +#[ignore] #[tokio::test] async fn import__transactions_not_found_for_header_5() { // given @@ -276,6 +295,7 @@ async fn import__transactions_not_found_for_header_5() { assert_eq!((State::new(4, None), true), res); } +#[ignore] #[tokio::test] async fn import__p2p_error() { // given @@ -298,6 +318,7 @@ async fn import__p2p_error() { assert_eq!((State::new(3, None), false), res); } +#[ignore] #[tokio::test] async fn import__p2p_error_on_4_transactions() { // given @@ -334,6 +355,7 @@ async fn import__p2p_error_on_4_transactions() { assert_eq!((State::new(3, None), false), res); } +#[ignore] #[tokio::test] async fn import__p2p_error_on_5_transactions() { // given @@ -370,6 +392,7 @@ async fn import__p2p_error_on_5_transactions() { assert_eq!((State::new(4, None), false), res); } +#[ignore] #[tokio::test] async fn import__consensus_error_on_4() { // given @@ -403,6 +426,7 @@ async fn import__consensus_error_on_4() { assert_eq!((State::new(3, None), false), res); } +#[ignore] #[tokio::test] async fn import__consensus_error_on_5() { // given @@ -436,6 +460,7 @@ async fn import__consensus_error_on_5() { assert_eq!((State::new(4, None), false), res); } +#[ignore] #[tokio::test] async fn import__execution_error_on_header_4() { // given @@ -465,6 +490,7 @@ async fn import__execution_error_on_header_4() { assert_eq!((State::new(3, None), false), res); } +#[ignore] #[tokio::test] async fn import__execution_error_on_header_5() { // given @@ -494,6 +520,7 @@ async fn import__execution_error_on_header_5() { assert_eq!((State::new(4, None), false), res); } +#[ignore] #[tokio::test] async fn signature_always_fails() { // given @@ -517,6 +544,7 @@ async fn signature_always_fails() { assert_eq!((State::new(3, None), true), res); } +#[ignore] #[tokio::test] async fn import__can_work_in_two_loops() { // given @@ -527,8 +555,11 @@ async fn import__can_work_in_two_loops() { .times(2) .returning(move |range| { state.apply(|s| s.observe(6)); - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(peer_sourced_headers(Some(headers))) + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) }); p2p.expect_get_transactions() .times(3) @@ -599,6 +630,7 @@ async fn test_import_inner( (final_state, received_notify_signal) } +#[ignore] #[tokio::test] async fn import__happy_path_sends_good_peer_report() { // Given @@ -609,6 +641,7 @@ async fn import__happy_path_sends_good_peer_report() { .await; } +#[ignore] #[tokio::test] async fn import__multiple_blocks_happy_path_sends_good_peer_report() { // Given @@ -620,6 +653,7 @@ async fn import__multiple_blocks_happy_path_sends_good_peer_report() { .await; } +#[ignore] #[tokio::test] async fn import__missing_headers_sends_peer_report() { // Given @@ -631,6 +665,7 @@ async fn import__missing_headers_sends_peer_report() { .await; } +#[ignore] #[tokio::test] async fn import__bad_block_header_sends_peer_report() { // Given @@ -642,6 +677,7 @@ async fn import__bad_block_header_sends_peer_report() { .await; } +#[ignore] #[tokio::test] async fn import__missing_transactions_sends_peer_report() { // Given @@ -744,9 +780,15 @@ impl PeerReportTestBuider { } fn p2p(&self, expected_report: PeerReportReason) -> Arc { - let peer_id = 
self.shared_peer_id.clone(); let mut p2p = MockPeerToPeerPort::default(); + let peer_id = self.shared_peer_id.clone(); + p2p.expect_select_peer().times(1).returning(move |_| { + let peer_id = peer_id.clone(); + Ok(Some(peer_id.clone().into())) + }); + + let peer_id = self.shared_peer_id.clone(); if let Some(get_headers) = self.get_sealed_headers.clone() { p2p.expect_get_sealed_block_headers().returning(move |_| { Ok(peer_sourced_headers_peer_id( @@ -757,10 +799,13 @@ impl PeerReportTestBuider { } else { p2p.expect_get_sealed_block_headers() .returning(move |range| { - Ok(peer_sourced_headers_peer_id( - Some(range.clone().map(|h| empty_header(h.into())).collect()), - peer_id.clone().into(), - )) + dbg!(&range.peer_id); + let headers = range.map(|range| { + let headers = + range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) }); } @@ -864,24 +909,40 @@ impl DefaultMocks for MockConsensusPort { } impl DefaultMocks for MockPeerToPeerPort { - fn times(t: T) -> Self + fn times(_t: T) -> Self where T: IntoIterator + Clone, ::IntoIter: Clone, { let mut p2p = MockPeerToPeerPort::default(); - let mut t = t.into_iter().cycle(); + // let mut t = t.into_iter().cycle(); + + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + dbg!(&peer_id); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - Ok(peer_sourced_headers(Some( - range.clone().map(|h| empty_header(h.into())).collect(), - ))) + dbg!(&range); + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) }); - p2p.expect_get_transactions() - .times(t.next().unwrap()) - .returning(|_| Ok(Some(vec![]))); + + // p2p.expect_get_transactions() + // .times(t.next().unwrap()) + // .returning(|_| Ok(Some(vec![]))); + + p2p.expect_get_transactions_2().times(1).returning(|_| { + dbg!("ADFADDSDF"); + Ok(Some(vec![])) + }); p2p } } diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 64dad7c22b1..4fceaa381b1 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -8,10 +8,7 @@ use futures::{ }; use crate::{ - import::test_helpers::{ - empty_header, - peer_sourced_headers, - }, + import::test_helpers::empty_header, ports::{ MockBlockImporterPort, MockConsensusPort, @@ -38,13 +35,15 @@ async fn test_new_service() { .into_boxed() }); p2p.expect_get_sealed_block_headers().returning(|range| { - Ok(peer_sourced_headers(Some( - range + let headers = range.map(|range| { + let headers = range .clone() .map(BlockHeight::from) .map(empty_header) - .collect(), - ))) + .collect::>(); + Some(headers) + }); + Ok(headers) }); p2p.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 1917c1b7bfd..f14e3b09855 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -1,7 +1,7 @@ //! 
Contains types related to P2P data use crate::{ - blockchain::primitives::BlockId, + // blockchain::primitives::BlockId, fuel_tx::Transaction, fuel_types::BlockHeight, }; @@ -11,13 +11,15 @@ use std::fmt::Debug; pub mod peer_reputation; /// Maps BlockId to its transactions -#[derive(Debug)] -pub struct TransactionData { - /// Block id - pub block_id: BlockId, - /// transactions - pub transactions: Vec, -} +// #[derive(Debug, Clone)] +// #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +// pub struct TransactionData { +// /// Block id +// pub block_id: BlockId, +// /// transactions +// pub transactions: Vec, +// } +pub type TransactionData = Vec; /// Lightweight representation of gossipped data that only includes IDs #[derive(Debug, Clone, Hash, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] From 347b15eb6d5611caf92bcbc3d814d22f5f6b14dd Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 18:16:19 -0400 Subject: [PATCH 18/87] Update tests.rs --- crates/services/sync/src/import/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 8a42101d067..b3fd3a1a5ce 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -36,7 +36,6 @@ async fn test_import_3_to_5() { assert_eq!(v, expected); } -#[ignore] #[tokio::test] async fn import__signature_fails_on_header_5_only() { // given From d103e0f7b88e0d0fedef23f89e23161b9f8be36b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 18:40:58 -0400 Subject: [PATCH 19/87] WIP Tests --- crates/services/sync/src/import.rs | 5 +++- crates/services/sync/src/import/tests.rs | 36 +++++++++++++----------- crates/types/src/services/p2p.rs | 1 + 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 9e6653edf93..71261bb6f89 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -226,7 +226,10 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! 
{ // Stream a batch of blocks - blocks = stream_block_batch => blocks, + blocks = stream_block_batch => { + dbg!(&blocks); + blocks + }, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(vec![]) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index b3fd3a1a5ce..caf3389e362 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -13,18 +13,21 @@ use crate::{ PeerReportReason, }, }; -use fuel_core_types::fuel_tx::Transaction; -// use test_case::test_case; +use fuel_core_types::{ + fuel_tx::Transaction, + services::p2p::TransactionData, +}; +use test_case::test_case; use super::*; -// #[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] -// #[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] -// #[tokio::test] -// async fn test_import(state: State, mocks: Mocks) -> (State, bool) { -// let state = SharedMutex::new(state); -// test_import_inner(state, mocks, None).await -// } +#[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] +#[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] +#[tokio::test] +async fn test_import(state: State, mocks: Mocks) -> (State, bool) { + let state = SharedMutex::new(state); + test_import_inner(state, mocks, None).await +} #[tokio::test] async fn test_import_3_to_5() { @@ -934,14 +937,13 @@ impl DefaultMocks for MockPeerToPeerPort { Ok(headers) }); - // p2p.expect_get_transactions() - // .times(t.next().unwrap()) - // .returning(|_| Ok(Some(vec![]))); - - p2p.expect_get_transactions_2().times(1).returning(|_| { - dbg!("ADFADDSDF"); - Ok(Some(vec![])) - }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); p2p } } diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index f14e3b09855..724736a42c1 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -20,6 +20,7 @@ pub mod peer_reputation; // pub transactions: Vec, // } pub type TransactionData = Vec; + /// Lightweight representation of gossipped data that only includes IDs #[derive(Debug, Clone, Hash, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] From 2e4bdebb960974edd3480c449bb9f055523edf0e Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 19:19:19 -0400 Subject: [PATCH 20/87] WIP --- crates/services/sync/src/import.rs | 43 +++++++++++++++++++++--------- crates/types/src/services/p2p.rs | 10 ------- 2 files changed, 31 insertions(+), 22 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 71261bb6f89..f1053cf2645 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -409,18 +409,38 @@ async fn get_sealed_blocks< p2p: Arc
<P>
, consensus: Arc, ) -> anyhow::Result> { - let SourcePeer { peer_id, data } = &headers; - for header in data { - // Check the consensus is valid on this header. - if !check_sealed_header(header, peer_id.clone(), p2p.clone(), consensus.clone()) - .await? - { - return Ok(vec![]) - } + let SourcePeer { peer_id, data } = headers; + + let stream = futures::stream::iter(data) + .then(|header| async { + let validity = check_sealed_header( + &header, + peer_id.clone(), + p2p.clone(), + consensus.clone(), + ) + .await; + validity.map(|validity| (validity, header)) + }) + .try_filter_map(|(validity, header)| async { + let header = if validity { Some(header) } else { None }; + Ok(header) + }) + .collect::>>() + .await?; + + // for header in data { + // // Check the consensus is valid on this header. + // if !check_sealed_header(header, peer_id.clone(), p2p.clone(), consensus.clone()) + // .await? + // { + // return Ok(vec![]) + // } + // + // // Wait for the da to be at least the da height on the header. + // consensus.await_da_height(&header.entity.da_height).await? + // } - // Wait for the da to be at least the da height on the header. - consensus.await_da_height(&header.entity.da_height).await? - } get_blocks(p2p.as_ref(), headers).await } @@ -550,7 +570,6 @@ where }); let peer_id = block_ids.peer_id.clone(); let maybe_txs = p2p - // flattened vec of all transactions Vec of all transactions .get_transactions_2(block_ids) .await .trace_err("Failed to get transactions")? diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 724736a42c1..a5dfff623af 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -91,16 +91,6 @@ impl SourcePeer { } } -// impl FromIterator> for SourcePeer> { -// fn from_iter>>(iter: U) -> Self { -// let mut c = Vec::new(); -// for i in iter { -// c.push(i); -// } -// c -// } -// } - impl GossipData { /// Construct a new gossip message pub fn new( From b0e495ad6b18eb221003937f59df0d7606dc18c8 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 20:16:41 -0400 Subject: [PATCH 21/87] WIP --- crates/services/sync/src/import.rs | 57 +++++++++++++----------- crates/services/sync/src/import/tests.rs | 1 - 2 files changed, 31 insertions(+), 27 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index f1053cf2645..99fcfd3cf6d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -410,36 +410,41 @@ async fn get_sealed_blocks< consensus: Arc, ) -> anyhow::Result> { let SourcePeer { peer_id, data } = headers; + let p = peer_id.clone(); + let p2p_ = p2p.clone(); + let consensus = consensus.clone(); + let headers = futures::stream::iter(data) + .then(move |header| { + let p = p.clone(); + let p2p = p2p_.clone(); + let consensus = consensus.clone(); + async move { + let p = p.clone(); + let p2p = p2p.clone(); + let consensus = consensus.clone(); + let validity = + check_sealed_header(&header, p, p2p.clone(), consensus.clone()) + .await + .and_then(|validity| { + validity + .then(|| ()) + .ok_or_else(|| anyhow!("sealed header not valid")) + }); - let stream = futures::stream::iter(data) - .then(|header| async { - let validity = check_sealed_header( - &header, - peer_id.clone(), - p2p.clone(), - consensus.clone(), - ) - .await; - validity.map(|validity| (validity, header)) - }) - .try_filter_map(|(validity, header)| async { - let header = if validity { Some(header) } else { None }; - Ok(header) + // Wait for the da to be at least 
the da height on the header. + if validity.is_ok() { + consensus.await_da_height(&header.entity.da_height).await?; + } + + validity.map(|_| header) + } }) - .collect::>>() + .into_scan_err() + .scan_err() + .try_collect() .await?; - // for header in data { - // // Check the consensus is valid on this header. - // if !check_sealed_header(header, peer_id.clone(), p2p.clone(), consensus.clone()) - // .await? - // { - // return Ok(vec![]) - // } - // - // // Wait for the da to be at least the da height on the header. - // consensus.await_da_height(&header.entity.da_height).await? - // } + let headers = peer_id.bind(headers); get_blocks(p2p.as_ref(), headers).await } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index caf3389e362..e878f3606ef 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -66,7 +66,6 @@ async fn import__signature_fails_on_header_5_only() { assert_eq!((State::new(4, None), true), res); } -#[ignore] #[tokio::test] async fn import__signature_fails_on_header_4_only() { // given From 0fd450039d26d0092efac8098aefe5e0d4633779 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 20:39:05 -0400 Subject: [PATCH 22/87] WIP --- crates/services/sync/src/import.rs | 5 +---- crates/services/sync/src/import/tests.rs | 23 +++++++++++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 99fcfd3cf6d..82a5c8950f4 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -226,10 +226,7 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { // Stream a batch of blocks - blocks = stream_block_batch => { - dbg!(&blocks); - blocks - }, + blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(vec![]) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index e878f3606ef..1ad6017e165 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -72,17 +72,33 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|h| Ok(**h.entity.height() != 4)); consensus_port .expect_await_da_height() - .times(1) + .times(0) .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|range| { + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) + }); + p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p, executor: DefaultMocks::times([0]), }; @@ -921,7 +937,6 @@ impl DefaultMocks for MockPeerToPeerPort { p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; let peer_id = bytes.into(); - dbg!(&peer_id); Ok(Some(peer_id)) }); From 6bb319e8ea2bd89639a1b21f445f4bdfef4ea13e Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 21:07:51 -0400 
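// Illustrative sketch (stand-in types, not fuel-core's exact API) of the
// `SourcePeer` wrapper that the mocks above rely on when they call
// `range.map(..)` and `peer_id.bind(..)`: data tagged with the peer it came
// from, with helpers that transform the payload while keeping the peer id
// attached. `PeerId` here is a simplified assumption.
#[derive(Debug, Clone, PartialEq)]
struct PeerId(Vec<u8>);

#[derive(Debug, Clone, PartialEq)]
struct SourcePeer<T> {
    peer_id: PeerId,
    data: T,
}

impl PeerId {
    // Attach this peer id to some data.
    fn bind<T>(self, data: T) -> SourcePeer<T> {
        SourcePeer { peer_id: self, data }
    }
}

impl<T> SourcePeer<T> {
    // Transform the payload, preserving the peer id.
    fn map<U>(self, f: impl FnOnce(T) -> U) -> SourcePeer<U> {
        SourcePeer {
            peer_id: self.peer_id,
            data: f(self.data),
        }
    }
}

fn main() {
    let peer = PeerId(vec![1, 2, 3, 4, 5]);
    // e.g. turn a peer-tagged height range into peer-tagged "headers".
    let range = peer.clone().bind(3u32..6);
    let headers = range.map(|r| r.collect::<Vec<u32>>());
    assert_eq!(headers.peer_id, peer);
    assert_eq!(headers.data, vec![3, 4, 5]);
}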
Subject: [PATCH 23/87] Update import.rs --- crates/services/sync/src/import.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 82a5c8950f4..bae614a993d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -420,13 +420,7 @@ async fn get_sealed_blocks< let p2p = p2p.clone(); let consensus = consensus.clone(); let validity = - check_sealed_header(&header, p, p2p.clone(), consensus.clone()) - .await - .and_then(|validity| { - validity - .then(|| ()) - .ok_or_else(|| anyhow!("sealed header not valid")) - }); + check_sealed_header(&header, p, p2p.clone(), consensus.clone()).await; // Wait for the da to be at least the da height on the header. if validity.is_ok() { From 95764baccb2b78a764828dbf2dbab864408a7cfc Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 21:35:13 -0400 Subject: [PATCH 24/87] WIP --- crates/services/sync/src/import.rs | 23 ++++++++++++++--------- crates/services/sync/src/import/tests.rs | 6 +++--- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index bae614a993d..0d7a75392ab 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -226,7 +226,10 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { // Stream a batch of blocks - blocks = stream_block_batch => blocks, + blocks = stream_block_batch => { + dbg!(&blocks); + blocks + }, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(vec![]) @@ -289,6 +292,7 @@ where // find any errors. // Fold the stream into a count and any errors. .fold((0usize, Ok(())), |(count, res), result| async move { + dbg!(&result); match result { Ok(_) => (count + 1, res), Err(e) => (count, Err(e)), @@ -314,7 +318,7 @@ async fn get_block_stream< consensus: Arc, ) -> impl Stream>>> { get_header_stream(peer.clone(), range, params, p2p.clone()) - .chunks(10) + .chunks(1) .map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); @@ -420,18 +424,19 @@ async fn get_sealed_blocks< let p2p = p2p.clone(); let consensus = consensus.clone(); let validity = - check_sealed_header(&header, p, p2p.clone(), consensus.clone()).await; + check_sealed_header(&header, p, p2p.clone(), consensus.clone()) + .await?; - // Wait for the da to be at least the da height on the header. 
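// Illustrative sketch of the header-validation stream shape used in
// `get_sealed_blocks` above: check each header asynchronously, keep the valid
// prefix, and stop at the first invalid or failing header. This is a
// self-contained approximation with stand-in names (`Header`, `check`), not
// the crate's real `check_sealed_header` / `await_da_height` ports.
use futures::{stream, StreamExt, TryStreamExt};

#[derive(Debug)]
struct Header {
    height: u32,
}

// Stand-in for the consensus check; pretend height 4 fails validation.
async fn check(header: &Header) -> anyhow::Result<bool> {
    Ok(header.height != 4)
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let headers = vec![
        Header { height: 3 },
        Header { height: 4 },
        Header { height: 5 },
    ];
    let valid: Vec<Header> = stream::iter(headers)
        .then(|h| async move { check(&h).await.map(|ok| (ok, h)) })
        // Stop at the first invalid header; an Err from `check` also ends the
        // stream and is propagated by `try_collect`.
        .try_take_while(|(ok, _)| futures::future::ready(Ok::<bool, anyhow::Error>(*ok)))
        .map_ok(|(_, h)| h)
        .try_collect()
        .await?;
    // Only the header below the invalid one survives.
    assert_eq!(valid.len(), 1);
    Ok(())
}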
- if validity.is_ok() { + if validity { consensus.await_da_height(&header.entity.da_height).await?; + Ok(Some(header)) + } else { + Ok(None) } - - validity.map(|_| header) } }) - .into_scan_err() - .scan_err() + .into_scan_none_or_err() + .scan_none_or_err() .try_collect() .await?; diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 1ad6017e165..5f78f5df29d 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -72,11 +72,11 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(1) + .times(2) .returning(|h| Ok(**h.entity.height() != 4)); consensus_port .expect_await_da_height() - .times(0) + .times(1) .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { @@ -93,7 +93,7 @@ async fn import__signature_fails_on_header_4_only() { }); Ok(headers) }); - p2p.expect_get_transactions_2().times(0); + p2p.expect_get_transactions_2().times(2); let state = State::new(3, 5).into(); let mocks = Mocks { From 7bfa573d1ae235e25b85164e0defb307ea3f8c6c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 21:44:25 -0400 Subject: [PATCH 25/87] Update tests.rs --- crates/services/sync/src/import/tests.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 5f78f5df29d..1cd12f67514 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -93,7 +93,13 @@ async fn import__signature_fails_on_header_4_only() { }); Ok(headers) }); - p2p.expect_get_transactions_2().times(2); + p2p.expect_get_transactions_2() + .times(2) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { From b4266bb9f542c0b5d03c68fac86eb3e5d658ca7d Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 21:57:15 -0400 Subject: [PATCH 26/87] WIP --- crates/services/sync/src/import.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 0d7a75392ab..95410424833 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -240,8 +240,8 @@ where .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. // Note the error will be returned but the stream will close. - .into_scan_err() - .scan_err() + .into_scan_none_or_err() + .scan_none_or_err() // Continue the stream until the shutdown signal is received. .take_until({ let mut s = shutdown.clone(); @@ -316,7 +316,7 @@ async fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream>>> { +) -> impl Stream>>>> { get_header_stream(peer.clone(), range, params, p2p.clone()) .chunks(1) .map({ @@ -409,7 +409,7 @@ async fn get_sealed_blocks< headers: SourcePeer>, p2p: Arc
<P>
, consensus: Arc, -) -> anyhow::Result> { +) -> anyhow::Result>> { let SourcePeer { peer_id, data } = headers; let p = peer_id.clone(); let p2p_ = p2p.clone(); From 84001cc856d8c5bec15841a6b25bf259113f86cf Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 22:59:06 -0400 Subject: [PATCH 27/87] WIP --- crates/services/sync/src/import.rs | 55 ++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 14 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 95410424833..75118f8750b 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -240,8 +240,8 @@ where .buffered(params.block_stream_buffer_size) // Continue the stream unless an error or none occurs. // Note the error will be returned but the stream will close. - .into_scan_none_or_err() - .scan_none_or_err() + .into_scan_empty_or_err() + .scan_empty_or_err() // Continue the stream until the shutdown signal is received. .take_until({ let mut s = shutdown.clone(); @@ -316,7 +316,7 @@ async fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream>>>> { +) -> impl Stream>>> { get_header_stream(peer.clone(), range, params, p2p.clone()) .chunks(1) .map({ @@ -409,7 +409,7 @@ async fn get_sealed_blocks< headers: SourcePeer>, p2p: Arc
<P>
, consensus: Arc, -) -> anyhow::Result>> { +) -> anyhow::Result> { let SourcePeer { peer_id, data } = headers; let p = peer_id.clone(); let p2p_ = p2p.clone(); @@ -590,15 +590,15 @@ where consensus, entity: header, } = block_header; - let block = Block::try_from_executed(header, transactions) - .ok_or(anyhow!("Failed to create a new block: Transactions do not match the header.")) - .map(|block| { - SealedBlock { - entity: block, - consensus, + let block = Block::try_from_executed(header, transactions).map(|block| { + SealedBlock { + entity: block, + consensus, } }); - if block.is_err() { + if let Some(block) = block { + blocks.push(block); + } else { tracing::error!( "Failed to created block from header and transactions" ); @@ -609,13 +609,11 @@ where ) .await; } - blocks.push(block); } blocks } }; - let result = blocks.into_iter().collect(); - result + Ok(blocks) } #[tracing::instrument( @@ -656,6 +654,12 @@ trait StreamUtil: Sized { ScanNoneErr(self) } + /// Close the stream if an error occurs or a `None` is received. + /// Return the error if the stream closes. + fn into_scan_empty_or_err(self) -> ScanEmptyErr { + ScanEmptyErr(self) + } + /// Turn a stream of `Result` into a stream of `Result`. /// Close the stream if an error occurs. /// Return the error if the stream closes. @@ -667,6 +671,7 @@ trait StreamUtil: Sized { impl StreamUtil for S {} struct ScanNoneErr(S); +struct ScanEmptyErr(S); struct ScanErr(S); impl ScanNoneErr { @@ -688,6 +693,28 @@ impl ScanNoneErr { } } +impl ScanEmptyErr { + /// Scan the stream for empty vector or errors. + fn scan_empty_or_err(self) -> impl Stream>> + where + S: Stream>> + Send + 'static, + { + let stream = self.0.boxed(); + futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { + if is_err { + None + } else { + let result = stream.next().await?; + is_err = result.is_err(); + result + .map(|v| v.is_empty().then(|| v)) + .transpose() + .map(|result| (result, (is_err, stream))) + } + }) + } +} + impl ScanErr { /// Scan the stream for errors. 
fn scan_err(self) -> impl Stream> From cec91d85eb742d743d55a2c195394b4a9ec3a019 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 23:07:53 -0400 Subject: [PATCH 28/87] Update import.rs --- crates/services/sync/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 75118f8750b..f553183d63e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -707,7 +707,7 @@ impl ScanEmptyErr { let result = stream.next().await?; is_err = result.is_err(); result - .map(|v| v.is_empty().then(|| v)) + .map(|v| (!v.is_empty()).then(|| v)) .transpose() .map(|result| (result, (is_err, stream))) } From f58defe20325cc2b1d3108274df86bf336b29ac4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 15 Sep 2023 23:20:31 -0400 Subject: [PATCH 29/87] Update tests.rs --- crates/services/sync/src/import/tests.rs | 26 ++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 1cd12f67514..8c3af8dea2e 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -31,7 +31,7 @@ async fn test_import(state: State, mocks: Mocks) -> (State, bool) { #[tokio::test] async fn test_import_3_to_5() { - let state = State::new(3, 5); + let state = State::new(0, 5); let mocks = Mocks::times([2]); let state = SharedMutex::new(state); let v = test_import_inner(state, mocks, None).await; @@ -51,11 +51,33 @@ async fn import__signature_fails_on_header_5_only() { .expect_await_da_height() .times(1) .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|range| { + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) + }); + p2p.expect_get_transactions_2() + .times(2) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p, executor: DefaultMocks::times([1]), }; From 00a682fc0dab85289a70ebc1ec82fcd9d02a0dcb Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 18 Sep 2023 19:48:37 -0400 Subject: [PATCH 30/87] WIP --- crates/services/p2p/src/p2p_service.rs | 2 +- crates/services/sync/src/import.rs | 192 +++++++++------- crates/services/sync/src/import/tests.rs | 271 +++++++++++++++-------- 3 files changed, 296 insertions(+), 169 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 614fa47d725..105f681532b 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1557,7 +1557,7 @@ mod tests { tokio::select! 
{ message_sent = rx_test_end.recv() => { // we received a signal to end the test - assert!(message_sent.unwrap(), "Receuved incorrect or missing missing messsage"); + assert!(message_sent.unwrap(), "Received incorrect or missing missing message"); break; } node_a_event = node_a.next_event() => { diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index f553183d63e..b43905e1673 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -8,7 +8,6 @@ use futures::{ }; use std::{ future::Future, - // iter, ops::RangeInclusive, sync::Arc, }; @@ -216,6 +215,7 @@ where consensus.clone(), ) .await; + let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); @@ -226,10 +226,7 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { // Stream a batch of blocks - blocks = stream_block_batch => { - dbg!(&blocks); - blocks - }, + blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(vec![]) @@ -238,7 +235,7 @@ where }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) - // Continue the stream unless an error or none occurs. + // Continue the stream unless an error or empty batch occurs. // Note the error will be returned but the stream will close. .into_scan_empty_or_err() .scan_empty_or_err() @@ -263,10 +260,13 @@ where let peer = peer.clone(); async move { let executor = executor.clone(); + dbg!(&res); let sealed_blocks = res?; + dbg!(&sealed_blocks); let iter = futures::stream::iter(sealed_blocks); let res = iter.then(|sealed_block| async { let executor = executor.clone(); + dbg!(&sealed_block); execute_and_commit(executor.as_ref(), &state, sealed_block).await }).try_collect::>().await; match &res { @@ -317,26 +317,53 @@ async fn get_block_stream< p2p: Arc
<P>
, consensus: Arc, ) -> impl Stream>>> { - get_header_stream(peer.clone(), range, params, p2p.clone()) - .chunks(1) + let Config { + header_batch_size, .. + } = params; + let header_stream = get_header_stream(peer.clone(), range, params, p2p.clone()); + let peer_ = peer.clone(); + let p2p_ = p2p.clone(); + let consensus_ = consensus.clone(); + let generator = futures::stream::repeat_with(move || { + (peer_.clone(), p2p_.clone(), consensus_.clone()) + }); + let iter = header_stream.zip(generator); + let i = iter + .then(|(header, (peer, p2p, consensus))| async move { + let header = header?; + let validity = + check_sealed_header(&header, peer, p2p.clone(), consensus.clone()) + .await?; + if validity { + consensus.await_da_height(&header.entity.da_height).await?; + Ok(Some(header)) + } else { + Ok(None) + } + }) + .into_scan_none_or_err() + .scan_none_or_err() + .chunks(*header_batch_size as usize) .map({ let p2p = p2p.clone(); let consensus_port = consensus.clone(); let peer = peer.clone(); - move |batch| { + move |headers| { { let p2p = p2p.clone(); - let consensus_port = consensus_port.clone(); + let _consensus_port = consensus_port.clone(); let peer = peer.clone(); - let batch = - batch.into_iter().filter_map(|header| header.ok()).collect(); - let headers = peer.bind(batch); - get_sealed_blocks(headers, p2p.clone(), consensus_port.clone()) + let headers = peer.bind(headers); + + get_blocks(p2p, headers) } // .instrument(tracing::debug_span!("consensus_and_transactions")) // .in_current_span() } - }) + }); + + i + // i.into_scan_none_or_err().scan_none_or_err() } fn get_header_stream( @@ -344,7 +371,6 @@ fn get_header_stream( range: RangeInclusive, params: &Config, p2p: Arc
<P>
, - // ) -> impl Stream>> { ) -> impl Stream> { let Config { header_batch_size, .. @@ -402,48 +428,16 @@ async fn check_sealed_header< Ok(validity) } -async fn get_sealed_blocks< - P: PeerToPeerPort + Send + Sync + 'static, - C: ConsensusPort + Send + Sync + 'static, ->( - headers: SourcePeer>, - p2p: Arc
<P>
, - consensus: Arc, -) -> anyhow::Result> { - let SourcePeer { peer_id, data } = headers; - let p = peer_id.clone(); - let p2p_ = p2p.clone(); - let consensus = consensus.clone(); - let headers = futures::stream::iter(data) - .then(move |header| { - let p = p.clone(); - let p2p = p2p_.clone(); - let consensus = consensus.clone(); - async move { - let p = p.clone(); - let p2p = p2p.clone(); - let consensus = consensus.clone(); - let validity = - check_sealed_header(&header, p, p2p.clone(), consensus.clone()) - .await?; - - if validity { - consensus.await_da_height(&header.entity.da_height).await?; - Ok(Some(header)) - } else { - Ok(None) - } - } - }) - .into_scan_none_or_err() - .scan_none_or_err() - .try_collect() - .await?; - - let headers = peer_id.bind(headers); - - get_blocks(p2p.as_ref(), headers).await -} +// async fn get_sealed_blocks< +// P: PeerToPeerPort + Send + Sync + 'static, +// C: ConsensusPort + Send + Sync + 'static, +// >( +// headers: SourcePeer>>, +// p2p: Arc
<P>
, +// _consensus: Arc, +// ) -> anyhow::Result> { +// get_blocks(p2p.as_ref(), headers).await +// } /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. @@ -476,10 +470,8 @@ where range.start(), range.end() ); - let start = *range.start(); let end = *range.end() + 1; - let res = p2p .get_sealed_block_headers(peer.bind(start..end)) .await @@ -529,7 +521,10 @@ where headers } - Err(e) => vec![Err(e)], + Err(e) => { + dbg!(&e); + vec![Err(e)] + } }; futures::stream::iter(headers) } @@ -555,34 +550,43 @@ where err )] async fn get_blocks
<P>
( - p2p: &P, - headers: SourcePeer>, + p2p: Arc
<P>
, + headers: SourcePeer>>, ) -> anyhow::Result> where P: PeerToPeerPort + Send + Sync + 'static, { - // Request the transactions for this block. - - let block_ids = headers.as_ref().map(|headers| { - headers - .iter() - .map(|header| header.entity.id()) - .collect::>() - }); - let peer_id = block_ids.peer_id.clone(); + // Get transactions for the set of valid block ids + // Return the error as well if there is one + + let SourcePeer { + peer_id, + data: headers, + } = headers; + let headers = headers + .into_iter() + .filter_map(|r| r.ok()) + .collect::>(); + let block_ids = headers.iter().map(|header| header.entity.id()).collect(); + let block_ids = peer_id.clone().bind(block_ids); let maybe_txs = p2p .get_transactions_2(block_ids) .await .trace_err("Failed to get transactions")? .trace_none_warn("Could not find transactions for header"); - let blocks = match maybe_txs { + + match maybe_txs { None => { - report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions) - .await; - vec![] + report_peer( + p2p.as_ref(), + peer_id.clone(), + PeerReportReason::MissingTransactions, + ) + .await; + Ok(vec![]) } Some(transaction_data) => { - let headers = headers.data; + let headers = headers; let iter = headers.into_iter().zip(transaction_data); let mut blocks = vec![]; for (block_header, transactions) in iter { @@ -603,17 +607,16 @@ where "Failed to created block from header and transactions" ); report_peer( - p2p, + p2p.as_ref(), peer_id.clone(), PeerReportReason::InvalidTransactions, ) .await; } } - blocks + Ok(blocks) } - }; - Ok(blocks) + } } #[tracing::instrument( @@ -693,6 +696,33 @@ impl ScanNoneErr { } } +#[cfg(test)] +mod test { + use crate::import::StreamUtil; + use anyhow::anyhow; + use futures::StreamExt; + + #[tokio::test] + async fn test_it() { + let i = [Ok(Some(0)), Ok(Some(1)), Ok(None)]; + let stream = futures::stream::iter(i) + .into_scan_none_or_err() + .scan_none_or_err(); + let output = stream.collect::>().await; + println!("{:?}", output); + } + + // #[tokio::test] + // async fn test_it_2() { + // let i = [Ok(Some(0)), Ok(Some(1)), Err(anyhow!("err!"))]; + // let stream = futures::stream::iter(i) + // .into_scan_none_or_err() + // .scan_none_or_err(); + // let output = stream.collect::>>().await; + // println!("{:?}", output); + // } +} + impl ScanEmptyErr { /// Scan the stream for empty vector or errors. 
fn scan_empty_or_err(self) -> impl Stream>> diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 8c3af8dea2e..8d7fad47e6d 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -29,9 +29,19 @@ async fn test_import(state: State, mocks: Mocks) -> (State, bool) { test_import_inner(state, mocks, None).await } +#[tokio::test] +async fn test_import_0_to_5() { + let state = State::new(None, 5); + let mocks = Mocks::times([6]); + let state = SharedMutex::new(state); + let v = test_import_inner(state, mocks, None).await; + let expected = (State::new(5, None), true); + assert_eq!(v, expected); +} + #[tokio::test] async fn test_import_3_to_5() { - let state = State::new(0, 5); + let state = State::new(3, 5); let mocks = Mocks::times([2]); let state = SharedMutex::new(state); let v = test_import_inner(state, mocks, None).await; @@ -67,7 +77,7 @@ async fn import__signature_fails_on_header_5_only() { Ok(headers) }); p2p.expect_get_transactions_2() - .times(2) + .times(1) .returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| TransactionData::new()).collect(); @@ -94,11 +104,11 @@ async fn import__signature_fails_on_header_4_only() { let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|h| Ok(**h.entity.height() != 4)); consensus_port .expect_await_da_height() - .times(1) + .times(0) .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { @@ -116,7 +126,7 @@ async fn import__signature_fails_on_header_4_only() { Ok(headers) }); p2p.expect_get_transactions_2() - .times(2) + .times(1) .returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| TransactionData::new()).collect(); @@ -137,11 +147,15 @@ async fn import__signature_fails_on_header_4_only() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__header_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Ok(peer_sourced_headers(Some(Vec::new())))); @@ -160,11 +174,15 @@ async fn import__header_not_found() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__header_response_incomplete() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Ok(peer_sourced_headers(None))); @@ -183,17 +201,25 @@ async fn import__header_response_incomplete() { assert_eq!((State::new(3, None), false), res); } -#[ignore] #[tokio::test] async fn import__header_5_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Ok(peer_sourced_headers(Some(vec![empty_header(4.into())])))); - p2p.expect_get_transactions() + p2p.expect_get_transactions_2() .times(1) - .returning(|_| Ok(Some(vec![]))); + .returning(|block_ids| { + let data = block_ids.data; + let v = 
data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -209,17 +235,25 @@ async fn import__header_5_not_found() { assert_eq!((State::new(4, None), true), res); } -#[ignore] #[tokio::test] async fn import__header_4_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Ok(peer_sourced_headers(Some(vec![empty_header(5.into())])))); - p2p.expect_get_transactions() + p2p.expect_get_transactions_2() .times(0) - .returning(|_| Ok(Some(vec![]))); + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -235,11 +269,15 @@ async fn import__header_4_not_found() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__transactions_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { @@ -248,8 +286,8 @@ async fn import__transactions_not_found() { empty_header(5.into()), ]))) }); - p2p.expect_get_transactions() - .times(2) + p2p.expect_get_transactions_2() + .times(1) .returning(|_| Ok(None)); let state = State::new(3, 5).into(); @@ -266,11 +304,15 @@ async fn import__transactions_not_found() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__transactions_not_found_for_header_4() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { @@ -280,14 +322,18 @@ async fn import__transactions_not_found_for_header_4() { ]))) }); let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { - height += 1; - if height == 4 { - Ok(None) - } else { - Ok(Some(vec![])) - } - }); + p2p.expect_get_transactions_2() + .times(1) + .returning(move |block_ids| { + height += 1; + if height == 4 { + Ok(None) + } else { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + } + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -308,6 +354,11 @@ async fn import__transactions_not_found_for_header_4() { async fn import__transactions_not_found_for_header_5() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { @@ -316,15 +367,12 @@ async fn import__transactions_not_found_for_header_5() { empty_header(5.into()), ]))) }); - let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { - height += 1; - if height == 5 { - Ok(None) - } else { - Ok(Some(vec![])) - } - }); + p2p.expect_get_transactions_2() + .times(1) + .returning(move |_| { + let v = vec![TransactionData::new()]; + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -340,14 
+388,25 @@ async fn import__transactions_not_found_for_header_5() { assert_eq!((State::new(4, None), true), res); } -#[ignore] #[tokio::test] async fn import__p2p_error() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Err(anyhow::anyhow!("Some network error"))); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -368,6 +427,11 @@ async fn import__p2p_error() { async fn import__p2p_error_on_4_transactions() { // given let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { @@ -376,15 +440,9 @@ async fn import__p2p_error_on_4_transactions() { empty_header(5.into()), ]))) }); - let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { - height += 1; - if height == 4 { - Err(anyhow::anyhow!("Some network error")) - } else { - Ok(Some(vec![])) - } - }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|_| Err(anyhow::anyhow!("Some network error"))); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -400,51 +458,54 @@ async fn import__p2p_error_on_4_transactions() { assert_eq!((State::new(3, None), false), res); } -#[ignore] -#[tokio::test] -async fn import__p2p_error_on_5_transactions() { - // given - let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_get_sealed_block_headers() - .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) - }); - let mut height = 3; - p2p.expect_get_transactions().times(2).returning(move |_| { - height += 1; - if height == 5 { - Err(anyhow::anyhow!("Some network error")) - } else { - Ok(Some(vec![])) - } - }); - - let state = State::new(3, 5).into(); - let mocks = Mocks { - p2p, - consensus_port: DefaultMocks::times([2]), - executor: DefaultMocks::times([1]), - }; - - // when - let res = test_import_inner(state, mocks, None).await; - - // then - assert_eq!((State::new(4, None), false), res); -} +// #[tokio::test] +// async fn import__p2p_error_on_5_transactions() { +// // given +// let mut p2p = MockPeerToPeerPort::default(); +// p2p.expect_select_peer().times(1).returning(|_| { +// let bytes = vec![1u8, 2, 3, 4, 5]; +// let peer_id = bytes.into(); +// Ok(Some(peer_id)) +// }); +// p2p.expect_get_sealed_block_headers() +// .times(1) +// .returning(|_| { +// Ok(peer_sourced_headers(Some(vec![ +// empty_header(4.into()), +// empty_header(5.into()), +// ]))) +// }); +// let mut height = 3; +// p2p.expect_get_transactions().times(2).returning(move |_| { +// height += 1; +// if height == 5 { +// Err(anyhow::anyhow!("Some network error")) +// } else { +// Ok(Some(vec![])) +// } +// }); +// +// let state = State::new(3, 5).into(); +// let mocks = Mocks { +// p2p, +// consensus_port: DefaultMocks::times([2]), +// executor: DefaultMocks::times([1]), +// }; +// +// // when +// let res = test_import_inner(state, mocks, None).await; +// +// // then +// assert_eq!((State::new(4, None), false), res); +// } -#[ignore] #[tokio::test] async fn 
import__consensus_error_on_4() { // given let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|h| { if **h.entity.height() == 4 { Err(anyhow::anyhow!("Some consensus error")) @@ -454,13 +515,29 @@ async fn import__consensus_error_on_4() { }); consensus_port .expect_await_da_height() - .times(1) + .times(0) .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|_| { + Ok(peer_sourced_headers(Some(vec![ + empty_header(4.into()), + empty_header(5.into()), + ]))) + }); + p2p.expect_get_transactions_2().times(0); + let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p, executor: DefaultMocks::times([0]), }; @@ -471,7 +548,6 @@ async fn import__consensus_error_on_4() { assert_eq!((State::new(3, None), false), res); } -#[ignore] #[tokio::test] async fn import__consensus_error_on_5() { // given @@ -491,10 +567,32 @@ async fn import__consensus_error_on_5() { .times(1) .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|_| { + Ok(peer_sourced_headers(Some(vec![ + empty_header(4.into()), + empty_header(5.into()), + ]))) + }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); + let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port, - p2p: DefaultMocks::times([1]), + p2p, executor: DefaultMocks::times([1]), }; @@ -971,7 +1069,6 @@ impl DefaultMocks for MockPeerToPeerPort { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - dbg!(&range); let headers = range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); Some(headers) From 80ab2dca8f30834ff32c9f6a4e899ac8d63b4a42 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 18 Sep 2023 20:06:30 -0400 Subject: [PATCH 31/87] WIP --- crates/services/sync/src/import.rs | 75 ++++++++++++------------------ 1 file changed, 29 insertions(+), 46 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index b43905e1673..9aab135eb1d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -321,49 +321,32 @@ async fn get_block_stream< header_batch_size, .. 
} = params; let header_stream = get_header_stream(peer.clone(), range, params, p2p.clone()); - let peer_ = peer.clone(); - let p2p_ = p2p.clone(); - let consensus_ = consensus.clone(); - let generator = futures::stream::repeat_with(move || { - (peer_.clone(), p2p_.clone(), consensus_.clone()) - }); - let iter = header_stream.zip(generator); - let i = iter - .then(|(header, (peer, p2p, consensus))| async move { - let header = header?; - let validity = - check_sealed_header(&header, peer, p2p.clone(), consensus.clone()) - .await?; - if validity { - consensus.await_da_height(&header.entity.da_height).await?; - Ok(Some(header)) - } else { - Ok(None) - } - }) - .into_scan_none_or_err() - .scan_none_or_err() - .chunks(*header_batch_size as usize) - .map({ - let p2p = p2p.clone(); - let consensus_port = consensus.clone(); - let peer = peer.clone(); - move |headers| { - { - let p2p = p2p.clone(); - let _consensus_port = consensus_port.clone(); - let peer = peer.clone(); - let headers = peer.bind(headers); - - get_blocks(p2p, headers) - } - // .instrument(tracing::debug_span!("consensus_and_transactions")) - // .in_current_span() - } - }); - - i - // i.into_scan_none_or_err().scan_none_or_err() + let generator = + futures::stream::repeat((peer.clone(), p2p.clone(), consensus.clone())); + let iter = header_stream.zip(generator.clone()); + iter.then(|(header, (peer, p2p, consensus))| async move { + let header = header?; + let validity = + check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; + if validity { + consensus.await_da_height(&header.entity.da_height).await?; + Ok(Some(header)) + } else { + Ok(None) + } + }) + .into_scan_none_or_err() + .scan_none_or_err() + .chunks(*header_batch_size as usize) + .zip(generator) + .map(|(headers, (peer, p2p, ..))| { + { + let headers = peer.bind(headers); + get_blocks(p2p, headers) + } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() + }) } fn get_header_stream( @@ -699,7 +682,7 @@ impl ScanNoneErr { #[cfg(test)] mod test { use crate::import::StreamUtil; - use anyhow::anyhow; + // use anyhow::anyhow; use futures::StreamExt; #[tokio::test] @@ -718,8 +701,8 @@ mod test { // let stream = futures::stream::iter(i) // .into_scan_none_or_err() // .scan_none_or_err(); - // let output = stream.collect::>>().await; - // println!("{:?}", output); + // let output = stream.collect::>().await; + // println!("{:?}", r); // } } From 90c87b0b339238f4ed10e2502cd360224e2b06d2 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 19 Sep 2023 13:45:37 -0400 Subject: [PATCH 32/87] WIP --- crates/services/sync/src/import.rs | 97 +++++++++++------------- crates/services/sync/src/import/tests.rs | 47 +++++++++--- 2 files changed, 84 insertions(+), 60 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 9aab135eb1d..a62869adc6e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -3,6 +3,7 @@ //! importing blocks from the network into the local blockchain. 
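// Aside (a hedged sketch, not part of the patch): the `chunks(*header_batch_size as usize)`
// call introduced above groups the validated header stream into fixed-size batches so that
// each batch can be turned into a single blocks/transactions request. A toy illustration of
// the combinator, assuming only the `futures` crate:
use futures::{stream, StreamExt};

async fn chunk_headers_demo() {
    let header_batch_size = 2usize;
    // Five "headers" grouped into batches of at most two.
    let batches: Vec<Vec<u32>> = stream::iter(1..=5u32)
        .chunks(header_batch_size)
        .collect()
        .await;
    assert_eq!(batches, vec![vec![1u32, 2], vec![3, 4], vec![5]]);
}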
use futures::{ + stream, FutureExt, TryStreamExt, }; @@ -207,15 +208,16 @@ where let peer = peer.expect("Checked"); + let generator = + stream::repeat((peer.clone(), state.clone(), p2p.clone(), executor.clone())); + let block_stream = get_block_stream( peer.clone(), range.clone(), params, p2p.clone(), consensus.clone(), - ) - .await; - + ); let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); @@ -226,7 +228,10 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { // Stream a batch of blocks - blocks = stream_block_batch => blocks, + blocks = stream_block_batch => { + dbg!(&blocks); + blocks + }, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok(vec![]) @@ -247,44 +252,30 @@ where tracing::info!("In progress import stream shutting down"); } }) + .zip(generator) // Then execute and commit the block - .then({ - let state = state.clone(); - let executor = executor.clone(); - let p2p = p2p.clone(); - let peer = peer.clone(); - move |res| { - let state = state.clone(); - let executor = executor.clone(); - let p2p = p2p.clone(); - let peer = peer.clone(); - async move { - let executor = executor.clone(); - dbg!(&res); - let sealed_blocks = res?; - dbg!(&sealed_blocks); - let iter = futures::stream::iter(sealed_blocks); - let res = iter.then(|sealed_block| async { - let executor = executor.clone(); - dbg!(&sealed_block); - execute_and_commit(executor.as_ref(), &state, sealed_block).await - }).try_collect::>().await; - match &res { - Ok(_) => { - report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. - tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, - } - res + .then( + |(res, (peer, state, p2p, executor))| async move { + let sealed_blocks = res?; + let sealed_blocks = futures::stream::iter(sealed_blocks); + let res = sealed_blocks.then(|sealed_block| async { + execute_and_commit(executor.as_ref(), &state, sealed_block).await + }).try_collect::>().await; + match &res { + Ok(_) => { + report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + }, } + res } .instrument(tracing::debug_span!("execute_and_commit")) .in_current_span() - }) + ) // Continue the stream unless an error occurs. .into_scan_err() .scan_err() @@ -292,7 +283,6 @@ where // find any errors. // Fold the stream into a count and any errors. 
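// Aside (a hedged sketch, not part of the patch): the refactor above threads shared handles
// through the pipeline by zipping the work stream with a `stream::repeat(...)` generator, so
// every item arrives paired with fresh clones instead of each closure capturing its own copies.
// A minimal illustration using only `futures` and `std`:
use futures::{stream, StreamExt};
use std::sync::Arc;

async fn zip_shared_state_demo() {
    let shared = Arc::new(String::from("shared context"));
    let results: Vec<String> = stream::iter(1..=3u32)
        // The repeated stream yields a clone of the shared handle for every item.
        .zip(stream::repeat(shared.clone()))
        .then(|(item, shared)| async move { format!("{item} via {shared}") })
        .collect()
        .await;
    assert_eq!(results.len(), 3);
}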
.fold((0usize, Ok(())), |(count, res), result| async move { - dbg!(&result); match result { Ok(_) => (count + 1, res), Err(e) => (count, Err(e)), @@ -307,7 +297,7 @@ where } } -async fn get_block_stream< +fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( @@ -546,9 +536,11 @@ where peer_id, data: headers, } = headers; + let (headers, _err): (Vec<_>, Vec<_>) = + headers.into_iter().partition(|item| item.is_err()); let headers = headers .into_iter() - .filter_map(|r| r.ok()) + .filter_map(|header| header.ok()) .collect::>(); let block_ids = headers.iter().map(|header| header.entity.id()).collect(); let block_ids = peer_id.clone().bind(block_ids); @@ -682,8 +674,11 @@ impl ScanNoneErr { #[cfg(test)] mod test { use crate::import::StreamUtil; - // use anyhow::anyhow; - use futures::StreamExt; + use anyhow::anyhow; + use futures::{ + StreamExt, + TryStreamExt, + }; #[tokio::test] async fn test_it() { @@ -695,15 +690,15 @@ mod test { println!("{:?}", output); } - // #[tokio::test] - // async fn test_it_2() { - // let i = [Ok(Some(0)), Ok(Some(1)), Err(anyhow!("err!"))]; - // let stream = futures::stream::iter(i) - // .into_scan_none_or_err() - // .scan_none_or_err(); - // let output = stream.collect::>().await; - // println!("{:?}", r); - // } + #[tokio::test] + async fn test_it_2() { + let i = [Ok(Some(0)), Ok(Some(1)), Err(anyhow!("err!"))]; + let stream = futures::stream::iter(i) + .into_scan_none_or_err() + .scan_none_or_err(); + let output = stream.try_collect::>().await; + assert!(output.is_err()); + } } impl ScanEmptyErr { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 8d7fad47e6d..7121777c6a3 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -17,17 +17,17 @@ use fuel_core_types::{ fuel_tx::Transaction, services::p2p::TransactionData, }; -use test_case::test_case; +// use test_case::test_case; use super::*; -#[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] -#[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] -#[tokio::test] -async fn test_import(state: State, mocks: Mocks) -> (State, bool) { - let state = SharedMutex::new(state); - test_import_inner(state, mocks, None).await -} +// #[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] +// #[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] +// #[tokio::test] +// async fn test_import(state: State, mocks: Mocks) -> (State, bool) { +// let state = SharedMutex::new(state); +// test_import_inner(state, mocks, None).await +// } #[tokio::test] async fn test_import_0_to_5() { @@ -41,8 +41,37 @@ async fn test_import_0_to_5() { #[tokio::test] async fn test_import_3_to_5() { + let consensus_port = MockConsensusPort::times([2]); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|range| { + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) + }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| 
TransactionData::new()).collect(); + Ok(Some(v)) + }); + + let mocks = Mocks { + consensus_port, + p2p, + executor: DefaultMocks::times([1]), + }; + let state = State::new(3, 5); - let mocks = Mocks::times([2]); let state = SharedMutex::new(state); let v = test_import_inner(state, mocks, None).await; let expected = (State::new(5, None), true); From 02b5a008245e5f4acdb801b360c7a26482d00d1f Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 19 Sep 2023 19:58:32 -0400 Subject: [PATCH 33/87] WIP --- crates/services/sync/src/import.rs | 262 +++++++++++------------ crates/services/sync/src/import/tests.rs | 10 +- 2 files changed, 133 insertions(+), 139 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index a62869adc6e..81313deb031 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -50,10 +50,7 @@ use crate::{ PeerToPeerPort, }, state::State, - tracing_helpers::{ - TraceErr, - TraceNone, - }, + tracing_helpers::TraceErr, }; #[cfg(any(test, feature = "benchmarking"))] @@ -193,19 +190,15 @@ where let block_height = BlockHeight::from(*range.end()); let peer = p2p.select_peer(block_height).await; - if let Err(err) = peer { let err = Err(err); return (0, err) } - let peer = peer.expect("Checked"); - if let None = peer { let err = Err(anyhow!("Expected peer")); return (0, err) } - let peer = peer.expect("Checked"); let generator = @@ -218,78 +211,80 @@ where p2p.clone(), consensus.clone(), ); + let result = block_stream - .map(move |stream_block_batch| { - let shutdown_guard = shutdown_guard.clone(); - let shutdown_signal = shutdown_signal.clone(); - tokio::spawn(async move { - // Hold a shutdown sender for the lifetime of the spawned task - let _shutdown_guard = shutdown_guard.clone(); - let mut shutdown_signal = shutdown_signal.clone(); - tokio::select! { - // Stream a batch of blocks - blocks = stream_block_batch => { - dbg!(&blocks); - blocks - }, - // If a shutdown signal is received during the stream, terminate early and - // return an empty response - _ = shutdown_signal.while_started() => Ok(vec![]) + .map(move |stream_block_batch| { + let shutdown_guard = shutdown_guard.clone(); + let shutdown_signal = shutdown_signal.clone(); + tokio::spawn(async move { + // Hold a shutdown sender for the lifetime of the spawned task + let _shutdown_guard = shutdown_guard.clone(); + let mut shutdown_signal = shutdown_signal.clone(); + tokio::select! { + // Stream a batch of blocks + blocks = stream_block_batch => { + dbg!(&blocks); + blocks + }, + // If a shutdown signal is received during the stream, terminate early and + // return an empty response + _ = shutdown_signal.while_started() => Ok((vec![], None)) + } + }).then(|task| async { task.map_err(|e| anyhow!(e))? }) + }) + // Request up to `block_stream_buffer_size` transactions from the network. + .buffered(params.block_stream_buffer_size) + // Continue the stream unless an error or empty batch occurs. + // Note the error will be returned but the stream will close. + .into_scan_err() + .scan_err() + // Continue the stream until the shutdown signal is received. + .take_until({ + let mut s = shutdown.clone(); + async move { + let _ = s.while_started().await; + tracing::info!("In progress import stream shutting down"); } - }).then(|task| async { task.map_err(|e| anyhow!(e))? }) - }) - // Request up to `block_stream_buffer_size` transactions from the network. 
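// Aside (a hedged toy example, not part of the patch): `buffered(n)`, as used in this
// pipeline with `params.block_stream_buffer_size`, drives up to `n` of the spawned batch
// futures concurrently while still yielding their results in order.
use futures::{stream, StreamExt};

async fn buffered_demo() {
    let results: Vec<u32> = stream::iter(0..10u32)
        .map(|height| async move {
            // Stand-in for fetching one batch of blocks from the network.
            height * 2
        })
        .buffered(4) // at most four fetches in flight at once
        .collect()
        .await;
    assert_eq!(results, (0..10u32).map(|h| h * 2).collect::<Vec<_>>());
}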
- .buffered(params.block_stream_buffer_size) - // Continue the stream unless an error or empty batch occurs. - // Note the error will be returned but the stream will close. - .into_scan_empty_or_err() - .scan_empty_or_err() - // Continue the stream until the shutdown signal is received. - .take_until({ - let mut s = shutdown.clone(); - async move { - let _ = s.while_started().await; - tracing::info!("In progress import stream shutting down"); - } - }) + }) + // Then execute and commit the block .zip(generator) - // Then execute and commit the block - .then( - |(res, (peer, state, p2p, executor))| async move { - let sealed_blocks = res?; - let sealed_blocks = futures::stream::iter(sealed_blocks); - let res = sealed_blocks.then(|sealed_block| async { - execute_and_commit(executor.as_ref(), &state, sealed_block).await - }).try_collect::>().await; - match &res { - Ok(_) => { - report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. - tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, + .then( + |(res, (peer, state, p2p, executor))| async move { + let (sealed_blocks, e) = res?; + let sealed_blocks = futures::stream::iter(sealed_blocks); + let res = sealed_blocks.then(|sealed_block| async { + execute_and_commit(executor.as_ref(), &state, sealed_block).await + }).try_collect::>().await.and_then(|v| e.map_or(Ok(v), |e| Err(e))); + match &res { + Ok(_) => { + report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + }, + } + res } - res - } - .instrument(tracing::debug_span!("execute_and_commit")) + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + ) + // Continue the stream unless an error occurs. + .into_scan_err() + .scan_err() + // Count the number of successfully executed blocks and + // find any errors. + // Fold the stream into a count and any errors. + .fold((0usize, Ok(())), |(count, res), result| async move { + dbg!(&result); + match result { + Ok(_) => (count + 1, res), + Err(e) => (count, Err(e)), + } + }) .in_current_span() - ) - // Continue the stream unless an error occurs. - .into_scan_err() - .scan_err() - // Count the number of successfully executed blocks and - // find any errors. - // Fold the stream into a count and any errors. - .fold((0usize, Ok(())), |(count, res), result| async move { - match result { - Ok(_) => (count + 1, res), - Err(e) => (count, Err(e)), - } - }) - .in_current_span() - .await; + .await; // Wait for any spawned tasks to shutdown let _ = shutdown_guard_recv.recv().await; @@ -306,7 +301,9 @@ fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream>>> { +) -> impl Stream< + Item = impl Future, Option)>>, +> { let Config { header_batch_size, .. } = params; @@ -514,57 +511,56 @@ where } // Get blocks correlating to the headers from a specific peer -#[tracing::instrument( - skip(p2p, headers), - // fields( - // height = **header.data.height(), - // id = %header.data.consensus.generated.application_hash - // ), - err -)] +// #[tracing::instrument( +// skip(p2p, headers), +// // fields( +// // height = **header.data.height(), +// // id = %header.data.consensus.generated.application_hash +// // ), +// err +// )] async fn get_blocks
<P>
( p2p: Arc
<P>
, headers: SourcePeer>>, -) -> anyhow::Result> +) -> anyhow::Result<(Vec, Option)> where P: PeerToPeerPort + Send + Sync + 'static, { // Get transactions for the set of valid block ids // Return the error as well if there is one - let SourcePeer { - peer_id, - data: headers, - } = headers; - let (headers, _err): (Vec<_>, Vec<_>) = - headers.into_iter().partition(|item| item.is_err()); - let headers = headers - .into_iter() - .filter_map(|header| header.ok()) + let mut err = None; + let SourcePeer { peer_id, data } = headers; + let headers = data + .iter() + .take_while(|item| item.is_ok()) + .map(|item| item.as_ref().unwrap()) .collect::>(); + if headers.len() < data.len() { + err = Some(anyhow!("An error occurred!!")); + } + if headers.is_empty() { + return Ok((vec![], err)) + } let block_ids = headers.iter().map(|header| header.entity.id()).collect(); let block_ids = peer_id.clone().bind(block_ids); - let maybe_txs = p2p - .get_transactions_2(block_ids) - .await - .trace_err("Failed to get transactions")? - .trace_none_warn("Could not find transactions for header"); - + let maybe_txs = p2p.get_transactions_2(block_ids).await; match maybe_txs { - None => { + Ok(None) => { report_peer( p2p.as_ref(), peer_id.clone(), PeerReportReason::MissingTransactions, ) .await; - Ok(vec![]) + Ok((vec![], err)) } - Some(transaction_data) => { + Ok(Some(transaction_data)) => { let headers = headers; let iter = headers.into_iter().zip(transaction_data); let mut blocks = vec![]; for (block_header, transactions) in iter { + let block_header = block_header.clone(); let SealedBlockHeader { consensus, entity: header, @@ -589,7 +585,11 @@ where .await; } } - Ok(blocks) + Ok((blocks, err)) + } + Err(error) => { + err = Some(error); + Ok((vec![], err)) } } } @@ -632,11 +632,11 @@ trait StreamUtil: Sized { ScanNoneErr(self) } - /// Close the stream if an error occurs or a `None` is received. - /// Return the error if the stream closes. - fn into_scan_empty_or_err(self) -> ScanEmptyErr { - ScanEmptyErr(self) - } + // /// Close the stream if an error occurs or a `None` is received. + // /// Return the error if the stream closes. + // fn into_scan_empty_or_err(self) -> ScanEmptyErr { + // ScanEmptyErr(self) + // } /// Turn a stream of `Result` into a stream of `Result`. /// Close the stream if an error occurs. @@ -649,7 +649,7 @@ trait StreamUtil: Sized { impl StreamUtil for S {} struct ScanNoneErr(S); -struct ScanEmptyErr(S); +// struct ScanEmptyErr(S); struct ScanErr(S); impl ScanNoneErr { @@ -701,27 +701,27 @@ mod test { } } -impl ScanEmptyErr { - /// Scan the stream for empty vector or errors. - fn scan_empty_or_err(self) -> impl Stream>> - where - S: Stream>> + Send + 'static, - { - let stream = self.0.boxed(); - futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { - if is_err { - None - } else { - let result = stream.next().await?; - is_err = result.is_err(); - result - .map(|v| (!v.is_empty()).then(|| v)) - .transpose() - .map(|result| (result, (is_err, stream))) - } - }) - } -} +// impl ScanEmptyErr { +// /// Scan the stream for empty vector or errors. 
+// fn scan_empty_or_err(self) -> impl Stream>> +// where +// S: Stream>> + Send + 'static, +// { +// let stream = self.0.boxed(); +// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { +// if is_err { +// None +// } else { +// let result = stream.next().await?; +// is_err = result.is_err(); +// result +// .map(|v| (!v.is_empty()).then(|| v)) +// .transpose() +// .map(|result| (result, (is_err, stream))) +// } +// }) +// } +// } impl ScanErr { /// Scan the stream for errors. diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 7121777c6a3..6f50c2eac37 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -155,7 +155,7 @@ async fn import__signature_fails_on_header_4_only() { Ok(headers) }); p2p.expect_get_transactions_2() - .times(1) + .times(0) .returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| TransactionData::new()).collect(); @@ -429,13 +429,7 @@ async fn import__p2p_error() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Err(anyhow::anyhow!("Some network error"))); - p2p.expect_get_transactions_2() - .times(1) - .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); - Ok(Some(v)) - }); + p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { From 433c03321606aba1a76cc3997ff8967d9ee5c698 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 19 Sep 2023 21:46:53 -0400 Subject: [PATCH 34/87] WIP --- crates/services/sync/src/import.rs | 63 +++++++++++------------- crates/services/sync/src/import/tests.rs | 17 ++----- 2 files changed, 35 insertions(+), 45 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 81313deb031..3e2666c3ff7 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -271,8 +271,8 @@ where .in_current_span() ) // Continue the stream unless an error occurs. - .into_scan_err() - .scan_err() + .into_scan_empty_or_err() + .scan_empty_or_err() // Count the number of successfully executed blocks and // find any errors. // Fold the stream into a count and any errors. @@ -526,15 +526,12 @@ async fn get_blocks
<P>
( where P: PeerToPeerPort + Send + Sync + 'static, { - // Get transactions for the set of valid block ids - // Return the error as well if there is one - let mut err = None; let SourcePeer { peer_id, data } = headers; let headers = data .iter() .take_while(|item| item.is_ok()) - .map(|item| item.as_ref().unwrap()) + .map(|item| item.as_ref().expect("Result is checked for Ok")) .collect::>(); if headers.len() < data.len() { err = Some(anyhow!("An error occurred!!")); @@ -632,11 +629,11 @@ trait StreamUtil: Sized { ScanNoneErr(self) } - // /// Close the stream if an error occurs or a `None` is received. - // /// Return the error if the stream closes. - // fn into_scan_empty_or_err(self) -> ScanEmptyErr { - // ScanEmptyErr(self) - // } + /// Close the stream if an error occurs or an empty `Vector` is received. + /// Return the error if the stream closes. + fn into_scan_empty_or_err(self) -> ScanEmptyErr { + ScanEmptyErr(self) + } /// Turn a stream of `Result` into a stream of `Result`. /// Close the stream if an error occurs. @@ -649,7 +646,7 @@ trait StreamUtil: Sized { impl StreamUtil for S {} struct ScanNoneErr(S); -// struct ScanEmptyErr(S); +struct ScanEmptyErr(S); struct ScanErr(S); impl ScanNoneErr { @@ -701,27 +698,27 @@ mod test { } } -// impl ScanEmptyErr { -// /// Scan the stream for empty vector or errors. -// fn scan_empty_or_err(self) -> impl Stream>> -// where -// S: Stream>> + Send + 'static, -// { -// let stream = self.0.boxed(); -// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { -// if is_err { -// None -// } else { -// let result = stream.next().await?; -// is_err = result.is_err(); -// result -// .map(|v| (!v.is_empty()).then(|| v)) -// .transpose() -// .map(|result| (result, (is_err, stream))) -// } -// }) -// } -// } +impl ScanEmptyErr { + /// Scan the stream for empty vector or errors. + fn scan_empty_or_err(self) -> impl Stream>> + where + S: Stream>> + Send + 'static, + { + let stream = self.0.boxed(); + futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { + if is_err { + None + } else { + let result = stream.next().await?; + is_err = result.is_err(); + result + .map(|v| (!v.is_empty()).then(|| v)) + .transpose() + .map(|result| (result, (is_err, stream))) + } + }) + } +} impl ScanErr { /// Scan the stream for errors. 
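Aside: the `scan_empty_or_err` combinator restored in this hunk is built on `futures::stream::unfold`. The standalone sketch below (simplified types, assuming the `futures` and `anyhow` crates; not the crate's exact code) shows the same idea of closing a stream of batches at the first error or the first empty batch:

use futures::{stream, Stream, StreamExt};

fn stop_on_empty_or_err<S, T>(inner: S) -> impl Stream<Item = anyhow::Result<Vec<T>>>
where
    S: Stream<Item = anyhow::Result<Vec<T>>> + Send + 'static,
    T: Send + 'static,
{
    let inner = inner.boxed();
    stream::unfold((false, inner), |(mut is_err, mut inner)| async move {
        if is_err {
            // The previous item was an error, so the stream ends here.
            None
        } else {
            let result = inner.next().await?;
            is_err = result.is_err();
            // An empty batch maps to `None`, which also terminates the unfold;
            // an error is yielded once before the stream closes.
            result
                .map(|batch| (!batch.is_empty()).then_some(batch))
                .transpose()
                .map(|item| (item, (is_err, inner)))
        }
    })
}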
diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 6f50c2eac37..f7d59843d9d 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -68,7 +68,7 @@ async fn test_import_3_to_5() { let mocks = Mocks { consensus_port, p2p, - executor: DefaultMocks::times([1]), + executor: DefaultMocks::times([2]), }; let state = State::new(3, 5); @@ -378,7 +378,6 @@ async fn import__transactions_not_found_for_header_4() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__transactions_not_found_for_header_5() { // given @@ -445,7 +444,6 @@ async fn import__p2p_error() { assert_eq!((State::new(3, None), false), res); } -#[ignore] #[tokio::test] async fn import__p2p_error_on_4_transactions() { // given @@ -626,7 +624,6 @@ async fn import__consensus_error_on_5() { assert_eq!((State::new(4, None), false), res); } -#[ignore] #[tokio::test] async fn import__execution_error_on_header_4() { // given @@ -656,7 +653,6 @@ async fn import__execution_error_on_header_4() { assert_eq!((State::new(3, None), false), res); } -#[ignore] #[tokio::test] async fn import__execution_error_on_header_5() { // given @@ -686,14 +682,13 @@ async fn import__execution_error_on_header_5() { assert_eq!((State::new(4, None), false), res); } -#[ignore] #[tokio::test] async fn signature_always_fails() { // given let mut consensus_port = MockConsensusPort::default(); consensus_port .expect_check_sealed_header() - .times(2) + .times(1) .returning(|_| Ok(false)); let state = State::new(3, 5).into(); @@ -710,7 +705,6 @@ async fn signature_always_fails() { assert_eq!((State::new(3, None), true), res); } -#[ignore] #[tokio::test] async fn import__can_work_in_two_loops() { // given @@ -796,7 +790,6 @@ async fn test_import_inner( (final_state, received_notify_signal) } -#[ignore] #[tokio::test] async fn import__happy_path_sends_good_peer_report() { // Given @@ -1075,13 +1068,13 @@ impl DefaultMocks for MockConsensusPort { } impl DefaultMocks for MockPeerToPeerPort { - fn times(_t: T) -> Self + fn times(t: T) -> Self where T: IntoIterator + Clone, ::IntoIter: Clone, { let mut p2p = MockPeerToPeerPort::default(); - // let mut t = t.into_iter().cycle(); + let mut t = t.into_iter().cycle(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -1100,7 +1093,7 @@ impl DefaultMocks for MockPeerToPeerPort { }); p2p.expect_get_transactions_2() - .times(1) + .times(t.next().unwrap()) .returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| TransactionData::new()).collect(); From 8203611634f6c1727623b2098b69c5f5e40a32da Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 20 Sep 2023 14:15:13 -0400 Subject: [PATCH 35/87] WIP --- crates/services/p2p/src/p2p_service.rs | 2 +- crates/services/sync/src/import/tests.rs | 37 ++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 105f681532b..6699ffc2f5e 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1557,7 +1557,7 @@ mod tests { tokio::select! 
{ message_sent = rx_test_end.recv() => { // we received a signal to end the test - assert!(message_sent.unwrap(), "Received incorrect or missing missing message"); + assert!(message_sent.unwrap(), "Received incorrect or missing message"); break; } node_a_event = node_a.next_event() => { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index f7d59843d9d..c49e8a4a6a0 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -31,8 +31,37 @@ use super::*; #[tokio::test] async fn test_import_0_to_5() { + let consensus_port = MockConsensusPort::times([2]); + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|range| { + let headers = range.map(|range| { + let headers = range.clone().map(|h| empty_header(h.into())).collect(); + Some(headers) + }); + Ok(headers) + }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); + + let mocks = Mocks { + consensus_port, + p2p, + executor: DefaultMocks::times([2]), + }; + let state = State::new(None, 5); - let mocks = Mocks::times([6]); let state = SharedMutex::new(state); let v = test_import_inner(state, mocks, None).await; let expected = (State::new(5, None), true); @@ -852,6 +881,7 @@ struct PeerReportTestBuider { shared_peer_id: Vec, get_sealed_headers: Option>>, get_transactions: Option>>, + get_transactions_2: Option>>, check_sealed_header: Option, block_count: u32, debug: bool, @@ -863,6 +893,7 @@ impl PeerReportTestBuider { shared_peer_id: vec![1, 2, 3, 4], get_sealed_headers: None, get_transactions: None, + get_transactions_2: None, check_sealed_header: None, block_count: 1, debug: false, @@ -968,8 +999,8 @@ impl PeerReportTestBuider { }); } - let get_transactions = self.get_transactions.clone().unwrap_or(Some(vec![])); - p2p.expect_get_transactions() + let get_transactions = self.get_transactions_2.clone().unwrap_or(Some(vec![])); + p2p.expect_get_transactions_2() .returning(move |_| Ok(get_transactions.clone())); let peer_id = self.shared_peer_id.clone(); From 6115eb2d5522ec2ad5da76ce4dd566a6c0088f57 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 20 Sep 2023 17:10:27 -0400 Subject: [PATCH 36/87] All tests passing --- crates/services/sync/src/import.rs | 6 +- crates/services/sync/src/import/tests.rs | 170 ++++++++++++++++------- 2 files changed, 124 insertions(+), 52 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 3e2666c3ff7..d5b5f866200 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -222,10 +222,7 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! 
{ // Stream a batch of blocks - blocks = stream_block_batch => { - dbg!(&blocks); - blocks - }, + blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Ok((vec![], None)) @@ -550,6 +547,7 @@ where PeerReportReason::MissingTransactions, ) .await; + // err = Some(anyhow!("Missing transactions!!")); Ok((vec![], err)) } Ok(Some(transaction_data)) => { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index c49e8a4a6a0..4c095aab3e3 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -13,10 +13,7 @@ use crate::{ PeerReportReason, }, }; -use fuel_core_types::{ - fuel_tx::Transaction, - services::p2p::TransactionData, -}; +use fuel_core_types::services::p2p::TransactionData; // use test_case::test_case; use super::*; @@ -31,7 +28,7 @@ use super::*; #[tokio::test] async fn test_import_0_to_5() { - let consensus_port = MockConsensusPort::times([2]); + let consensus_port = MockConsensusPort::times([6]); let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -58,7 +55,7 @@ async fn test_import_0_to_5() { let mocks = Mocks { consensus_port, p2p, - executor: DefaultMocks::times([2]), + executor: DefaultMocks::times([6]), }; let state = State::new(None, 5); @@ -656,6 +653,28 @@ async fn import__consensus_error_on_5() { #[tokio::test] async fn import__execution_error_on_header_4() { // given + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|_| { + Ok(peer_sourced_headers(Some(vec![ + empty_header(4.into()), + empty_header(5.into()), + ]))) + }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); + let mut executor = MockBlockImporterPort::default(); executor .expect_execute_and_commit() @@ -671,7 +690,7 @@ async fn import__execution_error_on_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port: DefaultMocks::times([2]), - p2p: DefaultMocks::times([2]), + p2p, executor, }; @@ -685,6 +704,28 @@ async fn import__execution_error_on_header_4() { #[tokio::test] async fn import__execution_error_on_header_5() { // given + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(1).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); + p2p.expect_get_sealed_block_headers() + .times(1) + .returning(|_| { + Ok(peer_sourced_headers(Some(vec![ + empty_header(4.into()), + empty_header(5.into()), + ]))) + }); + p2p.expect_get_transactions_2() + .times(1) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); + let mut executor = MockBlockImporterPort::default(); executor .expect_execute_and_commit() @@ -700,7 +741,7 @@ async fn import__execution_error_on_header_5() { let state = State::new(3, 5).into(); let mocks = Mocks { consensus_port: DefaultMocks::times([2]), - p2p: DefaultMocks::times([2]), + p2p, executor, }; @@ -740,6 +781,11 @@ async fn import__can_work_in_two_loops() { let 
s = SharedMutex::new(State::new(3, 5)); let state = s.clone(); let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_select_peer().times(2).returning(|_| { + let bytes = vec![1u8, 2, 3, 4, 5]; + let peer_id = bytes.into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers() .times(2) .returning(move |range| { @@ -750,9 +796,13 @@ async fn import__can_work_in_two_loops() { }); Ok(headers) }); - p2p.expect_get_transactions() - .times(3) - .returning(move |_| Ok(Some(vec![]))); + p2p.expect_get_transactions_2() + .times(2) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); let c = DefaultMocks::times([2]); let mocks = Mocks { consensus_port: DefaultMocks::times([3]), @@ -822,77 +872,73 @@ async fn test_import_inner( #[tokio::test] async fn import__happy_path_sends_good_peer_report() { // Given - PeerReportTestBuider::new() + PeerReportTestBuilder::new() // When (no changes) // Then - .run_with_expected_report(PeerReportReason::SuccessfulBlockImport) + .run_with_expected_reports([PeerReportReason::SuccessfulBlockImport]) .await; } -#[ignore] #[tokio::test] async fn import__multiple_blocks_happy_path_sends_good_peer_report() { // Given - PeerReportTestBuider::new() + PeerReportTestBuilder::new() // When .times(3) // Then - .run_with_expected_report(PeerReportReason::SuccessfulBlockImport) + .run_with_expected_reports([PeerReportReason::SuccessfulBlockImport]) .await; } -#[ignore] #[tokio::test] async fn import__missing_headers_sends_peer_report() { // Given - PeerReportTestBuider::new() + PeerReportTestBuilder::new() // When .with_get_headers(None) // Then - .run_with_expected_report(PeerReportReason::MissingBlockHeaders) + .run_with_expected_reports([PeerReportReason::MissingBlockHeaders]) .await; } -#[ignore] #[tokio::test] async fn import__bad_block_header_sends_peer_report() { // Given - PeerReportTestBuider::new() + PeerReportTestBuilder::new() // When .with_check_sealed_header(false) // Then - .run_with_expected_report(PeerReportReason::BadBlockHeader) + .run_with_expected_reports([PeerReportReason::BadBlockHeader]) .await; } -#[ignore] #[tokio::test] async fn import__missing_transactions_sends_peer_report() { // Given - PeerReportTestBuider::new() + PeerReportTestBuilder::new() // When - .with_get_transactions(None) + .with_get_transactions_2(None) // Then - .run_with_expected_report(PeerReportReason::MissingTransactions) + .run_with_expected_reports([PeerReportReason::MissingTransactions, PeerReportReason::SuccessfulBlockImport]) .await; } -struct PeerReportTestBuider { +struct PeerReportTestBuilder { shared_peer_id: Vec, get_sealed_headers: Option>>, - get_transactions: Option>>, + // get_transactions: Option>>, get_transactions_2: Option>>, check_sealed_header: Option, block_count: u32, debug: bool, } -impl PeerReportTestBuider { +impl PeerReportTestBuilder { pub fn new() -> Self { Self { shared_peer_id: vec![1, 2, 3, 4], get_sealed_headers: None, - get_transactions: None, + // get_transactions: None, get_transactions_2: None, check_sealed_header: None, block_count: 1, @@ -914,11 +960,19 @@ impl PeerReportTestBuider { self } - pub fn with_get_transactions( + // pub fn with_get_transactions( + // mut self, + // get_transactions: Option>, + // ) -> Self { + // self.get_transactions = Some(get_transactions); + // self + // } + + pub fn with_get_transactions_2( mut self, - get_transactions: Option>, + get_transactions: Option>, ) -> Self { - self.get_transactions = 
Some(get_transactions); + self.get_transactions_2 = Some(get_transactions); self } @@ -932,14 +986,18 @@ impl PeerReportTestBuider { self } - pub async fn run_with_expected_report(self, expected_report: PeerReportReason) { + pub async fn run_with_expected_reports(self, expected_reports: R) + where + R: IntoIterator, + ::IntoIter: Send, + { if self.debug { let _ = tracing_subscriber::fmt() .with_max_level(tracing::Level::DEBUG) .try_init(); } - let p2p = self.p2p(expected_report); + let p2p = self.p2p(expected_reports); let executor = self.executor(); let consensus = self.consensus(); @@ -969,7 +1027,11 @@ impl PeerReportTestBuider { let _ = import.import(&mut watcher).await; } - fn p2p(&self, expected_report: PeerReportReason) -> Arc { + fn p2p(&self, expected_reports: R) -> Arc + where + R: IntoIterator, + ::IntoIter: std::marker::Send, + { let mut p2p = MockPeerToPeerPort::default(); let peer_id = self.shared_peer_id.clone(); @@ -989,7 +1051,6 @@ impl PeerReportTestBuider { } else { p2p.expect_get_sealed_block_headers() .returning(move |range| { - dbg!(&range.peer_id); let headers = range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); @@ -999,18 +1060,31 @@ impl PeerReportTestBuider { }); } - let get_transactions = self.get_transactions_2.clone().unwrap_or(Some(vec![])); - p2p.expect_get_transactions_2() - .returning(move |_| Ok(get_transactions.clone())); + let transactions = self.get_transactions_2.clone(); + if let Some(t) = transactions { + p2p.expect_get_transactions_2() + .returning(move |_| Ok(t.clone())); + } else { + p2p.expect_get_transactions_2().returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| TransactionData::new()).collect(); + Ok(Some(v)) + }); + } - let peer_id = self.shared_peer_id.clone(); - p2p.expect_report_peer() - .times(self.block_count as usize) - .withf(move |peer, report| { - let peer_id = peer_id.clone(); - peer.as_ref() == peer_id && report == &expected_report - }) - .returning(|_, _| Ok(())); + let mut seq = mockall::Sequence::new(); + let peer_id: PeerId = self.shared_peer_id.clone().into(); + let expected_reports = expected_reports.into_iter(); + for expected_report in expected_reports { + p2p.expect_report_peer() + .times(1) + .with( + mockall::predicate::eq(peer_id.clone()), + mockall::predicate::eq(expected_report), + ) + .returning(|_, _| Ok(())) + .in_sequence(&mut seq); + } Arc::new(p2p) } From 3ab36c5752cf7f7d589cefcbab6f91aeff3190c9 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 20 Sep 2023 17:58:41 -0400 Subject: [PATCH 37/87] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c4041ed0339..09c2c49ac9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ Description of the upcoming release here. ### Changed +- [#1349](https://github.com/FuelLabs/fuel-core/pull/1349): Updated peer-to-peer transactions API to support multiple blocks in a single request, and updated block synchronization to request multiple blocks based on the configured range of headers. - [#1366](https://github.com/FuelLabs/fuel-core/pull/1366): Improve caching during docker builds in CI by replacing gha - [#1358](https://github.com/FuelLabs/fuel-core/pull/1358): Upgraded the Rust version used in CI to 1.72.0. Also includes associated Clippy changes. - [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Modified block synchronization to use asynchronous task execution when retrieving block headers. 
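Aside: as a rough illustration of the batched flow described in the changelog entry above, the importer asks one selected peer for the transactions of a whole batch of headers in a single call. The sketch below is hedged: the port method `get_transactions_2`, the `bind` helper on `PeerId`, and the `TransactionData` payload are the names that appear in the diffs of this series, and the exact signatures in the crate may differ.

// `PeerToPeerPort`, `PeerId`, `SealedBlockHeader` and `TransactionData` are the
// types already used by the sync crate in the diffs above.
async fn fetch_batch_transactions<P>(
    p2p: &P,
    peer_id: PeerId,
    headers: &[SealedBlockHeader],
) -> anyhow::Result<Option<Vec<TransactionData>>>
where
    P: PeerToPeerPort + Send + Sync,
{
    // One request covers every header in the batch instead of one
    // `get_transactions` round trip per block.
    let block_ids: Vec<_> = headers.iter().map(|h| h.entity.id()).collect();
    let block_ids = peer_id.bind(block_ids);
    p2p.get_transactions_2(block_ids).await
}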
From 63abe76f95f956af669f2d40b531fb86b90c75a0 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 20 Sep 2023 18:33:12 -0400 Subject: [PATCH 38/87] Update --- crates/services/sync/src/import.rs | 1 - crates/services/sync/src/import/tests.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d5b5f866200..c3f11dcf865 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -547,7 +547,6 @@ where PeerReportReason::MissingTransactions, ) .await; - // err = Some(anyhow!("Missing transactions!!")); Ok((vec![], err)) } Ok(Some(transaction_data)) => { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 4c095aab3e3..94dca300879 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -989,7 +989,6 @@ impl PeerReportTestBuilder { pub async fn run_with_expected_reports(self, expected_reports: R) where R: IntoIterator, - ::IntoIter: Send, { if self.debug { let _ = tracing_subscriber::fmt() @@ -1030,7 +1029,6 @@ impl PeerReportTestBuilder { fn p2p(&self, expected_reports: R) -> Arc where R: IntoIterator, - ::IntoIter: std::marker::Send, { let mut p2p = MockPeerToPeerPort::default(); From d3b517f86c55a2eb2e60ab215a53bde46ad005e4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 20 Sep 2023 18:55:40 -0400 Subject: [PATCH 39/87] Clean up --- crates/services/p2p/src/service.rs | 3 +-- crates/services/sync/src/import.rs | 2 -- crates/services/sync/src/lib.rs | 2 ++ crates/types/src/blockchain/consensus.rs | 16 ---------------- 4 files changed, 3 insertions(+), 20 deletions(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index ab0d4afbfc8..0091178f80e 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -482,8 +482,7 @@ where let _ = self.p2p_service.report_peer(peer_id, score, reporting_service); } Some(TaskRequest::SelectPeer { block_height, channel }) => { - let peer = self.p2p_service.peer_manager() - .get_peer_id_with_height(&block_height); + let peer = self.p2p_service.get_peer_id_with_height(&block_height); let _ = channel.send(peer); } None => { diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index c3f11dcf865..04f0d3ab1e2 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -274,7 +274,6 @@ where // find any errors. // Fold the stream into a count and any errors. 
.fold((0usize, Ok(())), |(count, res), result| async move { - dbg!(&result); match result { Ok(_) => (count + 1, res), Err(e) => (count, Err(e)), @@ -489,7 +488,6 @@ where } Err(e) => { - dbg!(&e); vec![Err(e)] } }; diff --git a/crates/services/sync/src/lib.rs b/crates/services/sync/src/lib.rs index 9087e9aadc4..2b2fcdc87c8 100644 --- a/crates/services/sync/src/lib.rs +++ b/crates/services/sync/src/lib.rs @@ -13,5 +13,7 @@ mod tracing_helpers; pub use import::Config; +use rand as _; + #[cfg(test)] fuel_core_trace::enable_tracing!(); diff --git a/crates/types/src/blockchain/consensus.rs b/crates/types/src/blockchain/consensus.rs index 7ac502a388d..905124f1a50 100644 --- a/crates/types/src/blockchain/consensus.rs +++ b/crates/types/src/blockchain/consensus.rs @@ -67,22 +67,6 @@ pub struct Sealed { pub consensus: Consensus, } -impl Sealed> { - /// Transpose a Sealed Vector of `Entity` into a Vector of Sealed `Entity` - pub fn transpose(self) -> Vec> { - let consensus = self.consensus; - let entities = self - .entity - .into_iter() - .map(|e| Sealed { - entity: e, - consensus: consensus.clone(), - }) - .collect(); - entities - } -} - /// A vote from a validator. /// /// This is a dummy placeholder for the Vote Struct in fuel-bft From be2c6e2a8269d3eb08cc0f5d1702cbb150206845 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 21 Sep 2023 17:20:31 -0400 Subject: [PATCH 40/87] WIP --- crates/services/sync/src/import.rs | 57 +++++++++++++++--------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 04f0d3ab1e2..fa2c405f025 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -315,6 +315,12 @@ fn get_block_stream< consensus.await_da_height(&header.entity.da_height).await?; Ok(Some(header)) } else { + // TODO: Could be replaced with an error, then + // .into_scan_none_or_err() + // .scan_none_or_err() + // becomes: + // .into_scan_err() + // .scan_err() Ok(None) } }) @@ -323,12 +329,9 @@ fn get_block_stream< .chunks(*header_batch_size as usize) .zip(generator) .map(|(headers, (peer, p2p, ..))| { - { - let headers = peer.bind(headers); - get_blocks(p2p, headers) - } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() + { get_blocks(p2p, peer, headers) } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() }) } @@ -394,17 +397,6 @@ async fn check_sealed_header< Ok(validity) } -// async fn get_sealed_blocks< -// P: PeerToPeerPort + Send + Sync + 'static, -// C: ConsensusPort + Send + Sync + 'static, -// >( -// headers: SourcePeer>>, -// p2p: Arc
<P>
, -// _consensus: Arc, -// ) -> anyhow::Result> { -// get_blocks(p2p.as_ref(), headers).await -// } - /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. async fn wait_for_notify_or_shutdown( @@ -456,7 +448,8 @@ where start, end ); - vec![Err(anyhow::anyhow!("Headers provider was unable to fulfill request for unspecified reason. Possibly because requested batch size was too large"))] + let error = Err(anyhow::anyhow!("Headers provider was unable to fulfill request for unspecified reason. Possibly because requested batch size was too large")); + vec![error] } Some(headers) => headers .into_iter() @@ -488,7 +481,8 @@ where } Err(e) => { - vec![Err(e)] + let error = Err(e); + vec![error] } }; futures::stream::iter(headers) @@ -516,24 +510,27 @@ where // )] async fn get_blocks
<P>
( p2p: Arc
<P>
, - headers: SourcePeer>>, + peer_id: PeerId, + headers: Vec>, ) -> anyhow::Result<(Vec, Option)> where P: PeerToPeerPort + Send + Sync + 'static, { - let mut err = None; - let SourcePeer { peer_id, data } = headers; - let headers = data - .iter() - .take_while(|item| item.is_ok()) - .map(|item| item.as_ref().expect("Result is checked for Ok")) + let (headers, errors): (Vec<_>, Vec<_>) = + headers.into_iter().partition(|r| r.is_ok()); + let headers = headers + .into_iter() + .map(|item| item.expect("Result is checked for Ok")) .collect::>(); - if headers.len() < data.len() { - err = Some(anyhow!("An error occurred!!")); - } + let mut errors = errors + .into_iter() + .map(|item| item.expect_err("Result is checked for Error")) + .collect::>(); + let mut err = errors.pop(); if headers.is_empty() { return Ok((vec![], err)) } + let block_ids = headers.iter().map(|header| header.entity.id()).collect(); let block_ids = peer_id.clone().bind(block_ids); let maybe_txs = p2p.get_transactions_2(block_ids).await; @@ -580,6 +577,8 @@ where Ok((blocks, err)) } Err(error) => { + // Failure to retrieve transactions due to a networking error, + // invalid response, or any other reason constitutes a fatal error. err = Some(error); Ok((vec![], err)) } From ae2bbc7b8abbdccbede5a811cebd6680d9f581de Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 21 Sep 2023 17:22:51 -0400 Subject: [PATCH 41/87] Update import.rs --- crates/services/sync/src/import.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index fa2c405f025..29fe5da41b4 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -195,7 +195,7 @@ where return (0, err) } let peer = peer.expect("Checked"); - if let None = peer { + if peer.is_none() { let err = Err(anyhow!("Expected peer")); return (0, err) } @@ -251,7 +251,7 @@ where let sealed_blocks = futures::stream::iter(sealed_blocks); let res = sealed_blocks.then(|sealed_block| async { execute_and_commit(executor.as_ref(), &state, sealed_block).await - }).try_collect::>().await.and_then(|v| e.map_or(Ok(v), |e| Err(e))); + }).try_collect::>().await.and_then(|v| e.map_or(Ok(v), Err)); match &res { Ok(_) => { report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; @@ -706,7 +706,7 @@ impl ScanEmptyErr { let result = stream.next().await?; is_err = result.is_err(); result - .map(|v| (!v.is_empty()).then(|| v)) + .map(|v| (!v.is_empty()).then_some(v)) .transpose() .map(|result| (result, (is_err, stream))) } From a2bdcff7aec616830cab02109cdcd3712977026c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 22 Sep 2023 20:48:52 -0400 Subject: [PATCH 42/87] WIP --- Cargo.lock | 1 + Cargo.toml | 1 + crates/client/Cargo.toml | 2 +- crates/fuel-core/src/service/adapters/sync.rs | 12 +- crates/services/sync/Cargo.toml | 1 + crates/services/sync/src/import.rs | 270 +++++++++--------- .../services/sync/src/import/test_helpers.rs | 21 +- .../test_helpers/pressure_peer_to_peer.rs | 9 +- crates/services/sync/src/import/tests.rs | 145 ++++------ crates/services/sync/src/ports.rs | 2 +- crates/services/sync/src/service/tests.rs | 7 +- 11 files changed, 211 insertions(+), 260 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 989f1655607..1c77d4a369a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3118,6 +3118,7 @@ version = "0.20.4" dependencies = [ "anyhow", "async-trait", + "derive_more", "fuel-core-services", "fuel-core-trace", 
"fuel-core-types", diff --git a/Cargo.toml b/Cargo.toml index 09ab1ab6318..e599dc7ddc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,6 +82,7 @@ anyhow = "1.0" async-trait = "0.1" cynic = { version = "2.2.1", features = ["http-reqwest"] } clap = "4.1" +derive_more = { version = "0.99" } hyper = { version = "0.14.26" } rand = "0.8" parking_lot = "0.12" diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index cd1963b4981..4c70704d3a4 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -13,7 +13,7 @@ description = "Tx client and schema specification." [dependencies] anyhow = { workspace = true } cynic = { workspace = true } -derive_more = { version = "0.99" } +derive_more = { workspace = true } eventsource-client = { version = "0.10.2", optional = true } fuel-core-types = { workspace = true, features = ["serde"] } futures = { workspace = true, optional = true } diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 22664cde2bd..f30f46e7880 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -64,20 +64,20 @@ impl PeerToPeerPort for P2PAdapter { async fn get_sealed_block_headers( &self, block_height_range: SourcePeer>, - ) -> anyhow::Result>>> { + ) -> SourcePeer>>> { let SourcePeer { peer_id, data: block_height_range, } = block_height_range; - if let Some(service) = &self.service { + let result = if let Some(service) = &self.service { let headers = service .get_sealed_block_headers(peer_id.clone().into(), block_height_range) - .await?; - let sourced_headers = peer_id.bind(headers); - Ok(sourced_headers) + .await; + headers } else { Err(anyhow::anyhow!("No P2P service available")) - } + }; + peer_id.bind(result) } async fn get_transactions( diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index f83ba4e90ab..b14022eb2a6 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -12,6 +12,7 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } +derive_more = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true } futures = { workspace = true } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 29fe5da41b4..7a3f13713d7 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -158,7 +158,7 @@ where ); self.state.apply(|s| s.failed_to_process(incomplete_range)); } - result?; + result.map_err(|e| anyhow!(e))?; } Ok(()) } @@ -174,7 +174,7 @@ where &self, range: RangeInclusive, shutdown: &StateWatcher, - ) -> (usize, anyhow::Result<()>) { + ) -> (usize, Result<(), ImportError>) { let Self { state, params, @@ -189,17 +189,12 @@ where tokio::sync::mpsc::channel::<()>(1); let block_height = BlockHeight::from(*range.end()); - let peer = p2p.select_peer(block_height).await; + let peer = select_peer(block_height, p2p.as_ref()).await; if let Err(err) = peer { let err = Err(err); return (0, err) } let peer = peer.expect("Checked"); - if peer.is_none() { - let err = Err(anyhow!("Expected peer")); - return (0, err) - } - let peer = peer.expect("Checked"); let generator = stream::repeat((peer.clone(), state.clone(), p2p.clone(), executor.clone())); @@ -276,7 +271,8 @@ where .fold((0usize, Ok(())), |(count, res), result| async move { match result { Ok(_) => (count + 1, res), - Err(e) => (count, Err(e)), + Err(e) if 
!is_fatal_error(&e) => (count, Ok(())), + Err(e) => (count, Err(e)) } }) .in_current_span() @@ -288,6 +284,14 @@ where } } +fn is_fatal_error(e: &ImportError) -> bool { + match e { + ImportError::BlockHeightMismatch => false, + ImportError::BadBlockHeader => false, + _ => true, + } +} + fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, @@ -298,7 +302,7 @@ fn get_block_stream< p2p: Arc
<P>
, consensus: Arc, ) -> impl Stream< - Item = impl Future, Option)>>, + Item = impl Future, Option), ImportError>>, > { let Config { header_batch_size, .. @@ -309,23 +313,12 @@ fn get_block_stream< let iter = header_stream.zip(generator.clone()); iter.then(|(header, (peer, p2p, consensus))| async move { let header = header?; - let validity = - check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; - if validity { - consensus.await_da_height(&header.entity.da_height).await?; - Ok(Some(header)) - } else { - // TODO: Could be replaced with an error, then - // .into_scan_none_or_err() - // .scan_none_or_err() - // becomes: - // .into_scan_err() - // .scan_err() - Ok(None) - } + check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; + consensus.await_da_height(&header.entity.da_height).await?; + Ok(header) }) - .into_scan_none_or_err() - .scan_none_or_err() + .into_scan_err() + .scan_err() .chunks(*header_batch_size as usize) .zip(generator) .map(|(headers, (peer, p2p, ..))| { @@ -340,7 +333,7 @@ fn get_header_stream( range: RangeInclusive, params: &Config, p2p: Arc
<P>
, -) -> impl Stream> { +) -> impl Stream> { let Config { header_batch_size, .. } = params; @@ -359,8 +352,8 @@ fn get_header_stream( } }) .flatten() - .into_scan_none_or_err() - .scan_none_or_err() + .into_scan_err() + .scan_err() } fn range_chunks( @@ -382,19 +375,22 @@ async fn check_sealed_header< peer_id: PeerId, p2p: Arc
<P>
, consensus_port: Arc, -) -> anyhow::Result { +) -> Result<(), ImportError> { let validity = consensus_port .check_sealed_header(header) + .map_err(ImportError::ConsensusError) .trace_err("Failed to check consensus on header")?; - if !validity { + if validity { + Ok(()) + } else { report_peer( p2p.as_ref(), peer_id.clone(), PeerReportReason::BadBlockHeader, ) .await; + Err(ImportError::BadBlockHeader) } - Ok(validity) } /// Waits for a notify or shutdown signal. @@ -415,11 +411,42 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } -async fn get_headers_batch
<P>
( +#[derive(Debug, derive_more::Display)] +enum ImportError { + ConsensusError(anyhow::Error), + NetworkError(anyhow::Error), + NoSuitablePeer, + MissingBlockHeaders, + BadBlockHeader, + BlockHeightMismatch, + Other(anyhow::Error), +} + +impl From for ImportError { + fn from(value: anyhow::Error) -> Self { + ImportError::Other(value) + } +} + +async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result +where + P: PeerToPeerPort + Send + Sync + 'static, +{ + tracing::debug!("getting peer for block height {}", block_height); + let res = p2p.select_peer(block_height).await; + let peer_id = match res { + Ok(Some(peer_id)) => Ok(peer_id), + Ok(None) => Err(ImportError::NoSuitablePeer), + Err(e) => Err(e.into()), + }; + peer_id +} + +async fn get_sealed_block_headers
<P>
( peer: PeerId, - mut range: RangeInclusive, + range: RangeInclusive, p2p: &P, -) -> impl Stream>> +) -> Result, ImportError> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -431,40 +458,53 @@ where let start = *range.start(); let end = *range.end() + 1; let res = p2p - .get_sealed_block_headers(peer.bind(start..end)) - .await - .trace_err("Failed to get headers"); + .get_sealed_block_headers(peer.clone().bind(start..end)) + .await; + let SourcePeer { data: headers, .. } = res; + let headers = match headers { + Ok(Some(headers)) => Ok(headers), + Ok(None) => Err(ImportError::MissingBlockHeaders), + Err(e) => Err(e.into()), + }; + if let Err(e) = &headers { + if matches!(e, ImportError::MissingBlockHeaders) { + report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders).await; + } + } + headers +} + +async fn get_headers_batch
<P>
( + peer_id: PeerId, + range: RangeInclusive, + p2p: &P, +) -> impl Stream> +where + P: PeerToPeerPort + Send + Sync + 'static, +{ + tracing::debug!( + "getting header range from {} to {} inclusive", + range.start(), + range.end() + ); + let start = *range.start(); + let end = *range.end() + 1; + let res = get_sealed_block_headers(peer_id.clone(), range.clone(), p2p).await; let headers = match res { - Ok(sourced_headers) => { - let SourcePeer { - peer_id, - data: maybe_headers, - } = sourced_headers; - let headers = match maybe_headers { - None => { - tracing::error!( - "No headers received from peer {:?} for range {} to {}", - peer_id, - start, - end - ); - let error = Err(anyhow::anyhow!("Headers provider was unable to fulfill request for unspecified reason. Possibly because requested batch size was too large")); - vec![error] - } - Some(headers) => headers - .into_iter() - .map(move |header| { - let header = range.next().and_then(|height| { - if *(header.entity.height()) == height.into() { - Some(header) - } else { - None - } - }); + Ok(headers) => { + let headers = headers.into_iter(); + let heights = range.clone().into_iter().map(BlockHeight::from); + let headers = headers + .zip(heights) + .map(move |(header, expected_height)| { + let height = *header.entity.height(); + if height == expected_height { Ok(header) - }) - .collect(), - }; + } else { + Err(ImportError::BlockHeightMismatch) + } + }) + .collect::>(); if let Some(expected_len) = end.checked_sub(start) { if headers.len() != expected_len as usize || headers.iter().any(|h| h.is_err()) @@ -479,7 +519,6 @@ where } headers } - Err(e) => { let error = Err(e); vec![error] @@ -511,8 +550,8 @@ where async fn get_blocks
<P>
( p2p: Arc
<P>
, peer_id: PeerId, - headers: Vec>, -) -> anyhow::Result<(Vec, Option)> + headers: Vec>, +) -> Result<(Vec, Option), ImportError> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -579,7 +618,7 @@ where Err(error) => { // Failure to retrieve transactions due to a networking error, // invalid response, or any other reason constitutes a fatal error. - err = Some(error); + err = Some(ImportError::NetworkError(error)); Ok((vec![], err)) } } @@ -597,13 +636,16 @@ async fn execute_and_commit( executor: &E, state: &SharedMutex, block: SealedBlock, -) -> anyhow::Result<()> +) -> Result<(), ImportError> where E: BlockImporterPort + Send + Sync + 'static, { // Execute and commit the block. let height = *block.entity.header().height(); - let r = executor.execute_and_commit(block).await; + let r = executor + .execute_and_commit(block) + .await + .map_err(ImportError::from); // If the block executed successfully, mark it as committed. if r.is_ok() { @@ -619,9 +661,9 @@ trait StreamUtil: Sized { /// Turn a stream of `Result>` into a stream of `Result`. /// Close the stream if an error occurs or a `None` is received. /// Return the error if the stream closes. - fn into_scan_none_or_err(self) -> ScanNoneErr { - ScanNoneErr(self) - } + // fn into_scan_none_or_err(self) -> ScanNoneErr { + // ScanNoneErr(self) + // } /// Close the stream if an error occurs or an empty `Vector` is received. /// Return the error if the stream closes. @@ -639,64 +681,34 @@ trait StreamUtil: Sized { impl StreamUtil for S {} -struct ScanNoneErr(S); +// struct ScanNoneErr(S); struct ScanEmptyErr(S); struct ScanErr(S); -impl ScanNoneErr { - /// Scan the stream for `None` or errors. - fn scan_none_or_err(self) -> impl Stream> - where - S: Stream>> + Send + 'static, - { - let stream = self.0.boxed(); - futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { - if is_err { - None - } else { - let result = stream.next().await?; - is_err = result.is_err(); - result.transpose().map(|result| (result, (is_err, stream))) - } - }) - } -} - -#[cfg(test)] -mod test { - use crate::import::StreamUtil; - use anyhow::anyhow; - use futures::{ - StreamExt, - TryStreamExt, - }; - - #[tokio::test] - async fn test_it() { - let i = [Ok(Some(0)), Ok(Some(1)), Ok(None)]; - let stream = futures::stream::iter(i) - .into_scan_none_or_err() - .scan_none_or_err(); - let output = stream.collect::>().await; - println!("{:?}", output); - } - - #[tokio::test] - async fn test_it_2() { - let i = [Ok(Some(0)), Ok(Some(1)), Err(anyhow!("err!"))]; - let stream = futures::stream::iter(i) - .into_scan_none_or_err() - .scan_none_or_err(); - let output = stream.try_collect::>().await; - assert!(output.is_err()); - } -} +// impl ScanNoneErr { +// /// Scan the stream for `None` or errors. +// fn scan_none_or_err(self) -> impl Stream> +// where +// S: Stream, ImportError>> + Send + 'static, +// { +// let stream = self.0.boxed(); +// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { +// if is_err { +// None +// } else { +// let result = stream.next().await?; +// is_err = result.is_err(); +// result.transpose().map(|result| (result, (is_err, stream))) +// } +// }) +// } +// } impl ScanEmptyErr { /// Scan the stream for empty vector or errors. 
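    /// A rough doc sketch of the idea behind these scanners (illustrative names,
    /// not the real API): `futures::stream::unfold` threads a `done` flag through
    /// the state so the first error (or empty batch) is yielded and the stream
    /// then closes instead of polling further batches.
    ///
    /// ```ignore
    /// use futures::{stream, StreamExt};
    ///
    /// async fn first_error_closes_the_stream() {
    ///     let input = stream::iter(vec![Ok(vec![1u32]), Err("boom"), Ok(vec![2u32])]);
    ///     let scanned = stream::unfold((false, input.boxed()), |(done, mut s)| async move {
    ///         if done {
    ///             return None
    ///         }
    ///         let item = s.next().await?;
    ///         let done = item.is_err();
    ///         Some((item, (done, s)))
    ///     });
    ///     // Yields Ok([1]) and Err("boom"), then terminates; Ok([2]) is never polled.
    ///     assert_eq!(scanned.collect::<Vec<_>>().await.len(), 2);
    /// }
    /// ```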
- fn scan_empty_or_err(self) -> impl Stream>> + fn scan_empty_or_err(self) -> impl Stream, ImportError>> where - S: Stream>> + Send + 'static, + S: Stream, ImportError>> + Send + 'static, { let stream = self.0.boxed(); futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { @@ -716,9 +728,9 @@ impl ScanEmptyErr { impl ScanErr { /// Scan the stream for errors. - fn scan_err(self) -> impl Stream> + fn scan_err(self) -> impl Stream> where - S: Stream> + Send + 'static, + S: Stream> + Send + 'static, { let stream = self.0.boxed(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { diff --git a/crates/services/sync/src/import/test_helpers.rs b/crates/services/sync/src/import/test_helpers.rs index a4efd9a6f3f..94f39b15330 100644 --- a/crates/services/sync/src/import/test_helpers.rs +++ b/crates/services/sync/src/import/test_helpers.rs @@ -21,10 +21,7 @@ pub use counts::{ Count, SharedCounts, }; -use fuel_core_types::services::p2p::{ - PeerId, - SourcePeer, -}; + pub use pressure_block_importer::PressureBlockImporter; pub use pressure_consensus::PressureConsensus; pub use pressure_peer_to_peer::PressurePeerToPeer; @@ -42,19 +39,3 @@ pub fn empty_header(h: BlockHeight) -> SealedBlockHeader { consensus, } } - -pub fn peer_sourced_headers( - headers: Option>, -) -> SourcePeer>> { - peer_sourced_headers_peer_id(headers, vec![].into()) -} - -pub fn peer_sourced_headers_peer_id( - headers: Option>, - peer_id: PeerId, -) -> SourcePeer>> { - SourcePeer { - peer_id, - data: headers, - } -} diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 73159bdd970..62a1f920138 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -58,7 +58,7 @@ impl PeerToPeerPort for PressurePeerToPeer { async fn get_sealed_block_headers( &self, block_height_range: SourcePeer>, - ) -> anyhow::Result>>> { + ) -> SourcePeer>>> { self.counts.apply(|c| c.inc_headers()); tokio::time::sleep(self.durations[0]).await; self.counts.apply(|c| c.dec_headers()); @@ -98,15 +98,14 @@ impl PressurePeerToPeer { pub fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_headers().returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let range = range .clone() .map(BlockHeight::from) .map(empty_header) .collect(); - Some(range) - }); - Ok(headers) + Ok(Some(range)) + }) }); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 94dca300879..9f34613eb18 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -1,11 +1,7 @@ #![allow(non_snake_case)] use crate::{ - import::test_helpers::{ - empty_header, - peer_sourced_headers, - peer_sourced_headers_peer_id, - }, + import::test_helpers::empty_header, ports::{ MockBlockImporterPort, MockConsensusPort, @@ -38,11 +34,10 @@ async fn test_import_0_to_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() .times(1) @@ -77,11 +72,10 @@ async fn 
test_import_3_to_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() .times(1) @@ -125,11 +119,10 @@ async fn import__signature_fails_on_header_5_only() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() .times(1) @@ -174,11 +167,10 @@ async fn import__signature_fails_on_header_4_only() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() .times(0) @@ -213,7 +205,7 @@ async fn import__header_not_found() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Ok(peer_sourced_headers(Some(Vec::new())))); + .returning(|range| range.map(|_| Ok(Some(Vec::new())))); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -240,7 +232,7 @@ async fn import__header_response_incomplete() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Ok(peer_sourced_headers(None))); + .returning(|range| range.map(|_| Ok(None))); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -267,7 +259,7 @@ async fn import__header_5_not_found() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Ok(peer_sourced_headers(Some(vec![empty_header(4.into())])))); + .returning(|range| range.map(|_| Ok(Some(vec![empty_header(4.into())])))); p2p.expect_get_transactions_2() .times(1) .returning(|block_ids| { @@ -301,14 +293,8 @@ async fn import__header_4_not_found() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Ok(peer_sourced_headers(Some(vec![empty_header(5.into())])))); - p2p.expect_get_transactions_2() - .times(0) - .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); - Ok(Some(v)) - }); + .returning(|range| range.map(|_| Ok(Some(vec![empty_header(5.into())])))); + p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -335,11 +321,8 @@ async fn import__transactions_not_found() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -370,11 +353,8 @@ async fn import__transactions_not_found_for_header_4() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); let mut height = 3; p2p.expect_get_transactions_2() @@ -415,11 +395,8 @@ async fn import__transactions_not_found_for_header_5() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - 
empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -453,7 +430,7 @@ async fn import__p2p_error() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Err(anyhow::anyhow!("Some network error"))); + .returning(|range| range.map(|_| Err(anyhow::anyhow!("Some network error")))); p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); @@ -481,11 +458,8 @@ async fn import__p2p_error_on_4_transactions() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -573,11 +547,8 @@ async fn import__consensus_error_on_4() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2().times(0); @@ -622,11 +593,8 @@ async fn import__consensus_error_on_5() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -661,11 +629,8 @@ async fn import__execution_error_on_header_4() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -712,11 +677,8 @@ async fn import__execution_error_on_header_5() { }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| { - Ok(peer_sourced_headers(Some(vec![ - empty_header(4.into()), - empty_header(5.into()), - ]))) + .returning(|range| { + range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) }); p2p.expect_get_transactions_2() .times(1) @@ -760,6 +722,9 @@ async fn signature_always_fails() { .expect_check_sealed_header() .times(1) .returning(|_| Ok(false)); + consensus_port + .expect_await_da_height() + .returning(|_| Ok(())); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -790,11 +755,10 @@ async fn import__can_work_in_two_loops() { .times(2) .returning(move |range| { state.apply(|s| s.observe(6)); - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() .times(2) @@ -895,7 +859,7 @@ async fn import__missing_headers_sends_peer_report() { // Given PeerReportTestBuilder::new() // When - .with_get_headers(None) + .with_get_sealed_block_headers(None) // Then .run_with_expected_reports([PeerReportReason::MissingBlockHeaders]) .await; @@ -952,7 +916,7 @@ impl PeerReportTestBuilder { self } - pub fn with_get_headers( + pub fn with_get_sealed_block_headers( mut self, get_headers: Option>, ) -> Self { @@ -1038,23 +1002,17 @@ impl PeerReportTestBuilder { 
Ok(Some(peer_id.clone().into())) }); - let peer_id = self.shared_peer_id.clone(); if let Some(get_headers) = self.get_sealed_headers.clone() { - p2p.expect_get_sealed_block_headers().returning(move |_| { - Ok(peer_sourced_headers_peer_id( - get_headers.clone(), - peer_id.clone().into(), - )) - }); + p2p.expect_get_sealed_block_headers() + .returning(move |range| range.map(|_| Ok(get_headers.clone()))); } else { p2p.expect_get_sealed_block_headers() .returning(move |range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); } @@ -1188,11 +1146,10 @@ impl DefaultMocks for MockPeerToPeerPort { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions_2() diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 9b631c141e7..203bc786698 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -55,7 +55,7 @@ pub trait PeerToPeerPort { async fn get_sealed_block_headers( &self, block_height_range: SourcePeer>, - ) -> anyhow::Result>>>; + ) -> SourcePeer>>>; /// Request transactions from the network for the given block /// and source peer. diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 4fceaa381b1..51f701f4d4c 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -35,15 +35,14 @@ async fn test_new_service() { .into_boxed() }); p2p.expect_get_sealed_block_headers().returning(|range| { - let headers = range.map(|range| { + range.map(|range| { let headers = range .clone() .map(BlockHeight::from) .map(empty_header) .collect::>(); - Some(headers) - }); - Ok(headers) + Ok(Some(headers)) + }) }); p2p.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); From 8f17f5c5c0a8a76efc58d91db74c00d9271ddcb7 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 22 Sep 2023 21:12:55 -0400 Subject: [PATCH 43/87] WIP --- crates/services/sync/src/import.rs | 48 +++++++++++++++++------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 7a3f13713d7..67a31b9dd70 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -226,7 +226,7 @@ where }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) - // Continue the stream unless an error or empty batch occurs. + // Continue the stream unless an error occurs. // Note the error will be returned but the stream will close. 
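            // Illustrative summary of how those errors are consumed downstream,
            // based on the fold over executed blocks later in this function
            // (simplified, with the tuple accumulator spelled out):
            //
            //     match result {
            //         Ok(_) => (count + 1, res),                   // block committed
            //         Err(e) if !e.is_fatal() => (count, Ok(())),  // tolerated; stream already closed
            //         Err(e) => (count, Err(e)),                   // fatal; surfaces to the caller
            //     }
            //
            // so a non-fatal failure still reports the blocks that were committed
            // before the stream shut down.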
.into_scan_err() .scan_err() @@ -271,8 +271,8 @@ where .fold((0usize, Ok(())), |(count, res), result| async move { match result { Ok(_) => (count + 1, res), - Err(e) if !is_fatal_error(&e) => (count, Ok(())), - Err(e) => (count, Err(e)) + Err(e) if !e.is_fatal() => (count, Ok(())), + Err(e) => (count, Err(e)) } }) .in_current_span() @@ -284,14 +284,6 @@ where } } -fn is_fatal_error(e: &ImportError) -> bool { - match e { - ImportError::BlockHeightMismatch => false, - ImportError::BadBlockHeader => false, - _ => true, - } -} - fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, @@ -314,7 +306,10 @@ fn get_block_stream< iter.then(|(header, (peer, p2p, consensus))| async move { let header = header?; check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; - consensus.await_da_height(&header.entity.da_height).await?; + consensus + .await_da_height(&header.entity.da_height) + .await + .map_err(ImportError::ConsensusError)?; Ok(header) }) .into_scan_err() @@ -414,6 +409,7 @@ async fn wait_for_notify_or_shutdown( #[derive(Debug, derive_more::Display)] enum ImportError { ConsensusError(anyhow::Error), + ExecutionError(anyhow::Error), NetworkError(anyhow::Error), NoSuitablePeer, MissingBlockHeaders, @@ -428,6 +424,16 @@ impl From for ImportError { } } +impl ImportError { + fn is_fatal(&self) -> bool { + match self { + ImportError::BlockHeightMismatch => false, + ImportError::BadBlockHeader => false, + _ => true, + } + } +} + async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result where P: PeerToPeerPort + Send + Sync + 'static, @@ -539,14 +545,14 @@ where } // Get blocks correlating to the headers from a specific peer -// #[tracing::instrument( -// skip(p2p, headers), -// // fields( -// // height = **header.data.height(), -// // id = %header.data.consensus.generated.application_hash -// // ), -// err -// )] +#[tracing::instrument( + skip(p2p, headers), + // fields( + // height = **header.data.height(), + // id = %header.data.consensus.generated.application_hash + // ), + err +)] async fn get_blocks
<P>
( p2p: Arc
<P>
, peer_id: PeerId, @@ -645,7 +651,7 @@ where let r = executor .execute_and_commit(block) .await - .map_err(ImportError::from); + .map_err(ImportError::ExecutionError); // If the block executed successfully, mark it as committed. if r.is_ok() { From 117da4a0c7b59735ec5c69d894d08018e1a37ea8 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 22 Sep 2023 21:33:02 -0400 Subject: [PATCH 44/87] WIP --- crates/services/sync/src/import.rs | 56 +++++++++++++++--------- crates/services/sync/src/import/tests.rs | 2 +- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 67a31b9dd70..d4d5622fb49 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -24,6 +24,11 @@ use fuel_core_types::{ fuel_types::BlockHeight, services::p2p::PeerId, }; +use fuel_core_types::{ + blockchain::primitives::BlockId, + // fuel_tx::Transaction, + services::p2p::TransactionData, +}; use fuel_core_types::{ blockchain::{ block::Block, @@ -410,9 +415,9 @@ async fn wait_for_notify_or_shutdown( enum ImportError { ConsensusError(anyhow::Error), ExecutionError(anyhow::Error), - NetworkError(anyhow::Error), NoSuitablePeer, MissingBlockHeaders, + MissingTransactions, BadBlockHeader, BlockHeightMismatch, Other(anyhow::Error), @@ -429,6 +434,7 @@ impl ImportError { match self { ImportError::BlockHeightMismatch => false, ImportError::BadBlockHeader => false, + ImportError::MissingTransactions => false, _ => true, } } @@ -469,17 +475,37 @@ where let SourcePeer { data: headers, .. } = res; let headers = match headers { Ok(Some(headers)) => Ok(headers), - Ok(None) => Err(ImportError::MissingBlockHeaders), - Err(e) => Err(e.into()), - }; - if let Err(e) = &headers { - if matches!(e, ImportError::MissingBlockHeaders) { + Ok(None) => { report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders).await; + Err(ImportError::MissingBlockHeaders) } - } + Err(e) => Err(e.into()), + }; headers } +async fn get_transactions
<P>
( + peer_id: PeerId, + block_ids: Vec, + p2p: &P, +) -> Result, ImportError> +where + P: PeerToPeerPort + Send + Sync + 'static, +{ + let block_ids = peer_id.clone().bind(block_ids); + let res = p2p.get_transactions_2(block_ids).await; + let transactions = match res { + Ok(Some(transactions)) => Ok(transactions), + Ok(None) => { + report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions) + .await; + Err(ImportError::MissingTransactions) + } + Err(e) => Err(e.into()), + }; + transactions +} + async fn get_headers_batch
<P>
( peer_id: PeerId, range: RangeInclusive, @@ -577,19 +603,9 @@ where } let block_ids = headers.iter().map(|header| header.entity.id()).collect(); - let block_ids = peer_id.clone().bind(block_ids); - let maybe_txs = p2p.get_transactions_2(block_ids).await; + let maybe_txs = get_transactions(peer_id.clone(), block_ids, p2p.as_ref()).await; match maybe_txs { - Ok(None) => { - report_peer( - p2p.as_ref(), - peer_id.clone(), - PeerReportReason::MissingTransactions, - ) - .await; - Ok((vec![], err)) - } - Ok(Some(transaction_data)) => { + Ok(transaction_data) => { let headers = headers; let iter = headers.into_iter().zip(transaction_data); let mut blocks = vec![]; @@ -624,7 +640,7 @@ where Err(error) => { // Failure to retrieve transactions due to a networking error, // invalid response, or any other reason constitutes a fatal error. - err = Some(ImportError::NetworkError(error)); + err = Some(error); Ok((vec![], err)) } } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 9f34613eb18..8d950e0b36c 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -883,7 +883,7 @@ async fn import__missing_transactions_sends_peer_report() { // When .with_get_transactions_2(None) // Then - .run_with_expected_reports([PeerReportReason::MissingTransactions, PeerReportReason::SuccessfulBlockImport]) + .run_with_expected_reports([PeerReportReason::MissingTransactions]) .await; } From 32b4baf2b25e4d1793b5928e5143abc11348b96c Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sat, 23 Sep 2023 12:47:42 -0400 Subject: [PATCH 45/87] Rename TransactionData to Transactions --- crates/fuel-core/src/database/sealed_block.rs | 4 +- crates/fuel-core/src/service/adapters/p2p.rs | 4 +- crates/fuel-core/src/service/adapters/sync.rs | 4 +- crates/services/p2p/src/ports.rs | 4 +- .../p2p/src/request_response/messages.rs | 8 +- crates/services/p2p/src/service.rs | 15 +- crates/services/sync/src/import.rs | 145 +++++++++--------- .../test_helpers/pressure_peer_to_peer.rs | 4 +- crates/services/sync/src/import/tests.rs | 32 ++-- crates/services/sync/src/ports.rs | 4 +- crates/types/src/services/p2p.rs | 12 +- 11 files changed, 118 insertions(+), 118 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index f89cb358d77..a1cb3390d71 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -25,7 +25,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, - services::p2p::TransactionData, + services::p2p::Transactions, }; use std::ops::Range; @@ -140,7 +140,7 @@ impl Database { pub fn get_transactions_on_blocks( &self, block_ids: &Vec, - ) -> StorageResult>> { + ) -> StorageResult>> { let transactions = block_ids .iter() .map(|block_id| { diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 18330b9b4ba..1a577e0788d 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -14,7 +14,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, - services::p2p::TransactionData, + services::p2p::Transactions, }; use std::ops::Range; @@ -50,7 +50,7 @@ impl P2pDb for Database { fn get_transactions_2( &self, block_ids: &Vec, - ) -> StorageResult>> { + ) -> StorageResult>> { self.get_transactions_on_blocks(block_ids) } } diff --git 
a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index f30f46e7880..1c9d3ffd7c8 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -28,7 +28,7 @@ use fuel_core_types::{ }, PeerId, SourcePeer, - TransactionData, + Transactions, }, }; use std::ops::Range; @@ -100,7 +100,7 @@ impl PeerToPeerPort for P2PAdapter { async fn get_transactions_2( &self, block_ids: SourcePeer>, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { let SourcePeer { peer_id, data: blocks, diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index d715fe05137..3f0d6d10e7e 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -8,7 +8,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, - services::p2p::TransactionData, + services::p2p::Transactions, }; use std::ops::Range; @@ -36,7 +36,7 @@ pub trait P2pDb: Send + Sync { fn get_transactions_2( &self, block_ids: &Vec, - ) -> StorageResult>>; + ) -> StorageResult>>; } pub trait BlockHeightImporter: Send + Sync { diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index b3c5e2f5e60..007b18d3567 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -11,7 +11,7 @@ use fuel_core_types::{ }, fuel_tx::Transaction, fuel_types::BlockHeight, - services::p2p::TransactionData, + services::p2p::Transactions, }; use serde::{ Deserialize, @@ -54,7 +54,7 @@ pub enum ResponseMessage { SealedBlock(Box>), SealedHeaders(Option>), Transactions(Option>), - Transactions2(Option>), + Transactions2(Option>), } /// Holds oneshot channels for specific responses @@ -63,7 +63,7 @@ pub enum ResponseChannelItem { Block(oneshot::Sender>), SealedHeaders(oneshot::Sender>>), Transactions(oneshot::Sender>>), - Transactions2(oneshot::Sender>>), + Transactions2(oneshot::Sender>>), } /// Response that is sent over the wire @@ -83,7 +83,7 @@ pub enum OutboundResponse { Block(Option>), SealedHeaders(Option>), Transactions(Option>>), - Transactions2(Option>>), + Transactions2(Option>>), } #[derive(Debug, Error)] diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 0091178f80e..b3579ded6a9 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -51,8 +51,8 @@ use fuel_core_types::{ GossipsubMessageAcceptance, GossipsubMessageInfo, PeerId as FuelPeerId, - TransactionData, TransactionGossipData, + Transactions, }, }; use futures::{ @@ -108,7 +108,7 @@ enum TaskRequest { GetTransactions2 { block_ids: Vec, from_peer: PeerId, - channel: oneshot::Sender>>, + channel: oneshot::Sender>>, }, // Responds back to the p2p network RespondWithGossipsubMessageReport((GossipsubMessageInfo, GossipsubMessageAcceptance)), @@ -737,7 +737,7 @@ impl SharedState { &self, peer_id: Vec, block_ids: Vec, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); @@ -921,7 +921,7 @@ pub mod tests { fn get_transactions_2( &self, _block_ids: &Vec, - ) -> StorageResult>> { + ) -> StorageResult>> { unimplemented!() } } @@ -1046,6 +1046,13 @@ pub mod tests { ) -> StorageResult>> { todo!() } + + fn get_transactions_2( + &self, + block_ids: &Vec, + ) -> StorageResult>> { + todo!() + } } struct FakeBroadcast { diff --git 
a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d4d5622fb49..d70ec30ecda 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -27,7 +27,7 @@ use fuel_core_types::{ use fuel_core_types::{ blockchain::primitives::BlockId, // fuel_tx::Transaction, - services::p2p::TransactionData, + services::p2p::Transactions, }; use fuel_core_types::{ blockchain::{ @@ -44,7 +44,10 @@ use futures::{ // FutureExt, Stream, }; -use tokio::sync::Notify; +use tokio::{ + sync::Notify, + task::JoinError, +}; use tracing::Instrument; use crate::{ @@ -131,6 +134,36 @@ impl Import { } } +#[derive(Debug, derive_more::Display)] +enum ImportError { + ConsensusError(anyhow::Error), + ExecutionError(anyhow::Error), + NoSuitablePeer, + MissingBlockHeaders, + MissingTransactions, + BadBlockHeader, + BlockHeightMismatch, + JoinError(JoinError), + Other(anyhow::Error), +} + +impl From for ImportError { + fn from(value: anyhow::Error) -> Self { + ImportError::Other(value) + } +} + +impl ImportError { + fn is_fatal(&self) -> bool { + match self { + ImportError::BlockHeightMismatch => false, + ImportError::BadBlockHeader => false, + ImportError::MissingTransactions => false, + _ => true, + } + } +} + impl Import where P: PeerToPeerPort + Send + Sync + 'static, @@ -210,7 +243,8 @@ where params, p2p.clone(), consensus.clone(), - ); + ) + .await; let result = block_stream .map(move |stream_block_batch| { @@ -225,9 +259,9 @@ where blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response - _ = shutdown_signal.while_started() => Ok((vec![], None)) + _ = shutdown_signal.while_started() => (vec![], None) } - }).then(|task| async { task.map_err(|e| anyhow!(e))? }) + }).then(|task| async { task.map_err(ImportError::JoinError) }) }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) @@ -246,12 +280,12 @@ where // Then execute and commit the block .zip(generator) .then( - |(res, (peer, state, p2p, executor))| async move { - let (sealed_blocks, e) = res?; + |(blocks_result, (peer, state, p2p, executor))| async move { + let (sealed_blocks, error) = blocks_result?; let sealed_blocks = futures::stream::iter(sealed_blocks); let res = sealed_blocks.then(|sealed_block| async { execute_and_commit(executor.as_ref(), &state, sealed_block).await - }).try_collect::>().await.and_then(|v| e.map_or(Ok(v), Err)); + }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); match &res { Ok(_) => { report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; @@ -289,7 +323,7 @@ where } } -fn get_block_stream< +async fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( @@ -298,9 +332,7 @@ fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream< - Item = impl Future, Option), ImportError>>, -> { +) -> impl Stream, Option)>> { let Config { header_batch_size, .. } = params; @@ -308,24 +340,22 @@ fn get_block_stream< let generator = futures::stream::repeat((peer.clone(), p2p.clone(), consensus.clone())); let iter = header_stream.zip(generator.clone()); - iter.then(|(header, (peer, p2p, consensus))| async move { - let header = header?; - check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; - consensus - .await_da_height(&header.entity.da_height) - .await - .map_err(ImportError::ConsensusError)?; - Ok(header) - }) - .into_scan_err() - .scan_err() - .chunks(*header_batch_size as usize) - .zip(generator) - .map(|(headers, (peer, p2p, ..))| { - { get_blocks(p2p, peer, headers) } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() - }) + let checked_headers = iter + .then(|(header, (peer, p2p, consensus))| async move { + let header = header?; + check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; + consensus + .await_da_height(&header.entity.da_height) + .await + .map_err(ImportError::ConsensusError)?; + Ok(header) + }) + .into_scan_err() + .scan_err(); + checked_headers + .chunks(*header_batch_size as usize) + .zip(generator) + .map(|(headers, (peer_id, p2p, ..))| get_blocks(p2p, peer_id, headers)) } fn get_header_stream( @@ -411,35 +441,6 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } -#[derive(Debug, derive_more::Display)] -enum ImportError { - ConsensusError(anyhow::Error), - ExecutionError(anyhow::Error), - NoSuitablePeer, - MissingBlockHeaders, - MissingTransactions, - BadBlockHeader, - BlockHeightMismatch, - Other(anyhow::Error), -} - -impl From for ImportError { - fn from(value: anyhow::Error) -> Self { - ImportError::Other(value) - } -} - -impl ImportError { - fn is_fatal(&self) -> bool { - match self { - ImportError::BlockHeightMismatch => false, - ImportError::BadBlockHeader => false, - ImportError::MissingTransactions => false, - _ => true, - } - } -} - async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result where P: PeerToPeerPort + Send + Sync + 'static, @@ -488,7 +489,7 @@ async fn get_transactions
<P>
( peer_id: PeerId, block_ids: Vec, p2p: &P, -) -> Result, ImportError> +) -> Result, ImportError> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -571,19 +572,19 @@ where } // Get blocks correlating to the headers from a specific peer -#[tracing::instrument( - skip(p2p, headers), - // fields( - // height = **header.data.height(), - // id = %header.data.consensus.generated.application_hash - // ), - err -)] +// #[tracing::instrument( +// skip(p2p, headers), +// fields( +// height = **header.data.height(), +// id = %header.data.consensus.generated.application_hash +// ), +// err +// )] async fn get_blocks
<P>
( p2p: Arc
<P>
, peer_id: PeerId, headers: Vec>, -) -> Result<(Vec, Option), ImportError> +) -> (Vec, Option) where P: PeerToPeerPort + Send + Sync + 'static, { @@ -599,7 +600,7 @@ where .collect::>(); let mut err = errors.pop(); if headers.is_empty() { - return Ok((vec![], err)) + return (vec![], err) } let block_ids = headers.iter().map(|header| header.entity.id()).collect(); @@ -635,13 +636,13 @@ where .await; } } - Ok((blocks, err)) + (blocks, err) } Err(error) => { // Failure to retrieve transactions due to a networking error, // invalid response, or any other reason constitutes a fatal error. err = Some(error); - Ok((vec![], err)) + (vec![], err) } } } diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 62a1f920138..ef956a1f72c 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -20,7 +20,7 @@ use fuel_core_types::{ services::p2p::{ PeerId, SourcePeer, - TransactionData, + Transactions, }, }; use rand::{ @@ -81,7 +81,7 @@ impl PeerToPeerPort for PressurePeerToPeer { async fn get_transactions_2( &self, _block_id: SourcePeer>, - ) -> anyhow::Result>> { + ) -> anyhow::Result>> { todo!() } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 8d950e0b36c..d5b7016992a 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -9,7 +9,7 @@ use crate::{ PeerReportReason, }, }; -use fuel_core_types::services::p2p::TransactionData; +use fuel_core_types::services::p2p::Transactions; // use test_case::test_case; use super::*; @@ -43,7 +43,7 @@ async fn test_import_0_to_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -81,7 +81,7 @@ async fn test_import_3_to_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -128,7 +128,7 @@ async fn import__signature_fails_on_header_5_only() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -176,7 +176,7 @@ async fn import__signature_fails_on_header_4_only() { .times(0) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -264,7 +264,7 @@ async fn import__header_5_not_found() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -365,7 +365,7 @@ async fn import__transactions_not_found_for_header_4() { Ok(None) } else { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) } }); @@ -401,7 +401,7 @@ async fn import__transactions_not_found_for_header_5() { p2p.expect_get_transactions_2() .times(1) .returning(move |_| { - let v = 
vec![TransactionData::new()]; + let v = vec![Transactions::new()]; Ok(Some(v)) }); @@ -600,7 +600,7 @@ async fn import__consensus_error_on_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -636,7 +636,7 @@ async fn import__execution_error_on_header_4() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -684,7 +684,7 @@ async fn import__execution_error_on_header_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); @@ -764,7 +764,7 @@ async fn import__can_work_in_two_loops() { .times(2) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); let c = DefaultMocks::times([2]); @@ -891,7 +891,7 @@ struct PeerReportTestBuilder { shared_peer_id: Vec, get_sealed_headers: Option>>, // get_transactions: Option>>, - get_transactions_2: Option>>, + get_transactions_2: Option>>, check_sealed_header: Option, block_count: u32, debug: bool, @@ -934,7 +934,7 @@ impl PeerReportTestBuilder { pub fn with_get_transactions_2( mut self, - get_transactions: Option>, + get_transactions: Option>, ) -> Self { self.get_transactions_2 = Some(get_transactions); self @@ -1023,7 +1023,7 @@ impl PeerReportTestBuilder { } else { p2p.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); } @@ -1156,7 +1156,7 @@ impl DefaultMocks for MockPeerToPeerPort { .times(t.next().unwrap()) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| TransactionData::new()).collect(); + let v = data.into_iter().map(|_| Transactions::new()).collect(); Ok(Some(v)) }); p2p diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 203bc786698..e817eb3b01a 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -15,7 +15,7 @@ use fuel_core_types::{ services::p2p::{ PeerId, SourcePeer, - TransactionData, + Transactions, }, }; use std::ops::Range; @@ -69,7 +69,7 @@ pub trait PeerToPeerPort { async fn get_transactions_2( &self, block_ids: SourcePeer>, - ) -> anyhow::Result>>; + ) -> anyhow::Result>>; /// Report a peer for some reason to modify their reputation. 
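    /// In this sync service the reported reasons include
    /// `PeerReportReason::SuccessfulBlockImport` after a peer's blocks are
    /// committed, and `BadBlockHeader`, `MissingBlockHeaders`, or
    /// `MissingTransactions` when a response fails validation.
    ///
    /// A rough usage sketch (assuming the port returns an `anyhow::Result`;
    /// the binding names are illustrative):
    ///
    /// ```ignore
    /// // After a header from `peer_id` fails the consensus check:
    /// p2p.report_peer(peer_id.clone(), PeerReportReason::BadBlockHeader)
    ///     .await?;
    /// ```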
async fn report_peer( diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index a5dfff623af..bacfff330f5 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -10,16 +10,8 @@ use std::fmt::Debug; /// Contains types and logic for Peer Reputation pub mod peer_reputation; -/// Maps BlockId to its transactions -// #[derive(Debug, Clone)] -// #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -// pub struct TransactionData { -// /// Block id -// pub block_id: BlockId, -// /// transactions -// pub transactions: Vec, -// } -pub type TransactionData = Vec; +/// List of transactions +pub type Transactions = Vec; /// Lightweight representation of gossipped data that only includes IDs #[derive(Debug, Clone, Hash, PartialEq, Eq)] From 497e48c7d1b45ddae6fb752861d6a1efd0da017e Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sat, 23 Sep 2023 13:46:32 -0400 Subject: [PATCH 46/87] Update service.rs --- crates/services/p2p/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index b3579ded6a9..8a69bfa1810 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -1049,7 +1049,7 @@ pub mod tests { fn get_transactions_2( &self, - block_ids: &Vec, + _block_ids: &Vec, ) -> StorageResult>> { todo!() } From 79573f3861cd4cd0f571994146db13357159b751 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sat, 23 Sep 2023 16:12:28 -0400 Subject: [PATCH 47/87] Minor --- crates/types/src/services/p2p.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index bacfff330f5..67e9bbabe0c 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -71,7 +71,7 @@ impl SourcePeer { { let peer_id = self.peer_id; let data = f(self.data); - SourcePeer:: { peer_id, data } + SourcePeer { peer_id, data } } /// Asref From 69df9413fdfb4f424c33421d0b26af4451b6b2bb Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sat, 23 Sep 2023 16:52:56 -0400 Subject: [PATCH 48/87] Clippy --- crates/fuel-core/src/database/sealed_block.rs | 2 +- crates/fuel-core/src/service/adapters/p2p.rs | 2 +- crates/services/p2p/src/ports.rs | 2 +- crates/services/p2p/src/service.rs | 4 +- crates/services/sync/src/import.rs | 56 ++++++++++--------- 5 files changed, 34 insertions(+), 32 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index a1cb3390d71..b98aea12a0c 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -139,7 +139,7 @@ impl Database { pub fn get_transactions_on_blocks( &self, - block_ids: &Vec, + block_ids: &[BlockId], ) -> StorageResult>> { let transactions = block_ids .iter() diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 1a577e0788d..6955f62655c 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -49,7 +49,7 @@ impl P2pDb for Database { fn get_transactions_2( &self, - block_ids: &Vec, + block_ids: &[BlockId], ) -> StorageResult>> { self.get_transactions_on_blocks(block_ids) } diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 3f0d6d10e7e..14baed2f50c 100644 --- a/crates/services/p2p/src/ports.rs +++ 
b/crates/services/p2p/src/ports.rs @@ -35,7 +35,7 @@ pub trait P2pDb: Send + Sync { fn get_transactions_2( &self, - block_ids: &Vec, + block_ids: &[BlockId], ) -> StorageResult>>; } diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 8a69bfa1810..13cd05eb261 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -920,7 +920,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &Vec, + _block_ids: &[BlockId], ) -> StorageResult>> { unimplemented!() } @@ -1049,7 +1049,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &Vec, + _block_ids: &[BlockId], ) -> StorageResult>> { todo!() } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d70ec30ecda..2b045b47c1d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -155,6 +155,7 @@ impl From for ImportError { impl ImportError { fn is_fatal(&self) -> bool { + #[allow(clippy::match_like_matches_macro)] match self { ImportError::BlockHeightMismatch => false, ImportError::BadBlockHeader => false, @@ -327,7 +328,7 @@ async fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - peer: PeerId, + peer_id: PeerId, range: RangeInclusive, params: &Config, p2p: Arc
<P>
, @@ -336,18 +337,16 @@ async fn get_block_stream< let Config { header_batch_size, .. } = params; - let header_stream = get_header_stream(peer.clone(), range, params, p2p.clone()); + let header_stream = get_header_stream(peer_id.clone(), range, params, p2p.clone()); let generator = - futures::stream::repeat((peer.clone(), p2p.clone(), consensus.clone())); + futures::stream::repeat((peer_id.clone(), p2p.clone(), consensus.clone())); let iter = header_stream.zip(generator.clone()); let checked_headers = iter - .then(|(header, (peer, p2p, consensus))| async move { + .then(|(header, (peer_id, p2p, consensus))| async move { let header = header?; - check_sealed_header(&header, peer, p2p.clone(), consensus.clone()).await?; - consensus - .await_da_height(&header.entity.da_height) - .await - .map_err(ImportError::ConsensusError)?; + check_sealed_header(&header, peer_id, p2p.as_ref(), consensus.as_ref()) + .await?; + await_da_height(&header, consensus.as_ref()).await?; Ok(header) }) .into_scan_err() @@ -403,8 +402,8 @@ async fn check_sealed_header< >( header: &SealedBlockHeader, peer_id: PeerId, - p2p: Arc
<P>
, - consensus_port: Arc, + p2p: &P, + consensus_port: &C, ) -> Result<(), ImportError> { let validity = consensus_port .check_sealed_header(header) @@ -413,16 +412,22 @@ async fn check_sealed_header< if validity { Ok(()) } else { - report_peer( - p2p.as_ref(), - peer_id.clone(), - PeerReportReason::BadBlockHeader, - ) - .await; + report_peer(p2p, peer_id.clone(), PeerReportReason::BadBlockHeader).await; Err(ImportError::BadBlockHeader) } } +async fn await_da_height( + header: &SealedBlockHeader, + consensus: &C, +) -> Result<(), ImportError> { + consensus + .await_da_height(&header.entity.da_height) + .await + .map_err(ImportError::ConsensusError)?; + Ok(()) +} + /// Waits for a notify or shutdown signal. /// Returns true if the notify signal was received. async fn wait_for_notify_or_shutdown( @@ -447,12 +452,11 @@ where { tracing::debug!("getting peer for block height {}", block_height); let res = p2p.select_peer(block_height).await; - let peer_id = match res { + match res { Ok(Some(peer_id)) => Ok(peer_id), Ok(None) => Err(ImportError::NoSuitablePeer), Err(e) => Err(e.into()), - }; - peer_id + } } async fn get_sealed_block_headers
<P>
( @@ -474,15 +478,14 @@ where .get_sealed_block_headers(peer.clone().bind(start..end)) .await; let SourcePeer { data: headers, .. } = res; - let headers = match headers { + match headers { Ok(Some(headers)) => Ok(headers), Ok(None) => { report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders).await; Err(ImportError::MissingBlockHeaders) } Err(e) => Err(e.into()), - }; - headers + } } async fn get_transactions
<P>
( @@ -495,7 +498,7 @@ where { let block_ids = peer_id.clone().bind(block_ids); let res = p2p.get_transactions_2(block_ids).await; - let transactions = match res { + match res { Ok(Some(transactions)) => Ok(transactions), Ok(None) => { report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions) @@ -503,8 +506,7 @@ where Err(ImportError::MissingTransactions) } Err(e) => Err(e.into()), - }; - transactions + } } async fn get_headers_batch
<P>
( @@ -526,7 +528,7 @@ where let headers = match res { Ok(headers) => { let headers = headers.into_iter(); - let heights = range.clone().into_iter().map(BlockHeight::from); + let heights = range.map(BlockHeight::from); let headers = headers .zip(heights) .map(move |(header, expected_height)| { From 76b475ea1dd5da36dab6ad68956ad30e65bd9786 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sat, 23 Sep 2023 17:08:43 -0400 Subject: [PATCH 49/87] Update sync.rs --- crates/fuel-core/src/service/adapters/sync.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 1c9d3ffd7c8..107d61a2e93 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -70,10 +70,9 @@ impl PeerToPeerPort for P2PAdapter { data: block_height_range, } = block_height_range; let result = if let Some(service) = &self.service { - let headers = service + service .get_sealed_block_headers(peer_id.clone().into(), block_height_range) - .await; - headers + .await } else { Err(anyhow::anyhow!("No P2P service available")) }; From 4c129d31094f58a955651895b436c6c9775c70ce Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sun, 24 Sep 2023 09:34:43 -0400 Subject: [PATCH 50/87] Fix import tests --- crates/services/sync/src/import.rs | 18 ++++++++++-------- .../sync/src/import/test_helpers/counts.rs | 7 +++++++ .../test_helpers/pressure_peer_to_peer.rs | 15 +++++++++++++-- 3 files changed, 30 insertions(+), 10 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 2b045b47c1d..b59dbcd09b3 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -154,6 +154,9 @@ impl From for ImportError { } impl ImportError { + /// All `ImportErrors` will stop the import stream. Fatal `ImportErrors` + /// will prevent the notify signal at the end of the import. Non-fatal + /// `ImportErrors` will allow the notify signal at the end of the import. fn is_fatal(&self) -> bool { #[allow(clippy::match_like_matches_macro)] match self { @@ -174,12 +177,12 @@ where #[tracing::instrument(skip_all)] /// Execute imports until a shutdown is requested. pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown).await?; + self.import_inner(shutdown).await.map_err(|e| anyhow!(e))?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - async fn import_inner(&self, shutdown: &StateWatcher) -> anyhow::Result<()> { + async fn import_inner(&self, shutdown: &StateWatcher) -> Result<(), ImportError> { // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. 
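            // Illustrative outline of the error handling that follows (simplified;
            // the stream-launching helper's name is elided here): the launched
            // stream yields `(count, result)`, where `count` is the number of
            // blocks actually committed. If that falls short of the requested
            // range, the unprocessed tail is handed back to the state via
            // `failed_to_process(incomplete_range)` before `result` is propagated.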
@@ -197,7 +200,7 @@ where ); self.state.apply(|s| s.failed_to_process(incomplete_range)); } - result.map_err(|e| anyhow!(e))?; + result?; } Ok(()) } @@ -235,7 +238,7 @@ where } let peer = peer.expect("Checked"); - let generator = + let context_generator = stream::repeat((peer.clone(), state.clone(), p2p.clone(), executor.clone())); let block_stream = get_block_stream( @@ -244,8 +247,7 @@ where params, p2p.clone(), consensus.clone(), - ) - .await; + ); let result = block_stream .map(move |stream_block_batch| { @@ -279,7 +281,7 @@ where } }) // Then execute and commit the block - .zip(generator) + .zip(context_generator) .then( |(blocks_result, (peer, state, p2p, executor))| async move { let (sealed_blocks, error) = blocks_result?; @@ -324,7 +326,7 @@ where } } -async fn get_block_stream< +fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( diff --git a/crates/services/sync/src/import/test_helpers/counts.rs b/crates/services/sync/src/import/test_helpers/counts.rs index d98e75ddd30..0ef831f36a6 100644 --- a/crates/services/sync/src/import/test_helpers/counts.rs +++ b/crates/services/sync/src/import/test_helpers/counts.rs @@ -29,9 +29,16 @@ impl Counts { self.now.transactions += 1; self.max.transactions = self.max.transactions.max(self.now.transactions); } + pub fn add_transactions(&mut self, transactions: usize) { + self.now.transactions += transactions; + self.max.transactions = self.max.transactions.max(self.now.transactions); + } pub fn dec_transactions(&mut self) { self.now.transactions -= 1; } + pub fn sub_transactions(&mut self, transactions: usize) { + self.now.transactions -= transactions; + } pub fn inc_consensus(&mut self) { self.now.consensus += 1; self.max.consensus = self.max.consensus.max(self.now.consensus); diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index ef956a1f72c..8ad2aa535a9 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -80,9 +80,15 @@ impl PeerToPeerPort for PressurePeerToPeer { async fn get_transactions_2( &self, - _block_id: SourcePeer>, + block_ids: SourcePeer>, ) -> anyhow::Result>> { - todo!() + let transactions_count = block_ids.data.len(); + self.counts + .apply(|c| c.add_transactions(transactions_count)); + tokio::time::sleep(self.durations[1]).await; + self.counts + .apply(|c| c.sub_transactions(transactions_count)); + self.p2p.get_transactions_2(block_ids).await } async fn report_peer( @@ -109,6 +115,11 @@ impl PressurePeerToPeer { }); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); + mock.expect_get_transactions_2().returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::new()).collect(); + Ok(Some(v)) + }); Self { p2p: mock, durations: delays, From 7ee4304a478ff90fd5632e562ca4b66fd2d97548 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Sun, 24 Sep 2023 10:09:24 -0400 Subject: [PATCH 51/87] Tests WIP --- crates/services/sync/src/service/tests.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 51f701f4d4c..73453fa3f8b 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -2,6 +2,7 @@ use fuel_core_services::{ stream::IntoBoxStream, Service, 
}; +use fuel_core_types::services::p2p::PeerId; use futures::{ stream, StreamExt, @@ -34,6 +35,10 @@ async fn test_new_service() { }) .into_boxed() }); + p2p.expect_select_peer().times(1).returning(move |_| { + let peer_id: PeerId = vec![1, 2, 3, 4, 5].into(); + Ok(Some(peer_id)) + }); p2p.expect_get_sealed_block_headers().returning(|range| { range.map(|range| { let headers = range @@ -44,8 +49,11 @@ async fn test_new_service() { Ok(Some(headers)) }) }); - p2p.expect_get_transactions() - .returning(|_| Ok(Some(vec![]))); + p2p.expect_get_transactions_2().returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| Vec::new()).collect(); + Ok(Some(v)) + }); let mut importer = MockBlockImporterPort::default(); importer .expect_committed_height_stream() From 4db827dd1fe41fb3b39ae57314c2187267ed9785 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 25 Sep 2023 14:27:38 -0400 Subject: [PATCH 52/87] Serialization WIP --- crates/fuel-core/src/database/sealed_block.rs | 8 ++++-- crates/fuel-core/src/p2p_test_helpers.rs | 2 +- crates/fuel-core/src/service/adapters/p2p.rs | 7 +++-- crates/services/p2p/src/codecs/postcard.rs | 6 +++-- crates/services/p2p/src/p2p_service.rs | 11 ++++++++ crates/services/p2p/src/ports.rs | 7 +++-- .../p2p/src/request_response/messages.rs | 14 +++++++--- crates/services/p2p/src/service.rs | 26 ++++++++++++------- crates/types/src/blockchain/primitives.rs | 22 ++++++++++++++++ crates/types/src/services/p2p.rs | 1 - 10 files changed, 80 insertions(+), 24 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index b98aea12a0c..64a7258bb4a 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -19,7 +19,10 @@ use fuel_core_types::{ Genesis, Sealed, }, - primitives::BlockId, + primitives::{ + BlockId, + BlockIds, + }, SealedBlock, SealedBlockHeader, }, @@ -139,9 +142,10 @@ impl Database { pub fn get_transactions_on_blocks( &self, - block_ids: &[BlockId], + block_ids: &BlockIds, ) -> StorageResult>> { let transactions = block_ids + .0 .iter() .map(|block_id| { let transactions = self diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index 92dc84f0384..fb7a6040f27 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -316,7 +316,7 @@ pub fn make_config(name: String, chain_config: ChainConfig) -> Config { pub async fn make_node(node_config: Config, test_txs: Vec) -> Node { let db = Database::in_memory(); let node = tokio::time::timeout( - Duration::from_secs(1), + Duration::from_secs(1000), FuelService::from_database(db.clone(), node_config), ) .await diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 6955f62655c..1fd713a028c 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -8,7 +8,10 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::BlockId, + primitives::{ + BlockId, + BlockIds, + }, SealedBlock, SealedBlockHeader, }, @@ -49,7 +52,7 @@ impl P2pDb for Database { fn get_transactions_2( &self, - block_ids: &[BlockId], + block_ids: &BlockIds, ) -> StorageResult>> { self.get_transactions_on_blocks(block_ids) } diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 
c1b533f7a7a..140f80028d6 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -60,8 +60,10 @@ impl PostcardCodec { &self, encoded_data: &'a [u8], ) -> Result { - postcard::from_bytes(encoded_data) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) + postcard::from_bytes(encoded_data).map_err(|e| { + dbg!(&e); + io::Error::new(io::ErrorKind::Other, e.to_string()) + }) } fn serialize(&self, data: &D) -> Result, io::Error> { diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 7c358d475a2..6496e28deb5 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -609,6 +609,17 @@ impl FuelP2PService { ); } } + ( + Some(ResponseChannelItem::Transactions2(channel)), + Ok(ResponseMessage::Transactions2(transactions)), + ) => { + if channel.send(transactions).is_err() { + debug!( + "Failed to send through the channel for {:?}", + request_id + ); + } + } ( Some(ResponseChannelItem::SealedHeaders(channel)), Ok(ResponseMessage::SealedHeaders(headers)), diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 14baed2f50c..1b91032eeaa 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -2,7 +2,10 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::BlockId, + primitives::{ + BlockId, + BlockIds, + }, SealedBlock, SealedBlockHeader, }, @@ -35,7 +38,7 @@ pub trait P2pDb: Send + Sync { fn get_transactions_2( &self, - block_ids: &[BlockId], + block_ids: &BlockIds, ) -> StorageResult>>; } diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 007b18d3567..9c2fbf50515 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -5,12 +5,18 @@ use std::{ use fuel_core_types::{ blockchain::{ - primitives::BlockId, + primitives::{ + BlockId, + BlockIds, + }, SealedBlock, SealedBlockHeader, }, fuel_tx::Transaction, - fuel_types::BlockHeight, + fuel_types::{ + BlockHeight, + Bytes32, + }, services::p2p::Transactions, }; use serde::{ @@ -27,7 +33,7 @@ use tokio::sync::oneshot; pub(crate) const REQUEST_RESPONSE_PROTOCOL_ID: &[u8] = b"/fuel/req_res/0.0.1"; /// Max Size in Bytes of the Request Message -pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::(); +pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::() * 50; // Peer receives a `RequestMessage`. 
// It prepares a response in form of `OutboundResponse` @@ -45,7 +51,7 @@ pub enum RequestMessage { Block(BlockHeight), SealedHeaders(Range), Transactions(#[serde_as(as = "FromInto<[u8; 32]>")] BlockId), - Transactions2(Vec), + Transactions2(#[serde_as(as = "FromInto>")] BlockIds), } /// Final Response Message that p2p service sends to the Orchestrator diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 13cd05eb261..70d151995b6 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -35,7 +35,10 @@ use fuel_core_types::{ blockchain::{ block::Block, consensus::ConsensusVote, - primitives::BlockId, + primitives::{ + BlockId, + BlockIds, + }, SealedBlock, SealedBlockHeader, }, @@ -460,7 +463,7 @@ where let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } Some(TaskRequest::GetSealedHeaders { block_height_range, from_peer, channel: response}) => { - let request_msg = RequestMessage::SealedHeaders(block_height_range.clone()); + let request_msg = RequestMessage::SealedHeaders(block_height_range); let channel_item = ResponseChannelItem::SealedHeaders(response); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } @@ -470,6 +473,7 @@ where let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { + let block_ids = BlockIds(block_ids); let request_msg = RequestMessage::Transactions2(block_ids); let channel_item = ResponseChannelItem::Transactions2(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); @@ -740,16 +744,18 @@ impl SharedState { ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); + dbg!(&from_peer); - self.request_sender - .send(TaskRequest::GetTransactions2 { - block_ids, - from_peer, - channel: sender, - }) - .await?; + let request = TaskRequest::GetTransactions2 { + block_ids, + from_peer, + channel: sender, + }; + self.request_sender.send(request).await?; - receiver.await.map_err(|e| anyhow!("{}", e)) + let r = receiver.await.map_err(|e| anyhow!("Receiver error! {}", e)); + dbg!(&r); + r } pub fn broadcast_vote(&self, vote: Arc) -> anyhow::Result<()> { diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs index 0738e710e28..52376515bd8 100644 --- a/crates/types/src/blockchain/primitives.rs +++ b/crates/types/src/blockchain/primitives.rs @@ -162,3 +162,25 @@ impl From<[u8; 32]> for BlockId { Self(bytes.into()) } } + +/// Array of BlockId +#[derive(serde::Serialize, serde::Deserialize, Eq, PartialEq, Debug, Clone)] +pub struct BlockIds(pub Vec); + +impl From> for BlockIds { + fn from(value: Vec<[u8; 32]>) -> Self { + let block_ids = value.into_iter().map(BlockId::from).collect(); + Self(block_ids) + } +} + +impl From for Vec<[u8; 32]> { + fn from(value: BlockIds) -> Self { + let bytes = value + .0 + .into_iter() + .map(|block_id| block_id.0.into()) + .collect(); + bytes + } +} diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 67e9bbabe0c..919f043e800 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -1,7 +1,6 @@ //! 
Contains types related to P2P data use crate::{ - // blockchain::primitives::BlockId, fuel_tx::Transaction, fuel_types::BlockHeight, }; From d377593627265d8c90eaf532c357bd3feb895117 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 25 Sep 2023 14:30:48 -0400 Subject: [PATCH 53/87] Update postcard.rs --- crates/services/p2p/src/codecs/postcard.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 140f80028d6..c1b533f7a7a 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -60,10 +60,8 @@ impl PostcardCodec { &self, encoded_data: &'a [u8], ) -> Result { - postcard::from_bytes(encoded_data).map_err(|e| { - dbg!(&e); - io::Error::new(io::ErrorKind::Other, e.to_string()) - }) + postcard::from_bytes(encoded_data) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) } fn serialize(&self, data: &D) -> Result, io::Error> { From 6483a320a62340d452369526b9fbc196db7a316e Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 25 Sep 2023 15:48:28 -0400 Subject: [PATCH 54/87] Integration tests passing --- crates/fuel-core/src/database/sealed_block.rs | 3 ++- crates/services/p2p/src/codecs/postcard.rs | 2 +- .../p2p/src/request_response/messages.rs | 7 ++--- crates/services/p2p/src/service.rs | 4 +-- crates/services/sync/src/import.rs | 13 +++++----- .../test_helpers/pressure_peer_to_peer.rs | 2 +- crates/services/sync/src/import/tests.rs | 26 +++++++++---------- crates/services/sync/src/service/tests.rs | 7 +++-- crates/types/src/blockchain/primitives.rs | 4 +-- crates/types/src/services/p2p.rs | 4 ++- 10 files changed, 38 insertions(+), 34 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index 64a7258bb4a..603e567f581 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -150,7 +150,8 @@ impl Database { .map(|block_id| { let transactions = self .get_sealed_block_by_id(block_id)? - .map(|Sealed { entity: block, .. }| block.into_inner().1); + .map(|Sealed { entity: block, .. 
}| block.into_inner().1) + .map(|transactions| Transactions(transactions)); Ok(transactions) }) .collect::>()?; diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index c1b533f7a7a..b1cea65b353 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -262,7 +262,7 @@ impl RequestResponseConverter for PostcardCodec { None }; - Ok(NetworkResponse::Transactions(response)) + Ok(NetworkResponse::Transactions2(response)) } OutboundResponse::SealedHeaders(maybe_headers) => { let response = maybe_headers diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 9c2fbf50515..b2cb9259470 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -13,10 +13,7 @@ use fuel_core_types::{ SealedBlockHeader, }, fuel_tx::Transaction, - fuel_types::{ - BlockHeight, - Bytes32, - }, + fuel_types::BlockHeight, services::p2p::Transactions, }; use serde::{ @@ -33,7 +30,7 @@ use tokio::sync::oneshot; pub(crate) const REQUEST_RESPONSE_PROTOCOL_ID: &[u8] = b"/fuel/req_res/0.0.1"; /// Max Size in Bytes of the Request Message -pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::() * 50; +pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::() * 100; // Peer receives a `RequestMessage`. // It prepares a response in form of `OutboundResponse` diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 70d151995b6..99ae02b43df 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -926,7 +926,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &[BlockId], + _block_ids: &BlockIds, ) -> StorageResult>> { unimplemented!() } @@ -1055,7 +1055,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &[BlockId], + _block_ids: &BlockIds, ) -> StorageResult>> { todo!() } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index b59dbcd09b3..6cd4dcf9373 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -620,12 +620,13 @@ where consensus, entity: header, } = block_header; - let block = Block::try_from_executed(header, transactions).map(|block| { - SealedBlock { - entity: block, - consensus, - } - }); + let block = + Block::try_from_executed(header, transactions.0).map(|block| { + SealedBlock { + entity: block, + consensus, + } + }); if let Some(block) = block { blocks.push(block); } else { diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 8ad2aa535a9..44793992f99 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -117,7 +117,7 @@ impl PressurePeerToPeer { .returning(|_| Ok(Some(vec![]))); mock.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); Self { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index d5b7016992a..9ff1708dc6a 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -43,7 +43,7 @@ async fn 
test_import_0_to_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -81,7 +81,7 @@ async fn test_import_3_to_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -128,7 +128,7 @@ async fn import__signature_fails_on_header_5_only() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -176,7 +176,7 @@ async fn import__signature_fails_on_header_4_only() { .times(0) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -264,7 +264,7 @@ async fn import__header_5_not_found() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -365,7 +365,7 @@ async fn import__transactions_not_found_for_header_4() { Ok(None) } else { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) } }); @@ -401,7 +401,7 @@ async fn import__transactions_not_found_for_header_5() { p2p.expect_get_transactions_2() .times(1) .returning(move |_| { - let v = vec![Transactions::new()]; + let v = vec![Transactions::default()]; Ok(Some(v)) }); @@ -600,7 +600,7 @@ async fn import__consensus_error_on_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -636,7 +636,7 @@ async fn import__execution_error_on_header_4() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -684,7 +684,7 @@ async fn import__execution_error_on_header_5() { .times(1) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); @@ -764,7 +764,7 @@ async fn import__can_work_in_two_loops() { .times(2) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); let c = DefaultMocks::times([2]); @@ -1023,7 +1023,7 @@ impl PeerReportTestBuilder { } else { p2p.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); } @@ -1156,7 +1156,7 @@ impl DefaultMocks for MockPeerToPeerPort { .times(t.next().unwrap()) .returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::new()).collect(); + let v = 
data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); p2p diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 73453fa3f8b..6648695dfd3 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -2,7 +2,10 @@ use fuel_core_services::{ stream::IntoBoxStream, Service, }; -use fuel_core_types::services::p2p::PeerId; +use fuel_core_types::services::p2p::{ + PeerId, + Transactions, +}; use futures::{ stream, StreamExt, @@ -51,7 +54,7 @@ async fn test_new_service() { }); p2p.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; - let v = data.into_iter().map(|_| Vec::new()).collect(); + let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); let mut importer = MockBlockImporterPort::default(); diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs index 52376515bd8..609074fdd4d 100644 --- a/crates/types/src/blockchain/primitives.rs +++ b/crates/types/src/blockchain/primitives.rs @@ -163,8 +163,8 @@ impl From<[u8; 32]> for BlockId { } } -/// Array of BlockId -#[derive(serde::Serialize, serde::Deserialize, Eq, PartialEq, Debug, Clone)] +/// Vector of BlockId +#[derive(Eq, PartialEq, Debug, Clone)] pub struct BlockIds(pub Vec); impl From> for BlockIds { diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 919f043e800..3175efba5b0 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -10,7 +10,9 @@ use std::fmt::Debug; pub mod peer_reputation; /// List of transactions -pub type Transactions = Vec; +#[derive(Debug, Clone, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct Transactions(pub Vec); /// Lightweight representation of gossipped data that only includes IDs #[derive(Debug, Clone, Hash, PartialEq, Eq)] From 5c72d46852f0b0b881787dddef74df5c6aa77f03 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 25 Sep 2023 17:34:00 -0400 Subject: [PATCH 55/87] Use range instead of blockids --- crates/fuel-core/src/database/sealed_block.rs | 17 ++++----- crates/fuel-core/src/service/adapters/p2p.rs | 9 ++--- crates/fuel-core/src/service/adapters/sync.rs | 8 ++-- crates/services/p2p/src/ports.rs | 7 +--- .../p2p/src/request_response/messages.rs | 9 ++--- crates/services/p2p/src/service.rs | 26 ++++++------- crates/services/sync/src/import.rs | 38 +++++++++---------- .../test_helpers/pressure_peer_to_peer.rs | 2 +- crates/services/sync/src/ports.rs | 2 +- crates/types/src/blockchain/primitives.rs | 22 ----------- 10 files changed, 50 insertions(+), 90 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index 603e567f581..eb1fc4cc539 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -19,10 +19,7 @@ use fuel_core_types::{ Genesis, Sealed, }, - primitives::{ - BlockId, - BlockIds, - }, + primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -142,14 +139,14 @@ impl Database { pub fn get_transactions_on_blocks( &self, - block_ids: &BlockIds, + block_height_range: Range, ) -> StorageResult>> { - let transactions = block_ids - .0 - .iter() - .map(|block_id| { + let transactions = block_height_range + .into_iter() + .map(BlockHeight::from) + .map(|block_height| { let transactions = self - .get_sealed_block_by_id(block_id)? 
+ .get_sealed_block_by_height(&block_height)? .map(|Sealed { entity: block, .. }| block.into_inner().1) .map(|transactions| Transactions(transactions)); Ok(transactions) diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 1fd713a028c..5accfa12b10 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -8,10 +8,7 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::{ - BlockId, - BlockIds, - }, + primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -52,9 +49,9 @@ impl P2pDb for Database { fn get_transactions_2( &self, - block_ids: &BlockIds, + block_height_range: Range, ) -> StorageResult>> { - self.get_transactions_on_blocks(block_ids) + self.get_transactions_on_blocks(block_height_range) } } diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 107d61a2e93..0fcd4377d39 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -98,15 +98,15 @@ impl PeerToPeerPort for P2PAdapter { async fn get_transactions_2( &self, - block_ids: SourcePeer>, + range: SourcePeer>, ) -> anyhow::Result>> { let SourcePeer { peer_id, - data: blocks, - } = block_ids; + data: range, + } = range; if let Some(service) = &self.service { service - .get_transactions_2_from_peer(peer_id.into(), blocks) + .get_transactions_2_from_peer(peer_id.into(), range) .await } else { Err(anyhow::anyhow!("No P2P service available")) diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 1b91032eeaa..46ff1d1534c 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -2,10 +2,7 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::{ - BlockId, - BlockIds, - }, + primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -38,7 +35,7 @@ pub trait P2pDb: Send + Sync { fn get_transactions_2( &self, - block_ids: &BlockIds, + block_height_range: Range, ) -> StorageResult>>; } diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index b2cb9259470..fcdcfbab251 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -5,10 +5,7 @@ use std::{ use fuel_core_types::{ blockchain::{ - primitives::{ - BlockId, - BlockIds, - }, + primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -30,7 +27,7 @@ use tokio::sync::oneshot; pub(crate) const REQUEST_RESPONSE_PROTOCOL_ID: &[u8] = b"/fuel/req_res/0.0.1"; /// Max Size in Bytes of the Request Message -pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::() * 100; +pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::(); // Peer receives a `RequestMessage`. 
// It prepares a response in form of `OutboundResponse` @@ -48,7 +45,7 @@ pub enum RequestMessage { Block(BlockHeight), SealedHeaders(Range), Transactions(#[serde_as(as = "FromInto<[u8; 32]>")] BlockId), - Transactions2(#[serde_as(as = "FromInto>")] BlockIds), + Transactions2(Range), } /// Final Response Message that p2p service sends to the Orchestrator diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 99ae02b43df..3c5e6624d4d 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -35,10 +35,7 @@ use fuel_core_types::{ blockchain::{ block::Block, consensus::ConsensusVote, - primitives::{ - BlockId, - BlockIds, - }, + primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -109,7 +106,7 @@ enum TaskRequest { channel: oneshot::Sender>>, }, GetTransactions2 { - block_ids: Vec, + block_height_range: Range, from_peer: PeerId, channel: oneshot::Sender>>, }, @@ -472,9 +469,8 @@ where let channel_item = ResponseChannelItem::Transactions(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } - Some(TaskRequest::GetTransactions2 { block_ids, from_peer, channel }) => { - let block_ids = BlockIds(block_ids); - let request_msg = RequestMessage::Transactions2(block_ids); + Some(TaskRequest::GetTransactions2 { block_height_range, from_peer, channel }) => { + let request_msg = RequestMessage::Transactions2(block_height_range); let channel_item = ResponseChannelItem::Transactions2(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } @@ -554,14 +550,14 @@ where } } } - RequestMessage::Transactions2(block_ids) => { - match self.db.get_transactions_2(&block_ids) { + RequestMessage::Transactions2(range) => { + match self.db.get_transactions_2(range.clone()) { Ok(maybe_transactions) => { let response = maybe_transactions.map(Arc::new); let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); }, Err(e) => { - tracing::error!("Failed to get transactions for blocks {:?}: {:?}", block_ids, e); + tracing::error!("Failed to get transactions for range {:?}: {:?}", range, e); let response = None; let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); return Err(e.into()) @@ -740,14 +736,14 @@ impl SharedState { pub async fn get_transactions_2_from_peer( &self, peer_id: Vec, - block_ids: Vec, + range: Range, ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); dbg!(&from_peer); let request = TaskRequest::GetTransactions2 { - block_ids, + block_height_range: range, from_peer, channel: sender, }; @@ -926,7 +922,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &BlockIds, + _block_height_range: Range, ) -> StorageResult>> { unimplemented!() } @@ -1055,7 +1051,7 @@ pub mod tests { fn get_transactions_2( &self, - _block_ids: &BlockIds, + _block_height_range: Range, ) -> StorageResult>> { todo!() } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 6cd4dcf9373..af943af39bd 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -9,7 +9,10 @@ use futures::{ }; use std::{ future::Future, - ops::RangeInclusive, + ops::{ + Range, + RangeInclusive, + }, sync::Arc, }; @@ -18,17 +21,13 @@ use fuel_core_services::{ SharedMutex, StateWatcher, }; +use fuel_core_types::services::p2p::Transactions; use 
fuel_core_types::{ self, // blockchain::consensus::Sealed, fuel_types::BlockHeight, services::p2p::PeerId, }; -use fuel_core_types::{ - blockchain::primitives::BlockId, - // fuel_tx::Transaction, - services::p2p::Transactions, -}; use fuel_core_types::{ blockchain::{ block::Block, @@ -463,7 +462,7 @@ where async fn get_sealed_block_headers
<P>
( peer: PeerId, - range: RangeInclusive, + range: Range, p2p: &P, ) -> Result, ImportError> where @@ -471,14 +470,11 @@ where { tracing::debug!( "getting header range from {} to {} inclusive", - range.start(), - range.end() + range.start, + range.end ); - let start = *range.start(); - let end = *range.end() + 1; - let res = p2p - .get_sealed_block_headers(peer.clone().bind(start..end)) - .await; + let range = peer.clone().bind(range); + let res = p2p.get_sealed_block_headers(range).await; let SourcePeer { data: headers, .. } = res; match headers { Ok(Some(headers)) => Ok(headers), @@ -492,14 +488,14 @@ where async fn get_transactions
<P>
( peer_id: PeerId, - block_ids: Vec, + range: Range, p2p: &P, ) -> Result, ImportError> where P: PeerToPeerPort + Send + Sync + 'static, { - let block_ids = peer_id.clone().bind(block_ids); - let res = p2p.get_transactions_2(block_ids).await; + let range = peer_id.clone().bind(range); + let res = p2p.get_transactions_2(range).await; match res { Ok(Some(transactions)) => Ok(transactions), Ok(None) => { @@ -526,7 +522,7 @@ where ); let start = *range.start(); let end = *range.end() + 1; - let res = get_sealed_block_headers(peer_id.clone(), range.clone(), p2p).await; + let res = get_sealed_block_headers(peer_id.clone(), start..end, p2p).await; let headers = match res { Ok(headers) => { let headers = headers.into_iter(); @@ -607,8 +603,10 @@ where return (vec![], err) } - let block_ids = headers.iter().map(|header| header.entity.id()).collect(); - let maybe_txs = get_transactions(peer_id.clone(), block_ids, p2p.as_ref()).await; + let start = headers.first().expect("checked").entity.height().to_usize() as u32; + let end = start + headers.len() as u32; + let range = start..end; + let maybe_txs = get_transactions(peer_id.clone(), range, p2p.as_ref()).await; match maybe_txs { Ok(transaction_data) => { let headers = headers; diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 44793992f99..944788a53a6 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -80,7 +80,7 @@ impl PeerToPeerPort for PressurePeerToPeer { async fn get_transactions_2( &self, - block_ids: SourcePeer>, + block_ids: SourcePeer>, ) -> anyhow::Result>> { let transactions_count = block_ids.data.len(); self.counts diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index e817eb3b01a..761c2612d14 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -68,7 +68,7 @@ pub trait PeerToPeerPort { /// and source peer. async fn get_transactions_2( &self, - block_ids: SourcePeer>, + block_ids: SourcePeer>, ) -> anyhow::Result>>; /// Report a peer for some reason to modify their reputation. 
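A quick illustrative sketch (not from the patch) of the reworked port: transactions are now requested by block height range rather than by a list of block ids. The peer id and heights here are assumed values for illustration only.
    // Illustrative sketch only: ask one peer for the transactions of heights 3, 4 and 5.
    let range: Range<u32> = 3..6;
    let request = peer_id.clone().bind(range);
    let maybe_txs = p2p.get_transactions_2(request).await?;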
diff --git a/crates/types/src/blockchain/primitives.rs b/crates/types/src/blockchain/primitives.rs index 609074fdd4d..0738e710e28 100644 --- a/crates/types/src/blockchain/primitives.rs +++ b/crates/types/src/blockchain/primitives.rs @@ -162,25 +162,3 @@ impl From<[u8; 32]> for BlockId { Self(bytes.into()) } } - -/// Vector of BlockId -#[derive(Eq, PartialEq, Debug, Clone)] -pub struct BlockIds(pub Vec); - -impl From> for BlockIds { - fn from(value: Vec<[u8; 32]>) -> Self { - let block_ids = value.into_iter().map(BlockId::from).collect(); - Self(block_ids) - } -} - -impl From for Vec<[u8; 32]> { - fn from(value: BlockIds) -> Self { - let bytes = value - .0 - .into_iter() - .map(|block_id| block_id.0.into()) - .collect(); - bytes - } -} From 94fd510bbf1a7aaa00222d3d245cb72e3ca45ebe Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 25 Sep 2023 17:53:56 -0400 Subject: [PATCH 56/87] Update sealed_block.rs --- crates/fuel-core/src/database/sealed_block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index eb1fc4cc539..0d25c8f4d17 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -148,7 +148,7 @@ impl Database { let transactions = self .get_sealed_block_by_height(&block_height)? .map(|Sealed { entity: block, .. }| block.into_inner().1) - .map(|transactions| Transactions(transactions)); + .map(Transactions); Ok(transactions) }) .collect::>()?; From 428b712243ce1011867f2cd1128ce76b72c251e7 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 26 Sep 2023 14:02:55 -0400 Subject: [PATCH 57/87] Remove dbg --- crates/fuel-core/src/p2p_test_helpers.rs | 2 +- crates/services/p2p/src/service.rs | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/fuel-core/src/p2p_test_helpers.rs b/crates/fuel-core/src/p2p_test_helpers.rs index fb7a6040f27..92dc84f0384 100644 --- a/crates/fuel-core/src/p2p_test_helpers.rs +++ b/crates/fuel-core/src/p2p_test_helpers.rs @@ -316,7 +316,7 @@ pub fn make_config(name: String, chain_config: ChainConfig) -> Config { pub async fn make_node(node_config: Config, test_txs: Vec) -> Node { let db = Database::in_memory(); let node = tokio::time::timeout( - Duration::from_secs(1000), + Duration::from_secs(1), FuelService::from_database(db.clone(), node_config), ) .await diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index cd5b7b249b9..b094e74380c 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -767,7 +767,6 @@ impl SharedState { ) -> anyhow::Result>> { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); - dbg!(&from_peer); let request = TaskRequest::GetTransactions2 { block_height_range: range, @@ -776,9 +775,7 @@ impl SharedState { }; self.request_sender.send(request).await?; - let r = receiver.await.map_err(|e| anyhow!("Receiver error! {}", e)); - dbg!(&r); - r + receiver.await.map_err(|e| anyhow!("Receiver error! 
{}", e)) } pub fn broadcast_vote(&self, vote: Arc) -> anyhow::Result<()> { From 6d65b0ca53f5a2a55c9d10c652ad4075d4075d0f Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Tue, 26 Sep 2023 14:05:22 -0400 Subject: [PATCH 58/87] Remove dbg --- crates/services/p2p/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index b094e74380c..e9168268eb7 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -775,7 +775,7 @@ impl SharedState { }; self.request_sender.send(request).await?; - receiver.await.map_err(|e| anyhow!("Receiver error! {}", e)) + receiver.await.map_err(|e| anyhow!("{}", e)) } pub fn broadcast_vote(&self, vote: Arc) -> anyhow::Result<()> { From 5dceeec5d124586cb81ac518e1bce69135af3b70 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Wed, 27 Sep 2023 12:09:40 -0400 Subject: [PATCH 59/87] References instead of clone --- crates/services/sync/src/import.rs | 132 ++++++++++++++--------------- 1 file changed, 65 insertions(+), 67 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index af943af39bd..e4e48b20ed2 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -91,7 +91,7 @@ impl Default for Config { /// The combination of shared state, configuration, and services that define /// import behavior. -pub struct Import { +pub struct Import<'a, P: 'a, E: 'a, C: 'a> { /// Shared state between import and sync tasks. state: SharedMutex, /// Notify import when sync has new work. @@ -106,7 +106,7 @@ pub struct Import { consensus: Arc, } -impl Import { +impl<'a, P, E, C> Import<'a, P, E, C> { /// Configure an import behavior from a shared state, configuration and /// services that can be executed by an ImportTask. pub fn new( @@ -167,11 +167,11 @@ impl ImportError { } } -impl Import +impl<'a, P, E, C> Import<'a, P, E, C> where - P: PeerToPeerPort + Send + Sync + 'static, - E: BlockImporterPort + Send + Sync + 'static, - C: ConsensusPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, + E: BlockImporterPort + Send + Sync, + C: ConsensusPort + Send + Sync, { #[tracing::instrument(skip_all)] /// Execute imports until a shutdown is requested. @@ -212,7 +212,7 @@ where /// If an error occurs, the preceding blocks still be processed /// and the error will be returned. 
async fn launch_stream( - &self, + &'a self, range: RangeInclusive, shutdown: &StateWatcher, ) -> (usize, Result<(), ImportError>) { @@ -237,16 +237,12 @@ where } let peer = peer.expect("Checked"); - let context_generator = - stream::repeat((peer.clone(), state.clone(), p2p.clone(), executor.clone())); + let context_generator = stream::repeat((peer.clone(), state.clone())); - let block_stream = get_block_stream( - peer.clone(), - range.clone(), - params, - p2p.clone(), - consensus.clone(), - ); + let p2p = p2p.as_ref(); + let consensus = consensus.as_ref(); + let block_stream = + get_block_stream(peer.clone(), range.clone(), params, p2p, consensus); let result = block_stream .map(move |stream_block_batch| { @@ -282,7 +278,7 @@ where // Then execute and commit the block .zip(context_generator) .then( - |(blocks_result, (peer, state, p2p, executor))| async move { + |(blocks_result, (peer, state))| async move { let (sealed_blocks, error) = blocks_result?; let sealed_blocks = futures::stream::iter(sealed_blocks); let res = sealed_blocks.then(|sealed_block| async { @@ -290,7 +286,7 @@ where }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); match &res { Ok(_) => { - report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport).await; + report_peer(p2p, peer.clone(), PeerReportReason::SuccessfulBlockImport).await; }, Err(e) => { // If this fails, then it means that consensus has approved a block that is invalid. @@ -326,28 +322,28 @@ where } fn get_block_stream< - P: PeerToPeerPort + Send + Sync + 'static, - C: ConsensusPort + Send + Sync + 'static, + 'a, + P: PeerToPeerPort + Send + Sync, + C: ConsensusPort + Send + Sync, >( peer_id: PeerId, range: RangeInclusive, - params: &Config, - p2p: Arc
<P>
, - consensus: Arc, -) -> impl Stream, Option)>> { + params: &'a Config, + p2p: &'a P, + consensus: &'a C, +) -> impl Stream, Option)> + 'a> + 'a +{ let Config { header_batch_size, .. } = params; - let header_stream = get_header_stream(peer_id.clone(), range, params, p2p.clone()); - let generator = - futures::stream::repeat((peer_id.clone(), p2p.clone(), consensus.clone())); + let header_stream = get_header_stream(peer_id.clone(), range, params, p2p); + let generator = futures::stream::repeat(peer_id.clone()); let iter = header_stream.zip(generator.clone()); let checked_headers = iter - .then(|(header, (peer_id, p2p, consensus))| async move { + .then(move |(header, peer_id)| async move { let header = header?; - check_sealed_header(&header, peer_id, p2p.as_ref(), consensus.as_ref()) - .await?; - await_da_height(&header, consensus.as_ref()).await?; + check_sealed_header(&header, peer_id, p2p, consensus).await?; + await_da_height(&header, consensus).await?; Ok(header) }) .into_scan_err() @@ -355,31 +351,29 @@ fn get_block_stream< checked_headers .chunks(*header_batch_size as usize) .zip(generator) - .map(|(headers, (peer_id, p2p, ..))| get_blocks(p2p, peer_id, headers)) + .map(|(headers, peer_id)| get_blocks(p2p, peer_id, headers)) } -fn get_header_stream( +fn get_header_stream<'a, P: PeerToPeerPort + Send + Sync>( peer: PeerId, range: RangeInclusive, - params: &Config, - p2p: Arc
<P>
, -) -> impl Stream> { + params: &'a Config, + p2p: &'a P, +) -> impl Stream> + 'a { let Config { header_batch_size, .. } = params; let ranges = range_chunks(range, *header_batch_size); + let peer_iter = futures::stream::repeat(peer.clone()); futures::stream::iter(ranges) - .then(move |range| { - let p2p = p2p.clone(); - let peer = peer.clone(); - async move { - tracing::debug!( - "getting header range from {} to {} inclusive", - range.start(), - range.end() - ); - get_headers_batch(peer, range, p2p.as_ref()).await - } + .zip(peer_iter) + .then(move |(range, peer)| async move { + tracing::debug!( + "getting header range from {} to {} inclusive", + range.start(), + range.end() + ); + get_headers_batch(peer, range, p2p).await }) .flatten() .into_scan_err() @@ -398,8 +392,8 @@ fn range_chunks( } async fn check_sealed_header< - P: PeerToPeerPort + Send + Sync + 'static, - C: ConsensusPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, + C: ConsensusPort + Send + Sync, >( header: &SealedBlockHeader, peer_id: PeerId, @@ -418,7 +412,7 @@ async fn check_sealed_header< } } -async fn await_da_height( +async fn await_da_height( header: &SealedBlockHeader, consensus: &C, ) -> Result<(), ImportError> { @@ -449,7 +443,7 @@ async fn wait_for_notify_or_shutdown( async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { tracing::debug!("getting peer for block height {}", block_height); let res = p2p.select_peer(block_height).await; @@ -466,7 +460,7 @@ async fn get_sealed_block_headers
<P>
( p2p: &P, ) -> Result, ImportError> where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { tracing::debug!( "getting header range from {} to {} inclusive", @@ -492,7 +486,7 @@ async fn get_transactions
<P>
( p2p: &P, ) -> Result, ImportError> where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { let range = peer_id.clone().bind(range); let res = p2p.get_transactions_2(range).await; @@ -513,7 +507,7 @@ async fn get_headers_batch
<P>
( p2p: &P, ) -> impl Stream> where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { tracing::debug!( "getting header range from {} to {} inclusive", @@ -562,7 +556,7 @@ where async fn report_peer
<P>
(p2p: &P, peer_id: PeerId, reason: PeerReportReason) where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { // Failure to report a peer is a non-fatal error; ignore the error let _ = p2p @@ -580,13 +574,13 @@ where // ), // err // )] -async fn get_blocks
<P>
( - p2p: Arc
<P>
, +async fn get_blocks<'a, P>( + p2p: &'a P, peer_id: PeerId, headers: Vec>, ) -> (Vec, Option) where - P: PeerToPeerPort + Send + Sync + 'static, + P: PeerToPeerPort + Send + Sync, { let (headers, errors): (Vec<_>, Vec<_>) = headers.into_iter().partition(|r| r.is_ok()); @@ -606,7 +600,7 @@ where let start = headers.first().expect("checked").entity.height().to_usize() as u32; let end = start + headers.len() as u32; let range = start..end; - let maybe_txs = get_transactions(peer_id.clone(), range, p2p.as_ref()).await; + let maybe_txs = get_transactions(peer_id.clone(), range, p2p).await; match maybe_txs { Ok(transaction_data) => { let headers = headers; @@ -632,7 +626,7 @@ where "Failed to created block from header and transactions" ); report_peer( - p2p.as_ref(), + p2p, peer_id.clone(), PeerReportReason::InvalidTransactions, ) @@ -664,7 +658,7 @@ async fn execute_and_commit( block: SealedBlock, ) -> Result<(), ImportError> where - E: BlockImporterPort + Send + Sync + 'static, + E: BlockImporterPort + Send + Sync, { // Execute and commit the block. let height = *block.entity.header().height(); @@ -732,11 +726,14 @@ struct ScanErr(S); impl ScanEmptyErr { /// Scan the stream for empty vector or errors. - fn scan_empty_or_err(self) -> impl Stream, ImportError>> + fn scan_empty_or_err<'a, R>( + self, + ) -> impl Stream, ImportError>> + 'a where - S: Stream, ImportError>> + Send + 'static, + S: Stream, ImportError>> + Send + 'a, + R: 'a, { - let stream = self.0.boxed(); + let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { if is_err { None @@ -754,11 +751,12 @@ impl ScanEmptyErr { impl ScanErr { /// Scan the stream for errors. - fn scan_err(self) -> impl Stream> + fn scan_err<'a, R>(self) -> impl Stream> + 'a where - S: Stream> + Send + 'static, + S: Stream> + Send + 'a, + R: 'a, { - let stream = self.0.boxed(); + let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { if err { None From 835f823665869a3a7d1965574c471a284ea82796 Mon Sep 17 00:00:00 2001 From: xgreenx Date: Wed, 27 Sep 2023 17:41:03 +0100 Subject: [PATCH 60/87] Something --- crates/services/sync/src/import.rs | 159 +++++++++++++++-------------- 1 file changed, 81 insertions(+), 78 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index e4e48b20ed2..760e6a3e694 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -3,7 +3,6 @@ //! importing blocks from the network into the local blockchain. use futures::{ - stream, FutureExt, TryStreamExt, }; @@ -91,7 +90,7 @@ impl Default for Config { /// The combination of shared state, configuration, and services that define /// import behavior. -pub struct Import<'a, P: 'a, E: 'a, C: 'a> { +pub struct Import { /// Shared state between import and sync tasks. state: SharedMutex, /// Notify import when sync has new work. @@ -106,7 +105,7 @@ pub struct Import<'a, P: 'a, E: 'a, C: 'a> { consensus: Arc, } -impl<'a, P, E, C> Import<'a, P, E, C> { +impl Import { /// Configure an import behavior from a shared state, configuration and /// services that can be executed by an ImportTask. 
pub fn new( @@ -167,11 +166,11 @@ impl ImportError { } } -impl<'a, P, E, C> Import<'a, P, E, C> +impl Import where - P: PeerToPeerPort + Send + Sync, - E: BlockImporterPort + Send + Sync, - C: ConsensusPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, + E: BlockImporterPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, { #[tracing::instrument(skip_all)] /// Execute imports until a shutdown is requested. @@ -212,7 +211,7 @@ where /// If an error occurs, the preceding blocks still be processed /// and the error will be returned. async fn launch_stream( - &'a self, + &self, range: RangeInclusive, shutdown: &StateWatcher, ) -> (usize, Result<(), ImportError>) { @@ -237,12 +236,15 @@ where } let peer = peer.expect("Checked"); - let context_generator = stream::repeat((peer.clone(), state.clone())); + let block_stream = get_block_stream( + peer.clone(), + range.clone(), + params, + p2p.clone(), + consensus.clone(), + ); - let p2p = p2p.as_ref(); - let consensus = consensus.as_ref(); - let block_stream = - get_block_stream(peer.clone(), range.clone(), params, p2p, consensus); + let (peer, state, p2p, executor) = (peer.clone(), state, p2p, executor); let result = block_stream .map(move |stream_block_batch| { @@ -275,29 +277,30 @@ where tracing::info!("In progress import stream shutting down"); } }) - // Then execute and commit the block - .zip(context_generator) .then( - |(blocks_result, (peer, state))| async move { - let (sealed_blocks, error) = blocks_result?; - let sealed_blocks = futures::stream::iter(sealed_blocks); - let res = sealed_blocks.then(|sealed_block| async { - execute_and_commit(executor.as_ref(), &state, sealed_block).await - }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); - match &res { - Ok(_) => { - report_peer(p2p, peer.clone(), PeerReportReason::SuccessfulBlockImport).await; - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. - tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, + move |blocks_result| { + let peer = peer.clone(); + async move { + let (sealed_blocks, error) = blocks_result?; + let sealed_blocks = futures::stream::iter(sealed_blocks); + let res = sealed_blocks.then(|sealed_block| async { + execute_and_commit(executor.as_ref(), state, sealed_block).await + }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); + match &res { + Ok(_) => { + report_peer(p2p.as_ref(), peer, PeerReportReason::SuccessfulBlockImport).await; + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + }, + } + res } - res + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() ) // Continue the stream unless an error occurs. 
.into_scan_empty_or_err() @@ -322,28 +325,28 @@ where } fn get_block_stream< - 'a, - P: PeerToPeerPort + Send + Sync, - C: ConsensusPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, >( peer_id: PeerId, range: RangeInclusive, - params: &'a Config, - p2p: &'a P, - consensus: &'a C, -) -> impl Stream, Option)> + 'a> + 'a -{ + params: &Config, + p2p: Arc
<P>
, + consensus: Arc, +) -> impl Stream, Option)>> { let Config { header_batch_size, .. } = params; - let header_stream = get_header_stream(peer_id.clone(), range, params, p2p); - let generator = futures::stream::repeat(peer_id.clone()); + let header_stream = get_header_stream(peer_id.clone(), range, params, p2p.clone()); + let generator = + futures::stream::repeat((peer_id.clone(), p2p.clone(), consensus.clone())); let iter = header_stream.zip(generator.clone()); let checked_headers = iter - .then(move |(header, peer_id)| async move { + .then(|(header, (peer_id, p2p, consensus))| async move { let header = header?; - check_sealed_header(&header, peer_id, p2p, consensus).await?; - await_da_height(&header, consensus).await?; + check_sealed_header(&header, peer_id, p2p.as_ref(), consensus.as_ref()) + .await?; + await_da_height(&header, consensus.as_ref()).await?; Ok(header) }) .into_scan_err() @@ -351,29 +354,31 @@ fn get_block_stream< checked_headers .chunks(*header_batch_size as usize) .zip(generator) - .map(|(headers, peer_id)| get_blocks(p2p, peer_id, headers)) + .map(|(headers, (peer_id, p2p, ..))| get_blocks(p2p, peer_id, headers)) } -fn get_header_stream<'a, P: PeerToPeerPort + Send + Sync>( +fn get_header_stream( peer: PeerId, range: RangeInclusive, - params: &'a Config, - p2p: &'a P, -) -> impl Stream> + 'a { + params: &Config, + p2p: Arc
<P>
, +) -> impl Stream> { let Config { header_batch_size, .. } = params; let ranges = range_chunks(range, *header_batch_size); - let peer_iter = futures::stream::repeat(peer.clone()); futures::stream::iter(ranges) - .zip(peer_iter) - .then(move |(range, peer)| async move { - tracing::debug!( - "getting header range from {} to {} inclusive", - range.start(), - range.end() - ); - get_headers_batch(peer, range, p2p).await + .then(move |range| { + let p2p = p2p.clone(); + let peer = peer.clone(); + async move { + tracing::debug!( + "getting header range from {} to {} inclusive", + range.start(), + range.end() + ); + get_headers_batch(peer, range, p2p.as_ref()).await + } }) .flatten() .into_scan_err() @@ -392,8 +397,8 @@ fn range_chunks( } async fn check_sealed_header< - P: PeerToPeerPort + Send + Sync, - C: ConsensusPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, + C: ConsensusPort + Send + Sync + 'static, >( header: &SealedBlockHeader, peer_id: PeerId, @@ -412,7 +417,7 @@ async fn check_sealed_header< } } -async fn await_da_height( +async fn await_da_height( header: &SealedBlockHeader, consensus: &C, ) -> Result<(), ImportError> { @@ -443,7 +448,7 @@ async fn wait_for_notify_or_shutdown( async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { tracing::debug!("getting peer for block height {}", block_height); let res = p2p.select_peer(block_height).await; @@ -460,7 +465,7 @@ async fn get_sealed_block_headers
<P>
( p2p: &P, ) -> Result, ImportError> where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { tracing::debug!( "getting header range from {} to {} inclusive", @@ -486,7 +491,7 @@ async fn get_transactions
<P>
( p2p: &P, ) -> Result, ImportError> where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { let range = peer_id.clone().bind(range); let res = p2p.get_transactions_2(range).await; @@ -507,7 +512,7 @@ async fn get_headers_batch
<P>
( p2p: &P, ) -> impl Stream> where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { tracing::debug!( "getting header range from {} to {} inclusive", @@ -556,7 +561,7 @@ where async fn report_peer
<P>
(p2p: &P, peer_id: PeerId, reason: PeerReportReason) where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { // Failure to report a peer is a non-fatal error; ignore the error let _ = p2p @@ -574,13 +579,13 @@ where // ), // err // )] -async fn get_blocks<'a, P>( - p2p: &'a P, +async fn get_blocks
<P>
( + p2p: Arc
<P>
, peer_id: PeerId, headers: Vec>, ) -> (Vec, Option) where - P: PeerToPeerPort + Send + Sync, + P: PeerToPeerPort + Send + Sync + 'static, { let (headers, errors): (Vec<_>, Vec<_>) = headers.into_iter().partition(|r| r.is_ok()); @@ -600,7 +605,7 @@ where let start = headers.first().expect("checked").entity.height().to_usize() as u32; let end = start + headers.len() as u32; let range = start..end; - let maybe_txs = get_transactions(peer_id.clone(), range, p2p).await; + let maybe_txs = get_transactions(peer_id.clone(), range, p2p.as_ref()).await; match maybe_txs { Ok(transaction_data) => { let headers = headers; @@ -626,7 +631,7 @@ where "Failed to created block from header and transactions" ); report_peer( - p2p, + p2p.as_ref(), peer_id.clone(), PeerReportReason::InvalidTransactions, ) @@ -658,7 +663,7 @@ async fn execute_and_commit( block: SealedBlock, ) -> Result<(), ImportError> where - E: BlockImporterPort + Send + Sync, + E: BlockImporterPort + Send + Sync + 'static, { // Execute and commit the block. let height = *block.entity.header().height(); @@ -726,12 +731,11 @@ struct ScanErr(S); impl ScanEmptyErr { /// Scan the stream for empty vector or errors. - fn scan_empty_or_err<'a, R>( + fn scan_empty_or_err<'a, R: 'a>( self, ) -> impl Stream, ImportError>> + 'a where S: Stream, ImportError>> + Send + 'a, - R: 'a, { let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { @@ -751,10 +755,9 @@ impl ScanEmptyErr { impl ScanErr { /// Scan the stream for errors. - fn scan_err<'a, R>(self) -> impl Stream> + 'a + fn scan_err<'a, R: 'a>(self) -> impl Stream> + 'a where S: Stream> + Send + 'a, - R: 'a, { let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { From d002c53cc75a06a1817187f41dc99fb6e2a90cd4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:20:44 -0400 Subject: [PATCH 61/87] Batch --- crates/fuel-core/src/service/adapters/sync.rs | 6 +- crates/services/sync/src/import.rs | 509 +++++++++--------- .../test_helpers/pressure_peer_to_peer.rs | 2 +- crates/services/sync/src/import/tests.rs | 131 ++++- crates/services/sync/src/ports.rs | 6 +- 5 files changed, 381 insertions(+), 273 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 0fcd4377d39..5aa2ee2f94f 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -113,11 +113,7 @@ impl PeerToPeerPort for P2PAdapter { } } - async fn report_peer( - &self, - peer: PeerId, - report: PeerReportReason, - ) -> anyhow::Result<()> { + fn report_peer(&self, peer: PeerId, report: PeerReportReason) -> anyhow::Result<()> { if let Some(service) = &self.service { let service_name = "Sync"; let new_report = self.process_report(report); diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 760e6a3e694..ec96461a6ec 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -2,45 +2,38 @@ //! This module contains the import task which is responsible for //! importing blocks from the network into the local blockchain. 
-use futures::{ - FutureExt, - TryStreamExt, -}; -use std::{ - future::Future, - ops::{ - Range, - RangeInclusive, - }, - sync::Arc, -}; - use anyhow::anyhow; use fuel_core_services::{ SharedMutex, StateWatcher, }; -use fuel_core_types::services::p2p::Transactions; use fuel_core_types::{ self, - // blockchain::consensus::Sealed, - fuel_types::BlockHeight, - services::p2p::PeerId, -}; -use fuel_core_types::{ blockchain::{ block::Block, - // consensus::Sealed, - // header::BlockHeader, SealedBlock, SealedBlockHeader, }, - services::p2p::SourcePeer, + fuel_types::BlockHeight, + services::p2p::{ + PeerId, + SourcePeer, + Transactions, + }, }; use futures::{ stream::StreamExt, - // FutureExt, + FutureExt, Stream, + TryStreamExt, +}; +use std::{ + future::Future, + ops::{ + Range, + RangeInclusive, + }, + sync::Arc, }; use tokio::{ sync::Notify, @@ -132,6 +125,48 @@ impl Import { } } +#[derive(Debug)] +struct Batch { + peer: PeerId, + range: Range, + results: Vec, +} + +impl Batch { + pub fn empty(peer: PeerId, range: Range) -> Self { + Self { + peer, + range, + results: vec![], + } + } + + pub fn new(peer: PeerId, range: Range, results: Vec) -> Self { + Self { + peer, + range, + results, + } + } + + pub fn is_empty(&self) -> bool { + self.results.is_empty() + } + + pub fn is_err(&self) -> bool { + self.results.len() < self.range.len() + } +} + +type SealedHeaderBatch = Batch; +type SealedBlockBatch = Batch; + +impl SealedBlockBatch { + fn err(&self) -> Option { + self.is_err().then(|| ImportError::MissingTransactions) + } +} + #[derive(Debug, derive_more::Display)] enum ImportError { ConsensusError(anyhow::Error), @@ -140,7 +175,6 @@ enum ImportError { MissingBlockHeaders, MissingTransactions, BadBlockHeader, - BlockHeightMismatch, JoinError(JoinError), Other(anyhow::Error), } @@ -151,21 +185,6 @@ impl From for ImportError { } } -impl ImportError { - /// All `ImportErrors` will stop the import stream. Fatal `ImportErrors` - /// will prevent the notify signal at the end of the import. Non-fatal - /// `ImportErrors` will allow the notify signal at the end of the import. - fn is_fatal(&self) -> bool { - #[allow(clippy::match_like_matches_macro)] - match self { - ImportError::BlockHeightMismatch => false, - ImportError::BadBlockHeader => false, - ImportError::MissingTransactions => false, - _ => true, - } - } -} - impl Import where P: PeerToPeerPort + Send + Sync + 'static, @@ -184,7 +203,7 @@ where // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. - let (count, result) = self.launch_stream(range.clone(), shutdown).await; + let count = self.launch_stream(range.clone(), shutdown).await; // Get the size of the range. let range_len = range.size_hint().0 as u32; @@ -192,13 +211,13 @@ where // If we did not process the entire range, mark the failed heights as failed. 
if (count as u32) < range_len { let incomplete_range = (*range.start() + count as u32)..=*range.end(); - tracing::error!( + self.state + .apply(|s| s.failed_to_process(incomplete_range.clone())); + Err(anyhow::anyhow!( "Failed to import range of blocks: {:?}", incomplete_range - ); - self.state.apply(|s| s.failed_to_process(incomplete_range)); + ))?; } - result?; } Ok(()) } @@ -214,7 +233,7 @@ where &self, range: RangeInclusive, shutdown: &StateWatcher, - ) -> (usize, Result<(), ImportError>) { + ) -> usize { let Self { state, params, @@ -230,9 +249,8 @@ where let block_height = BlockHeight::from(*range.end()); let peer = select_peer(block_height, p2p.as_ref()).await; - if let Err(err) = peer { - let err = Err(err); - return (0, err) + if peer.is_err() { + return 0 } let peer = peer.expect("Checked"); @@ -245,30 +263,37 @@ where ); let (peer, state, p2p, executor) = (peer.clone(), state, p2p, executor); + let peer_ = peer.clone(); + let range = *range.start()..(*range.end() + 1); + let range_ = range.clone(); let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); let shutdown_signal = shutdown_signal.clone(); + let peer = peer.clone(); + let range = range.clone(); tokio::spawn(async move { // Hold a shutdown sender for the lifetime of the spawned task let _shutdown_guard = shutdown_guard.clone(); let mut shutdown_signal = shutdown_signal.clone(); + let peer = peer.clone(); + let range = range.clone(); tokio::select! { // Stream a batch of blocks blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response - _ = shutdown_signal.while_started() => (vec![], None) + _ = shutdown_signal.while_started() => Batch::empty(peer, range) } - }).then(|task| async { task.map_err(ImportError::JoinError) }) + }).map(|task| task.map_err(ImportError::JoinError)) }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) // Continue the stream unless an error occurs. // Note the error will be returned but the stream will close. - .into_scan_err() - .scan_err() + // .into_scan_err() + // .scan_err() // Continue the stream until the shutdown signal is received. .take_until({ let mut s = shutdown.clone(); @@ -277,45 +302,53 @@ where tracing::info!("In progress import stream shutting down"); } }) - .then( - move |blocks_result| { - let peer = peer.clone(); - async move { - let (sealed_blocks, error) = blocks_result?; - let sealed_blocks = futures::stream::iter(sealed_blocks); - let res = sealed_blocks.then(|sealed_block| async { - execute_and_commit(executor.as_ref(), state, sealed_block).await - }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); - match &res { - Ok(_) => { - report_peer(p2p.as_ref(), peer, PeerReportReason::SuccessfulBlockImport).await; - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. 
- tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, + .map(|r| r.unwrap_or({ + let peer = peer_.clone(); + let range = range_.clone(); + SealedBlockBatch::empty(peer, range) + })) + .then({ + let peer = peer_.clone(); + let range = range_.clone(); + move |batch| { + let peer = peer.clone(); + let range = range.clone(); + let err = batch.err(); + async move { + let sealed_blocks = futures::stream::iter(batch.results); + let res = sealed_blocks.then(|sealed_block| async { + execute_and_commit(executor.as_ref(), state, sealed_block).await + }).try_collect::>().await.and_then(|v| err.map_or(Ok(v), Err)); + match res { + Ok(v) => { + report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); + Batch::<()>::new(peer, range, v) + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + Batch::<()>::empty(peer, range) + + }, + } } - res - } .instrument(tracing::debug_span!("execute_and_commit")) .in_current_span() + } } ) + // Continue the stream unless an error occurs. - .into_scan_empty_or_err() - .scan_empty_or_err() + .into_scan_err() + .scan_err() // Count the number of successfully executed blocks and // find any errors. // Fold the stream into a count and any errors. - .fold((0usize, Ok(())), |(count, res), result| async move { - match result { - Ok(_) => (count + 1, res), - Err(e) if !e.is_fatal() => (count, Ok(())), - Err(e) => (count, Err(e)) - } + .fold(0usize, |count, batch| async move { + count + batch.results.len() }) - .in_current_span() + // .in_current_span() .await; // Wait for any spawned tasks to shutdown @@ -325,64 +358,94 @@ where } fn get_block_stream< + 'a, P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( peer_id: PeerId, range: RangeInclusive, - params: &Config, + params: &'a Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream, Option)>> { - let Config { - header_batch_size, .. - } = params; - let header_stream = get_header_stream(peer_id.clone(), range, params, p2p.clone()); - let generator = - futures::stream::repeat((peer_id.clone(), p2p.clone(), consensus.clone())); - let iter = header_stream.zip(generator.clone()); - let checked_headers = iter - .then(|(header, (peer_id, p2p, consensus))| async move { - let header = header?; - check_sealed_header(&header, peer_id, p2p.as_ref(), consensus.as_ref()) - .await?; - await_da_height(&header, consensus.as_ref()).await?; - Ok(header) +) -> impl Stream> + 'a { + let header_stream = + get_header_batch_stream(peer_id.clone(), range.clone(), params, p2p.clone()); + let range = *range.start()..(*range.end() + 1); + header_stream + .map({ + let peer_id = peer_id.clone(); + let consensus = consensus.clone(); + let p2p = p2p.clone(); + move |header_batch| { + header_batch + .results + .into_iter() + .map({ + let peer_id = peer_id.clone(); + let consensus = consensus.clone(); + let p2p = p2p.clone(); + move |header| { + check_sealed_header( + &header, + peer_id.clone(), + p2p.clone(), + consensus.clone(), + )?; + Ok(header) + } + }) + .take_while(|result| result.is_ok()) + .filter_map(|result: Result| { + result.ok() + }) + .collect::>() + } + }) + .map({ + let consensus = consensus.clone(); + move |valid_headers| { + let consensus = consensus.clone(); + async move { + if let Some(header) = valid_headers.last() { + await_da_height(header, consensus.as_ref()).await? + }; + Result::<_, ImportError>::Ok(valid_headers) + } + } + }) + .map({ + let peer_id = peer_id.clone(); + let range = range.clone(); + let p2p = p2p.clone(); + move |headers| { + let peer_id = peer_id.clone(); + let range = range.clone(); + let p2p = p2p.clone(); + async move { + let headers = headers.await.unwrap_or(Default::default()); + let headers = + SealedHeaderBatch::new(peer_id.clone(), range.clone(), headers); + get_blocks(p2p, headers).await + } + } }) - .into_scan_err() - .scan_err(); - checked_headers - .chunks(*header_batch_size as usize) - .zip(generator) - .map(|(headers, (peer_id, p2p, ..))| get_blocks(p2p, peer_id, headers)) } -fn get_header_stream( +fn get_header_batch_stream<'a, P: PeerToPeerPort + Send + Sync + 'static>( peer: PeerId, range: RangeInclusive, - params: &Config, + params: &'a Config, p2p: Arc
<P>
, -) -> impl Stream> { +) -> impl Stream + 'a { let Config { header_batch_size, .. } = params; let ranges = range_chunks(range, *header_batch_size); - futures::stream::iter(ranges) - .then(move |range| { - let p2p = p2p.clone(); - let peer = peer.clone(); - async move { - tracing::debug!( - "getting header range from {} to {} inclusive", - range.start(), - range.end() - ); - get_headers_batch(peer, range, p2p.as_ref()).await - } - }) - .flatten() - .into_scan_err() - .scan_err() + futures::stream::iter(ranges).then(move |range| { + let peer = peer.clone(); + let p2p = p2p.clone(); + async move { get_headers_batch(peer, range, p2p).await } + }) } fn range_chunks( @@ -396,23 +459,27 @@ fn range_chunks( }) } -async fn check_sealed_header< +fn check_sealed_header< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( header: &SealedBlockHeader, peer_id: PeerId, - p2p: &P, - consensus_port: &C, + p2p: Arc
<P>
, + consensus: Arc, ) -> Result<(), ImportError> { - let validity = consensus_port + let validity = consensus .check_sealed_header(header) .map_err(ImportError::ConsensusError) .trace_err("Failed to check consensus on header")?; if validity { Ok(()) } else { - report_peer(p2p, peer_id.clone(), PeerReportReason::BadBlockHeader).await; + report_peer( + p2p.as_ref(), + peer_id.clone(), + PeerReportReason::BadBlockHeader, + ); Err(ImportError::BadBlockHeader) } } @@ -478,7 +545,7 @@ where match headers { Ok(Some(headers)) => Ok(headers), Ok(None) => { - report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders).await; + report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders); Err(ImportError::MissingBlockHeaders) } Err(e) => Err(e.into()), @@ -498,8 +565,7 @@ where match res { Ok(Some(transactions)) => Ok(transactions), Ok(None) => { - report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions) - .await; + report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions); Err(ImportError::MissingTransactions) } Err(e) => Err(e.into()), @@ -509,8 +575,8 @@ where async fn get_headers_batch
<P>
( peer_id: PeerId, range: RangeInclusive<u32>, - p2p: &P, -) -> impl Stream<Item = Result<SealedBlockHeader, ImportError>> + p2p: Arc
<P>
, +) -> SealedHeaderBatch where P: PeerToPeerPort + Send + Sync + 'static, { @@ -521,94 +587,67 @@ where ); let start = *range.start(); let end = *range.end() + 1; - let res = get_sealed_block_headers(peer_id.clone(), start..end, p2p).await; - let headers = match res { + let range = start..end; + let res = + get_sealed_block_headers(peer_id.clone(), range.clone(), p2p.as_ref()).await; + match res { Ok(headers) => { let headers = headers.into_iter(); - let heights = range.map(BlockHeight::from); + let heights = range.clone().map(BlockHeight::from); let headers = headers .zip(heights) - .map(move |(header, expected_height)| { - let height = *header.entity.height(); - if height == expected_height { - Ok(header) - } else { - Err(ImportError::BlockHeightMismatch) - } + .take_while(move |(header, expected_height)| { + let height = header.entity.height(); + height == expected_height }) + .map(|(header, _)| header) .collect::>(); if let Some(expected_len) = end.checked_sub(start) { - if headers.len() != expected_len as usize - || headers.iter().any(|h| h.is_err()) - { + if headers.len() != expected_len as usize { report_peer( - p2p, + p2p.as_ref(), peer_id.clone(), PeerReportReason::MissingBlockHeaders, - ) - .await; + ); } } - headers + Batch::new(peer_id, range.clone(), headers) } - Err(e) => { - let error = Err(e); - vec![error] - } - }; - futures::stream::iter(headers) + Err(_e) => Batch::empty(peer_id, range.clone()), + } } -async fn report_peer
<P>
(p2p: &P, peer_id: PeerId, reason: PeerReportReason) +fn report_peer
<P>
(p2p: &P, peer_id: PeerId, reason: PeerReportReason) where P: PeerToPeerPort + Send + Sync + 'static, { + tracing::info!("Reporting peer for {:?}", reason); + // Failure to report a peer is a non-fatal error; ignore the error let _ = p2p .report_peer(peer_id.clone(), reason) - .await - .map_err(|e| tracing::error!("Failed to report peer {:?}: {:?}", peer_id, e)); + .trace_err(&format!("Failed to report peer {:?}", peer_id)); } -// Get blocks correlating to the headers from a specific peer -// #[tracing::instrument( -// skip(p2p, headers), -// fields( -// height = **header.data.height(), -// id = %header.data.consensus.generated.application_hash -// ), -// err -// )] -async fn get_blocks
<P>
( - p2p: Arc
<P>
, - peer_id: PeerId, - headers: Vec<Result<SealedBlockHeader, ImportError>>, -) -> (Vec<SealedBlock>, Option<ImportError>) +/// Get blocks correlating to the headers from a specific peer +#[tracing::instrument(skip(p2p, headers))] +async fn get_blocks
<P>
(p2p: Arc
<P>
, headers: SealedHeaderBatch) -> SealedBlockBatch where P: PeerToPeerPort + Send + Sync + 'static, { - let (headers, errors): (Vec<_>, Vec<_>) = - headers.into_iter().partition(|r| r.is_ok()); - let headers = headers - .into_iter() - .map(|item| item.expect("Result is checked for Ok")) - .collect::>(); - let mut errors = errors - .into_iter() - .map(|item| item.expect_err("Result is checked for Error")) - .collect::>(); - let mut err = errors.pop(); if headers.is_empty() { - return (vec![], err) + return SealedBlockBatch::empty(headers.peer, headers.range) } - let start = headers.first().expect("checked").entity.height().to_usize() as u32; - let end = start + headers.len() as u32; - let range = start..end; - let maybe_txs = get_transactions(peer_id.clone(), range, p2p.as_ref()).await; + let Batch { + results: headers, + peer, + range, + .. + } = headers; + let maybe_txs = get_transactions(peer.clone(), range.clone(), p2p.as_ref()).await; match maybe_txs { Ok(transaction_data) => { - let headers = headers; let iter = headers.into_iter().zip(transaction_data); let mut blocks = vec![]; for (block_header, transactions) in iter { @@ -627,25 +666,16 @@ where if let Some(block) = block { blocks.push(block); } else { - tracing::error!( - "Failed to created block from header and transactions" - ); report_peer( p2p.as_ref(), - peer_id.clone(), + peer.clone(), PeerReportReason::InvalidTransactions, - ) - .await; + ); } } - (blocks, err) - } - Err(error) => { - // Failure to retrieve transactions due to a networking error, - // invalid response, or any other reason constitutes a fatal error. - err = Some(error); - (vec![], err) + Batch::new(peer, range, blocks) } + Err(_error) => Batch::empty(peer, range), } } @@ -683,18 +713,18 @@ where /// Extra stream utilities. trait StreamUtil: Sized { - /// Turn a stream of `Result>` into a stream of `Result`. - /// Close the stream if an error occurs or a `None` is received. - /// Return the error if the stream closes. + // /// Turn a stream of `Result>` into a stream of `Result`. + // /// Close the stream if an error occurs or a `None` is received. + // /// Return the error if the stream closes. // fn into_scan_none_or_err(self) -> ScanNoneErr { // ScanNoneErr(self) // } - - /// Close the stream if an error occurs or an empty `Vector` is received. - /// Return the error if the stream closes. - fn into_scan_empty_or_err(self) -> ScanEmptyErr { - ScanEmptyErr(self) - } + // + // /// Close the stream if an error occurs or an empty `Vector` is received. + // /// Return the error if the stream closes. + // fn into_scan_empty_or_err(self) -> ScanEmptyErr { + // ScanEmptyErr(self) + // } /// Turn a stream of `Result` into a stream of `Result`. /// Close the stream if an error occurs. @@ -707,7 +737,7 @@ trait StreamUtil: Sized { impl StreamUtil for S {} // struct ScanNoneErr(S); -struct ScanEmptyErr(S); +// struct ScanEmptyErr(S); struct ScanErr(S); // impl ScanNoneErr { @@ -729,44 +759,41 @@ struct ScanErr(S); // } // } -impl ScanEmptyErr { - /// Scan the stream for empty vector or errors. 
- fn scan_empty_or_err<'a, R: 'a>( - self, - ) -> impl Stream, ImportError>> + 'a - where - S: Stream, ImportError>> + Send + 'a, - { - let stream = self.0.boxed::<'a>(); - futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { - if is_err { - None - } else { - let result = stream.next().await?; - is_err = result.is_err(); - result - .map(|v| (!v.is_empty()).then_some(v)) - .transpose() - .map(|result| (result, (is_err, stream))) - } - }) - } -} - +// impl ScanEmptyErr { +// /// Scan the stream for empty vector or errors. +// fn scan_empty_or_err<'a, T: 'a>(self) -> impl Stream> + 'a +// where +// S: Stream> + Send + 'a, +// { +// let stream = self.0.boxed::<'a>(); +// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { +// if is_err { +// None +// } else { +// let batch = stream.next().await?; +// is_err = batch.is_err(); +// (!batch.is_empty()) +// .then_some(batch) +// .map(|batch| (batch, (is_err, stream))) +// } +// }) +// } +// } +// impl ScanErr { /// Scan the stream for errors. - fn scan_err<'a, R: 'a>(self) -> impl Stream> + 'a + fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a where - S: Stream> + Send + 'a, + S: Stream> + Send + 'a, { let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { if err { None } else { - let result = stream.next().await?; - err = result.is_err(); - Some((result, (err, stream))) + let batch = stream.next().await?; + err = batch.is_err(); + Some((batch, (err, stream))) } }) } diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 944788a53a6..140bf452b68 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -91,7 +91,7 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.get_transactions_2(block_ids).await } - async fn report_peer( + fn report_peer( &self, _peer: PeerId, _report: PeerReportReason, diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 9ff1708dc6a..6b561f658bf 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -24,7 +24,16 @@ use super::*; #[tokio::test] async fn test_import_0_to_5() { - let consensus_port = MockConsensusPort::times([6]); + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(6) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -62,7 +71,16 @@ async fn test_import_0_to_5() { #[tokio::test] async fn test_import_3_to_5() { - let consensus_port = MockConsensusPort::times([2]); + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -143,7 +161,7 @@ async fn import__signature_fails_on_header_5_only() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(4, None), true), res); + assert_eq!((State::new(4, None), 
false), res); } #[tokio::test] @@ -158,6 +176,7 @@ async fn import__signature_fails_on_header_4_only() { .expect_await_da_height() .times(0) .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -191,7 +210,7 @@ async fn import__signature_fails_on_header_4_only() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] @@ -218,7 +237,7 @@ async fn import__header_not_found() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] @@ -279,7 +298,7 @@ async fn import__header_5_not_found() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(4, None), true), res); + assert_eq!((State::new(4, None), false), res); } #[tokio::test] @@ -307,12 +326,22 @@ async fn import__header_4_not_found() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] async fn import__transactions_not_found() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -331,7 +360,7 @@ async fn import__transactions_not_found() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port, executor: DefaultMocks::times([0]), }; @@ -339,12 +368,22 @@ async fn import__transactions_not_found() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] async fn import__transactions_not_found_for_header_4() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -373,7 +412,7 @@ async fn import__transactions_not_found_for_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port, executor: DefaultMocks::times([0]), }; @@ -381,12 +420,22 @@ async fn import__transactions_not_found_for_header_4() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] async fn import__transactions_not_found_for_header_5() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -408,7 +457,7 @@ async fn 
import__transactions_not_found_for_header_5() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port, executor: DefaultMocks::times([1]), }; @@ -416,7 +465,7 @@ async fn import__transactions_not_found_for_header_5() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(4, None), true), res); + assert_eq!((State::new(4, None), false), res); } #[tokio::test] @@ -450,6 +499,16 @@ async fn import__p2p_error() { #[tokio::test] async fn import__p2p_error_on_4_transactions() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -468,7 +527,7 @@ async fn import__p2p_error_on_4_transactions() { let state = State::new(3, 5).into(); let mocks = Mocks { p2p, - consensus_port: DefaultMocks::times([2]), + consensus_port, executor: DefaultMocks::times([0]), }; @@ -621,6 +680,16 @@ async fn import__consensus_error_on_5() { #[tokio::test] async fn import__execution_error_on_header_4() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -654,7 +723,7 @@ async fn import__execution_error_on_header_4() { let state = State::new(3, 5).into(); let mocks = Mocks { - consensus_port: DefaultMocks::times([2]), + consensus_port, p2p, executor, }; @@ -669,6 +738,16 @@ async fn import__execution_error_on_header_4() { #[tokio::test] async fn import__execution_error_on_header_5() { // given + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(2) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(1) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); p2p.expect_select_peer().times(1).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -702,7 +781,7 @@ async fn import__execution_error_on_header_5() { let state = State::new(3, 5).into(); let mocks = Mocks { - consensus_port: DefaultMocks::times([2]), + consensus_port, p2p, executor, }; @@ -722,9 +801,7 @@ async fn signature_always_fails() { .expect_check_sealed_header() .times(1) .returning(|_| Ok(false)); - consensus_port - .expect_await_da_height() - .returning(|_| Ok(())); + consensus_port.expect_await_da_height().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -737,7 +814,7 @@ async fn signature_always_fails() { let res = test_import_inner(state, mocks, None).await; // then - assert_eq!((State::new(3, None), true), res); + assert_eq!((State::new(3, None), false), res); } #[tokio::test] @@ -745,6 +822,17 @@ async fn import__can_work_in_two_loops() { // given let s = SharedMutex::new(State::new(3, 5)); let state = s.clone(); + + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(3) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(2) + .returning(|_| Ok(())); + let mut p2p = MockPeerToPeerPort::default(); 
p2p.expect_select_peer().times(2).returning(|_| { let bytes = vec![1u8, 2, 3, 4, 5]; @@ -767,9 +855,10 @@ async fn import__can_work_in_two_loops() { let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) }); + let c = DefaultMocks::times([2]); let mocks = Mocks { - consensus_port: DefaultMocks::times([3]), + consensus_port, p2p, executor: DefaultMocks::times([3]), }; diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 761c2612d14..7367f4e8d34 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -72,11 +72,7 @@ pub trait PeerToPeerPort { ) -> anyhow::Result>>; /// Report a peer for some reason to modify their reputation. - async fn report_peer( - &self, - peer: PeerId, - report: PeerReportReason, - ) -> anyhow::Result<()>; + fn report_peer(&self, peer: PeerId, report: PeerReportReason) -> anyhow::Result<()>; } #[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] From 90e4ab6f00e8d703c571bdeed20db85e849e8964 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:26:48 -0400 Subject: [PATCH 62/87] Clippy says implicit lifetimes --- crates/services/sync/src/import.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index ec96461a6ec..89d6deef1a8 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -358,16 +358,15 @@ where } fn get_block_stream< - 'a, P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( peer_id: PeerId, range: RangeInclusive, - params: &'a Config, + params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream> + 'a { +) -> impl Stream> + '_ { let header_stream = get_header_batch_stream(peer_id.clone(), range.clone(), params, p2p.clone()); let range = *range.start()..(*range.end() + 1); From 7e2eef014b134fe5e6abe88b66135fd82da51a4b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:32:59 -0400 Subject: [PATCH 63/87] Clean up --- crates/services/sync/src/import.rs | 113 +++++++---------------------- 1 file changed, 27 insertions(+), 86 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 89d6deef1a8..3a4ad39f5c0 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -261,39 +261,34 @@ where p2p.clone(), consensus.clone(), ); - - let (peer, state, p2p, executor) = (peer.clone(), state, p2p, executor); - let peer_ = peer.clone(); let range = *range.start()..(*range.end() + 1); - let range_ = range.clone(); - let result = block_stream - .map(move |stream_block_batch| { - let shutdown_guard = shutdown_guard.clone(); - let shutdown_signal = shutdown_signal.clone(); + .map({ let peer = peer.clone(); let range = range.clone(); - tokio::spawn(async move { - // Hold a shutdown sender for the lifetime of the spawned task - let _shutdown_guard = shutdown_guard.clone(); - let mut shutdown_signal = shutdown_signal.clone(); + move |stream_block_batch| { + let shutdown_guard = shutdown_guard.clone(); + let shutdown_signal = shutdown_signal.clone(); let peer = peer.clone(); let range = range.clone(); - tokio::select! { + tokio::spawn(async move { + // Hold a shutdown sender for the lifetime of the spawned task + let _shutdown_guard = shutdown_guard.clone(); + let mut shutdown_signal = shutdown_signal.clone(); + let peer = peer.clone(); + let range = range.clone(); + tokio::select! { // Stream a batch of blocks blocks = stream_block_batch => blocks, // If a shutdown signal is received during the stream, terminate early and // return an empty response _ = shutdown_signal.while_started() => Batch::empty(peer, range) } - }).map(|task| task.map_err(ImportError::JoinError)) + }).map(|task| task.map_err(ImportError::JoinError)) + } }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) - // Continue the stream unless an error occurs. - // Note the error will be returned but the stream will close. - // .into_scan_err() - // .scan_err() // Continue the stream until the shutdown signal is received. 
.take_until({ let mut s = shutdown.clone(); @@ -302,19 +297,23 @@ where tracing::info!("In progress import stream shutting down"); } }) - .map(|r| r.unwrap_or({ - let peer = peer_.clone(); - let range = range_.clone(); - SealedBlockBatch::empty(peer, range) - })) + .map({ + let peer = peer.clone(); + let range = range.clone(); + move |result| result.unwrap_or({ + let peer = peer.clone(); + let range = range.clone(); + SealedBlockBatch::empty(peer, range) + }) + }) .then({ - let peer = peer_.clone(); - let range = range_.clone(); + let peer = peer.clone(); + let range = range.clone(); move |batch| { let peer = peer.clone(); let range = range.clone(); - let err = batch.err(); async move { + let err = batch.err(); let sealed_blocks = futures::stream::iter(batch.results); let res = sealed_blocks.then(|sealed_block| async { execute_and_commit(executor.as_ref(), state, sealed_block).await @@ -322,13 +321,13 @@ where match res { Ok(v) => { report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); - Batch::<()>::new(peer, range, v) + Batch::new(peer, range, v) }, Err(e) => { // If this fails, then it means that consensus has approved a block that is invalid. // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - Batch::<()>::empty(peer, range) + Batch::empty(peer, range) }, } @@ -712,22 +711,6 @@ where /// Extra stream utilities. trait StreamUtil: Sized { - // /// Turn a stream of `Result>` into a stream of `Result`. - // /// Close the stream if an error occurs or a `None` is received. - // /// Return the error if the stream closes. - // fn into_scan_none_or_err(self) -> ScanNoneErr { - // ScanNoneErr(self) - // } - // - // /// Close the stream if an error occurs or an empty `Vector` is received. - // /// Return the error if the stream closes. - // fn into_scan_empty_or_err(self) -> ScanEmptyErr { - // ScanEmptyErr(self) - // } - - /// Turn a stream of `Result` into a stream of `Result`. - /// Close the stream if an error occurs. - /// Return the error if the stream closes. fn into_scan_err(self) -> ScanErr { ScanErr(self) } @@ -735,50 +718,8 @@ trait StreamUtil: Sized { impl StreamUtil for S {} -// struct ScanNoneErr(S); -// struct ScanEmptyErr(S); struct ScanErr(S); -// impl ScanNoneErr { -// /// Scan the stream for `None` or errors. -// fn scan_none_or_err(self) -> impl Stream> -// where -// S: Stream, ImportError>> + Send + 'static, -// { -// let stream = self.0.boxed(); -// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { -// if is_err { -// None -// } else { -// let result = stream.next().await?; -// is_err = result.is_err(); -// result.transpose().map(|result| (result, (is_err, stream))) -// } -// }) -// } -// } - -// impl ScanEmptyErr { -// /// Scan the stream for empty vector or errors. -// fn scan_empty_or_err<'a, T: 'a>(self) -> impl Stream> + 'a -// where -// S: Stream> + Send + 'a, -// { -// let stream = self.0.boxed::<'a>(); -// futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { -// if is_err { -// None -// } else { -// let batch = stream.next().await?; -// is_err = batch.is_err(); -// (!batch.is_empty()) -// .then_some(batch) -// .map(|batch| (batch, (is_err, stream))) -// } -// }) -// } -// } -// impl ScanErr { /// Scan the stream for errors. 
fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a From 8a7a65a9a77422a827f6d2aa5ef6ee9cae981d1d Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:36:23 -0400 Subject: [PATCH 64/87] More clippy --- crates/services/sync/src/import.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 3a4ad39f5c0..298166c45c7 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -429,12 +429,12 @@ fn get_block_stream< }) } -fn get_header_batch_stream<'a, P: PeerToPeerPort + Send + Sync + 'static>( +fn get_header_batch_stream( peer: PeerId, range: RangeInclusive, - params: &'a Config, + params: &Config, p2p: Arc
<P>
, -) -> impl Stream + 'a { +) -> impl Stream { let Config { header_batch_size, .. } = params; From 721a09849fa70220b3c3553ad3c8df48546c0f6b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:37:35 -0400 Subject: [PATCH 65/87] Minor --- crates/services/sync/src/import.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 298166c45c7..d70eed934e6 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -421,8 +421,7 @@ fn get_block_stream< let p2p = p2p.clone(); async move { let headers = headers.await.unwrap_or(Default::default()); - let headers = - SealedHeaderBatch::new(peer_id.clone(), range.clone(), headers); + let headers = SealedHeaderBatch::new(peer_id.clone(), range, headers); get_blocks(p2p, headers).await } } From 72ef01e1c3159c284b4b468e45f4429ea43411e3 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 16:44:48 -0400 Subject: [PATCH 66/87] Minor --- crates/services/sync/src/import.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d70eed934e6..6bd8d5b5223 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -389,13 +389,11 @@ fn get_block_stream< p2p.clone(), consensus.clone(), )?; - Ok(header) + Result::<_, ImportError>::Ok(header) } }) .take_while(|result| result.is_ok()) - .filter_map(|result: Result| { - result.ok() - }) + .filter_map(|result| result.ok()) .collect::>() } }) From f3d7a3b211561e3a92dd4fb1da333c79e3715304 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 17:25:11 -0400 Subject: [PATCH 67/87] Minor --- crates/services/sync/src/import.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 6bd8d5b5223..2163cd0a70b 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -149,10 +149,6 @@ impl Batch { } } - pub fn is_empty(&self) -> bool { - self.results.is_empty() - } - pub fn is_err(&self) -> bool { self.results.len() < self.range.len() } @@ -347,7 +343,6 @@ where .fold(0usize, |count, batch| async move { count + batch.results.len() }) - // .in_current_span() .await; // Wait for any spawned tasks to shutdown @@ -419,8 +414,16 @@ fn get_block_stream< let p2p = p2p.clone(); async move { let headers = headers.await.unwrap_or(Default::default()); - let headers = SealedHeaderBatch::new(peer_id.clone(), range, headers); - get_blocks(p2p, headers).await + if headers.is_empty() { + SealedBlockBatch::empty(peer_id, range) + } else { + let headers = SealedHeaderBatch::new( + peer_id.clone(), + range.clone(), + headers, + ); + get_blocks(p2p, headers).await + } } } }) @@ -630,15 +633,10 @@ async fn get_blocks
<P>
(p2p: Arc
<P>
, headers: SealedHeaderBatch) -> SealedBlockBa where P: PeerToPeerPort + Send + Sync + 'static, { - if headers.is_empty() { - return SealedBlockBatch::empty(headers.peer, headers.range) - } - let Batch { results: headers, peer, range, - .. } = headers; let maybe_txs = get_transactions(peer.clone(), range.clone(), p2p.as_ref()).await; match maybe_txs { From 5798cbc44281b31f1e522c4fa0e2dbb6090b3c1b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 17:43:02 -0400 Subject: [PATCH 68/87] Instrument --- crates/services/sync/src/import.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 2163cd0a70b..d297e1a8a54 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -425,6 +425,8 @@ fn get_block_stream< get_blocks(p2p, headers).await } } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() } }) } From af2d696f04e1032a5f80cd6cce77da9a8b251c54 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 21:15:34 -0400 Subject: [PATCH 69/87] Import tests passing --- crates/services/sync/src/import.rs | 106 ++++++++++++++++++----------- 1 file changed, 68 insertions(+), 38 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index d297e1a8a54..dc8eecac284 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -157,11 +157,11 @@ impl Batch { type SealedHeaderBatch = Batch; type SealedBlockBatch = Batch; -impl SealedBlockBatch { - fn err(&self) -> Option { - self.is_err().then(|| ImportError::MissingTransactions) - } -} +// impl SealedBlockBatch { +// fn err(&self) -> Option { +// self.is_err().then(|| ImportError::MissingTransactions) +// } +// } #[derive(Debug, derive_more::Display)] enum ImportError { @@ -257,28 +257,28 @@ where p2p.clone(), consensus.clone(), ); - let range = *range.start()..(*range.end() + 1); + // let range = *range.start()..(*range.end() + 1); let result = block_stream .map({ - let peer = peer.clone(); - let range = range.clone(); + // let peer = peer.clone(); + // let range = range.clone(); move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); let shutdown_signal = shutdown_signal.clone(); - let peer = peer.clone(); - let range = range.clone(); + // let peer = peer.clone(); + // let range = range.clone(); tokio::spawn(async move { // Hold a shutdown sender for the lifetime of the spawned task let _shutdown_guard = shutdown_guard.clone(); let mut shutdown_signal = shutdown_signal.clone(); - let peer = peer.clone(); - let range = range.clone(); + // let peer = peer.clone(); + // let range = range.clone(); tokio::select! 
{ // Stream a batch of blocks - blocks = stream_block_batch => blocks, + blocks = stream_block_batch => Some(blocks), // If a shutdown signal is received during the stream, terminate early and // return an empty response - _ = shutdown_signal.while_started() => Batch::empty(peer, range) + _ = shutdown_signal.while_started() => None } }).map(|task| task.map_err(ImportError::JoinError)) } @@ -293,40 +293,41 @@ where tracing::info!("In progress import stream shutting down"); } }) - .map({ - let peer = peer.clone(); - let range = range.clone(); - move |result| result.unwrap_or({ - let peer = peer.clone(); - let range = range.clone(); - SealedBlockBatch::empty(peer, range) - }) - }) + .into_scan_none_or_err() + .scan_none_or_err() + // .map({ + // let peer = peer.clone(); + // let range = range.clone(); + // move |result| result.unwrap_or({ + // let peer = peer.clone(); + // let range = range.clone(); + // SealedBlockBatch::empty(peer, range) + // }) + // }) .then({ let peer = peer.clone(); - let range = range.clone(); + // let range = range.clone(); move |batch| { let peer = peer.clone(); - let range = range.clone(); async move { - let err = batch.err(); - let sealed_blocks = futures::stream::iter(batch.results); + let batch = batch?; + let error = batch.is_err().then(|| ImportError::MissingTransactions); + let results = batch.results; + let sealed_blocks = futures::stream::iter(results); let res = sealed_blocks.then(|sealed_block| async { execute_and_commit(executor.as_ref(), state, sealed_block).await - }).try_collect::>().await.and_then(|v| err.map_or(Ok(v), Err)); - match res { - Ok(v) => { + }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); + match &res { + Ok(_) => { report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); - Batch::new(peer, range, v) }, Err(e) => { // If this fails, then it means that consensus has approved a block that is invalid. // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - Batch::empty(peer, range) - }, - } + }; + res } .instrument(tracing::debug_span!("execute_and_commit")) .in_current_span() @@ -340,8 +341,11 @@ where // Count the number of successfully executed blocks and // find any errors. // Fold the stream into a count and any errors. - .fold(0usize, |count, batch| async move { - count + batch.results.len() + .fold(0usize, |count, result| async move { + match result { + Ok(batch) => count + batch.len(), + Err(_) => count + } }) .await; @@ -708,6 +712,10 @@ where /// Extra stream utilities. trait StreamUtil: Sized { + fn into_scan_none_or_err(self) -> ScanNoneErr { + ScanNoneErr(self) + } + fn into_scan_err(self) -> ScanErr { ScanErr(self) } @@ -716,12 +724,34 @@ trait StreamUtil: Sized { impl StreamUtil for S {} struct ScanErr(S); +struct ScanNoneErr(S); + +impl ScanNoneErr { + /// Scan the stream for `None` or errors. + fn scan_none_or_err<'a, T: 'a>( + self, + ) -> impl Stream> + 'a + where + S: Stream, ImportError>> + Send + 'a, + { + let stream = self.0.boxed(); + futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { + if is_err { + None + } else { + let result = stream.next().await?; + is_err = result.is_err(); + result.transpose().map(|result| (result, (is_err, stream))) + } + }) + } +} impl ScanErr { /// Scan the stream for errors. 
- fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a + fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a where - S: Stream> + Send + 'a, + S: Stream> + Send + 'a, { let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { From aa63098da78dcfc91ea028a4af55f8bc432236e3 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 21:43:38 -0400 Subject: [PATCH 70/87] All tests passing --- crates/services/sync/src/import.rs | 174 ++++++++---------- .../sync/src/import/back_pressure_tests.rs | 48 +++++ 2 files changed, 128 insertions(+), 94 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index dc8eecac284..dd85a568a56 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -133,14 +133,6 @@ struct Batch { } impl Batch { - pub fn empty(peer: PeerId, range: Range) -> Self { - Self { - peer, - range, - results: vec![], - } - } - pub fn new(peer: PeerId, range: Range, results: Vec) -> Self { Self { peer, @@ -157,12 +149,6 @@ impl Batch { type SealedHeaderBatch = Batch; type SealedBlockBatch = Batch; -// impl SealedBlockBatch { -// fn err(&self) -> Option { -// self.is_err().then(|| ImportError::MissingTransactions) -// } -// } - #[derive(Debug, derive_more::Display)] enum ImportError { ConsensusError(anyhow::Error), @@ -310,7 +296,7 @@ where move |batch| { let peer = peer.clone(); async move { - let batch = batch?; + let batch = batch??; let error = batch.is_err().then(|| ImportError::MissingTransactions); let results = batch.results; let sealed_blocks = futures::stream::iter(results); @@ -364,18 +350,23 @@ fn get_block_stream< params: &Config, p2p: Arc
<P>
, consensus: Arc, -) -> impl Stream> + '_ { +) -> impl Stream>> + '_ +{ let header_stream = get_header_batch_stream(peer_id.clone(), range.clone(), params, p2p.clone()); - let range = *range.start()..(*range.end() + 1); + // let range = *range.start()..(*range.end() + 1); header_stream .map({ - let peer_id = peer_id.clone(); let consensus = consensus.clone(); let p2p = p2p.clone(); move |header_batch| { - header_batch - .results + let header_batch = header_batch?; + let Batch { + peer, + range, + results, + } = header_batch; + let results = results .into_iter() .map({ let peer_id = peer_id.clone(); @@ -393,15 +384,18 @@ fn get_block_stream< }) .take_while(|result| result.is_ok()) .filter_map(|result| result.ok()) - .collect::>() + .collect::>(); + let batch = Batch::new(peer, range, results); + Result::<_, ImportError>::Ok(batch) } }) .map({ let consensus = consensus.clone(); - move |valid_headers| { + move |valid_headers_batch| { let consensus = consensus.clone(); async move { - if let Some(header) = valid_headers.last() { + let valid_headers = valid_headers_batch?; + if let Some(header) = valid_headers.results.last() { await_da_height(header, consensus.as_ref()).await? }; Result::<_, ImportError>::Ok(valid_headers) @@ -409,24 +403,24 @@ fn get_block_stream< } }) .map({ - let peer_id = peer_id.clone(); - let range = range.clone(); let p2p = p2p.clone(); move |headers| { - let peer_id = peer_id.clone(); - let range = range.clone(); let p2p = p2p.clone(); async move { - let headers = headers.await.unwrap_or(Default::default()); - if headers.is_empty() { - SealedBlockBatch::empty(peer_id, range) + let headers = headers.await?; + let Batch { + peer, + range, + results, + } = headers; + if results.is_empty() { + let batch = SealedBlockBatch::new(peer, range, vec![]); + Ok(batch) } else { - let headers = SealedHeaderBatch::new( - peer_id.clone(), - range.clone(), - headers, - ); - get_blocks(p2p, headers).await + let headers = + SealedHeaderBatch::new(peer.clone(), range.clone(), results); + let batch = get_blocks(p2p, headers).await?; + Ok(batch) } } .instrument(tracing::debug_span!("consensus_and_transactions")) @@ -440,7 +434,7 @@ fn get_header_batch_stream( range: RangeInclusive, params: &Config, p2p: Arc
<P>
, -) -> impl Stream { +) -> impl Stream> { let Config { header_batch_size, .. } = params; @@ -580,7 +574,7 @@ async fn get_headers_batch
<P>
( peer_id: PeerId, range: RangeInclusive<u32>, p2p: Arc
<P>
, -) -> SealedHeaderBatch +) -> Result where P: PeerToPeerPort + Send + Sync + 'static, { @@ -592,33 +586,28 @@ where let start = *range.start(); let end = *range.end() + 1; let range = start..end; - let res = - get_sealed_block_headers(peer_id.clone(), range.clone(), p2p.as_ref()).await; - match res { - Ok(headers) => { - let headers = headers.into_iter(); - let heights = range.clone().map(BlockHeight::from); - let headers = headers - .zip(heights) - .take_while(move |(header, expected_height)| { - let height = header.entity.height(); - height == expected_height - }) - .map(|(header, _)| header) - .collect::>(); - if let Some(expected_len) = end.checked_sub(start) { - if headers.len() != expected_len as usize { - report_peer( - p2p.as_ref(), - peer_id.clone(), - PeerReportReason::MissingBlockHeaders, - ); - } - } - Batch::new(peer_id, range.clone(), headers) + let headers = + get_sealed_block_headers(peer_id.clone(), range.clone(), p2p.as_ref()).await?; + let headers = headers.into_iter(); + let heights = range.clone().map(BlockHeight::from); + let headers = headers + .zip(heights) + .take_while(move |(header, expected_height)| { + let height = header.entity.height(); + height == expected_height + }) + .map(|(header, _)| header) + .collect::>(); + if let Some(expected_len) = end.checked_sub(start) { + if headers.len() != expected_len as usize { + report_peer( + p2p.as_ref(), + peer_id.clone(), + PeerReportReason::MissingBlockHeaders, + ); } - Err(_e) => Batch::empty(peer_id, range.clone()), } + Ok(Batch::new(peer_id, range.clone(), headers)) } fn report_peer
<P>
(p2p: &P, peer_id: PeerId, reason: PeerReportReason) @@ -635,7 +624,10 @@ where /// Get blocks correlating to the headers from a specific peer #[tracing::instrument(skip(p2p, headers))] -async fn get_blocks
<P>
(p2p: Arc
<P>
, headers: SealedHeaderBatch) -> SealedBlockBatch +async fn get_blocks
<P>
( + p2p: Arc
<P>
, + headers: SealedHeaderBatch, +) -> Result where P: PeerToPeerPort + Send + Sync + 'static, { @@ -644,38 +636,32 @@ where peer, range, } = headers; - let maybe_txs = get_transactions(peer.clone(), range.clone(), p2p.as_ref()).await; - match maybe_txs { - Ok(transaction_data) => { - let iter = headers.into_iter().zip(transaction_data); - let mut blocks = vec![]; - for (block_header, transactions) in iter { - let block_header = block_header.clone(); - let SealedBlockHeader { - consensus, - entity: header, - } = block_header; - let block = - Block::try_from_executed(header, transactions.0).map(|block| { - SealedBlock { - entity: block, - consensus, - } - }); - if let Some(block) = block { - blocks.push(block); - } else { - report_peer( - p2p.as_ref(), - peer.clone(), - PeerReportReason::InvalidTransactions, - ); - } - } - Batch::new(peer, range, blocks) + let transaction_data = + get_transactions(peer.clone(), range.clone(), p2p.as_ref()).await?; + let iter = headers.into_iter().zip(transaction_data); + let mut blocks = vec![]; + for (block_header, transactions) in iter { + let block_header = block_header.clone(); + let SealedBlockHeader { + consensus, + entity: header, + } = block_header; + let block = + Block::try_from_executed(header, transactions.0).map(|block| SealedBlock { + entity: block, + consensus, + }); + if let Some(block) = block { + blocks.push(block); + } else { + report_peer( + p2p.as_ref(), + peer.clone(), + PeerReportReason::InvalidTransactions, + ); } - Err(_error) => Batch::empty(peer, range), } + Ok(Batch::new(peer, range, blocks)) } #[tracing::instrument( diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index c58fc97032d..d141537489f 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -119,3 +119,51 @@ async fn test_back_pressure(input: Input, state: State, params: Config) -> Count import.import(&mut watcher).await.unwrap(); counts.apply(|c| c.max.clone()) } + +#[tokio::test(flavor = "multi_thread")] +async fn test_back_pressure_2() { + // input: Input, state: State, params: Config + let input = Input { + executes: Duration::from_millis(10), + ..Default::default() + }; + let state = State::new(None, 50); + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; + let counts = SharedCounts::new(Default::default()); + let state = SharedMutex::new(state); + + let p2p = Arc::new(PressurePeerToPeer::new( + counts.clone(), + [input.headers, input.transactions], + )); + let executor = Arc::new(PressureBlockImporter::new(counts.clone(), input.executes)); + let consensus = Arc::new(PressureConsensus::new(counts.clone(), input.consensus)); + let notify = Arc::new(Notify::new()); + + let import = Import { + state, + notify, + params, + p2p, + executor, + consensus, + }; + + import.notify.notify_one(); + let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let mut watcher = shutdown.into(); + import.import(&mut watcher).await.unwrap(); + let counts = counts.apply(|c| c.max.clone()); + let expected = Count { + headers: 10, + consensus: 10, + transactions: 10, + executes: 1, + blocks: 21, + }; + + assert!(counts <= expected); +} From 3a656d392c6b35536ee884c60fff93e990bf7cba Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 21:54:11 -0400 Subject: [PATCH 71/87] Clean up --- crates/services/sync/src/import.rs | 104 ++++++++++++----------------- 1 
file changed, 41 insertions(+), 63 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index dd85a568a56..7b8576d7edf 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -243,31 +243,22 @@ where p2p.clone(), consensus.clone(), ); - // let range = *range.start()..(*range.end() + 1); let result = block_stream - .map({ - // let peer = peer.clone(); - // let range = range.clone(); - move |stream_block_batch| { - let shutdown_guard = shutdown_guard.clone(); - let shutdown_signal = shutdown_signal.clone(); - // let peer = peer.clone(); - // let range = range.clone(); - tokio::spawn(async move { - // Hold a shutdown sender for the lifetime of the spawned task - let _shutdown_guard = shutdown_guard.clone(); - let mut shutdown_signal = shutdown_signal.clone(); - // let peer = peer.clone(); - // let range = range.clone(); - tokio::select! { - // Stream a batch of blocks - blocks = stream_block_batch => Some(blocks), - // If a shutdown signal is received during the stream, terminate early and - // return an empty response - _ = shutdown_signal.while_started() => None - } - }).map(|task| task.map_err(ImportError::JoinError)) + .map(move |stream_block_batch| { + let shutdown_guard = shutdown_guard.clone(); + let shutdown_signal = shutdown_signal.clone(); + tokio::spawn(async move { + // Hold a shutdown sender for the lifetime of the spawned task + let _shutdown_guard = shutdown_guard.clone(); + let mut shutdown_signal = shutdown_signal.clone(); + tokio::select! { + // Stream a batch of blocks + blocks = stream_block_batch => blocks.map(Some), + // If a shutdown signal is received during the stream, terminate early and + // return an empty response + _ = shutdown_signal.while_started() => Ok(None) } + }).map(|task| task.map_err(ImportError::JoinError)?) }) // Request up to `block_stream_buffer_size` transactions from the network. .buffered(params.block_stream_buffer_size) @@ -281,46 +272,34 @@ where }) .into_scan_none_or_err() .scan_none_or_err() - // .map({ - // let peer = peer.clone(); - // let range = range.clone(); - // move |result| result.unwrap_or({ - // let peer = peer.clone(); - // let range = range.clone(); - // SealedBlockBatch::empty(peer, range) - // }) - // }) - .then({ - let peer = peer.clone(); - // let range = range.clone(); - move |batch| { - let peer = peer.clone(); - async move { - let batch = batch??; - let error = batch.is_err().then(|| ImportError::MissingTransactions); - let results = batch.results; - let sealed_blocks = futures::stream::iter(results); - let res = sealed_blocks.then(|sealed_block| async { - execute_and_commit(executor.as_ref(), state, sealed_block).await - }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); - match &res { - Ok(_) => { - report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. - tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, - }; - res - } - .instrument(tracing::debug_span!("execute_and_commit")) - .in_current_span() - } + .then(|batch| { + async move { + let batch = batch?; + let error = batch.is_err().then(|| ImportError::MissingTransactions); + let Batch { + peer, + results, + .. 
+ } = batch; + let sealed_blocks = futures::stream::iter(results); + let res = sealed_blocks.then(|sealed_block| async { + execute_and_commit(executor.as_ref(), state, sealed_block).await + }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); + match &res { + Ok(_) => { + report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + }, + }; + res } - ) - + .instrument(tracing::debug_span!("execute_and_commit")) + .in_current_span() + }) // Continue the stream unless an error occurs. .into_scan_err() .scan_err() @@ -354,7 +333,6 @@ fn get_block_stream< { let header_stream = get_header_batch_stream(peer_id.clone(), range.clone(), params, p2p.clone()); - // let range = *range.start()..(*range.end() + 1); header_stream .map({ let consensus = consensus.clone(); From cfd3006ccc5bbd52b1f0bda38f2a80681de9cebd Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Thu, 28 Sep 2023 23:47:58 -0400 Subject: [PATCH 72/87] Restore random peer id from header request --- crates/fuel-core/src/service/adapters/sync.rs | 33 +- crates/services/p2p/src/p2p_service.rs | 6 +- .../p2p/src/request_response/messages.rs | 3 +- crates/services/p2p/src/service.rs | 52 +--- crates/services/sync/src/import.rs | 83 ++--- .../services/sync/src/import/test_helpers.rs | 18 +- .../test_helpers/pressure_peer_to_peer.rs | 39 ++- crates/services/sync/src/import/tests.rs | 285 ++++++------------ crates/services/sync/src/ports.rs | 10 +- crates/services/sync/src/service/tests.rs | 26 +- 10 files changed, 201 insertions(+), 354 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index 5aa2ee2f94f..d38e38c5c88 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -49,34 +49,23 @@ impl PeerToPeerPort for P2PAdapter { } } - async fn select_peer( - &self, - block_height: BlockHeight, - ) -> anyhow::Result> { - if let Some(service) = &self.service { - let peer_id = service.select_peer(block_height).await?; - Ok(peer_id) - } else { - Err(anyhow::anyhow!("No P2P service available")) - } - } - async fn get_sealed_block_headers( &self, - block_height_range: SourcePeer>, - ) -> SourcePeer>>> { - let SourcePeer { - peer_id, - data: block_height_range, - } = block_height_range; + block_height_range: Range, + ) -> anyhow::Result>>> { let result = if let Some(service) = &self.service { - service - .get_sealed_block_headers(peer_id.clone().into(), block_height_range) - .await + service.get_sealed_block_headers(block_height_range).await } else { Err(anyhow::anyhow!("No P2P service available")) }; - peer_id.bind(result) + match result { + Ok((peer_id, headers)) => { + let peer_id: PeerId = peer_id.into(); + let headers = peer_id.bind(headers); + Ok(headers) + } + Err(err) => Err(err), + } } async fn get_transactions( diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 6496e28deb5..727c737293a 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -566,7 +566,7 @@ impl FuelP2PService { } } FuelBehaviourEvent::RequestResponse(req_res_event) => match req_res_event { - RequestResponseEvent::Message { message, .. 
} => match message { + RequestResponseEvent::Message { peer, message } => match message { RequestResponseMessage::Request { request, channel, @@ -624,7 +624,7 @@ impl FuelP2PService { Some(ResponseChannelItem::SealedHeaders(channel)), Ok(ResponseMessage::SealedHeaders(headers)), ) => { - if channel.send(headers).is_err() { + if channel.send((peer, headers)).is_err() { debug!( "Failed to send through the channel for {:?}", request_id @@ -1606,7 +1606,7 @@ mod tests { let expected = arbitrary_headers_for_range(range.clone()); - if let Ok(sealed_headers) = response_message { + if let Ok((_, sealed_headers)) = response_message { let check = expected.iter().zip(sealed_headers.unwrap().iter()).all(|(a, b)| eq_except_metadata(a, b)); let _ = tx_test_end.send(check).await; } else { diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index fcdcfbab251..875f9bd2756 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -13,6 +13,7 @@ use fuel_core_types::{ fuel_types::BlockHeight, services::p2p::Transactions, }; +use libp2p::PeerId; use serde::{ Deserialize, Serialize, @@ -61,7 +62,7 @@ pub enum ResponseMessage { #[derive(Debug)] pub enum ResponseChannelItem { Block(oneshot::Sender>), - SealedHeaders(oneshot::Sender>>), + SealedHeaders(oneshot::Sender<(PeerId, Option>)>), Transactions(oneshot::Sender>>), Transactions2(oneshot::Sender>>), } diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index e9168268eb7..a9ad4ff8505 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -97,8 +97,7 @@ enum TaskRequest { }, GetSealedHeaders { block_height_range: Range, - from_peer: PeerId, - channel: oneshot::Sender>>, + channel: oneshot::Sender<(PeerId, Option>)>, }, GetTransactions { block_id: BlockId, @@ -117,10 +116,6 @@ enum TaskRequest { score: AppScore, reporting_service: &'static str, }, - SelectPeer { - block_height: BlockHeight, - channel: oneshot::Sender>, - }, } impl Debug for TaskRequest { @@ -486,10 +481,16 @@ where let peer = self.p2p_service.get_peer_id_with_height(&height); let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } - Some(TaskRequest::GetSealedHeaders { block_height_range, from_peer, channel: response}) => { - let request_msg = RequestMessage::SealedHeaders(block_height_range); + Some(TaskRequest::GetSealedHeaders { block_height_range, channel: response}) => { + let request_msg = RequestMessage::SealedHeaders(block_height_range.clone()); let channel_item = ResponseChannelItem::SealedHeaders(response); - let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); + + // Note: this range has already been check for + // validity in `SharedState::get_sealed_block_headers`. 
+ let block_height = BlockHeight::from(block_height_range.end - 1); + let peer = self.p2p_service + .get_peer_id_with_height(&block_height); + let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } Some(TaskRequest::GetTransactions { block_id, from_peer, channel }) => { let request_msg = RequestMessage::Transactions(block_id); @@ -508,10 +509,6 @@ where Some(TaskRequest::RespondWithPeerReport { peer_id, score, reporting_service }) => { let _ = self.p2p_service.report_peer(peer_id, score, reporting_service); } - Some(TaskRequest::SelectPeer { block_height, channel }) => { - let peer = self.p2p_service.get_peer_id_with_height(&block_height); - let _ = channel.send(peer); - } None => { unreachable!("The `Task` is holder of the `Sender`, so it should not be possible"); } @@ -697,32 +694,11 @@ impl SharedState { receiver.await.map_err(|e| anyhow!("{}", e)) } - pub async fn select_peer( - &self, - block_height: BlockHeight, - ) -> anyhow::Result> { - let (sender, receiver) = oneshot::channel(); - - self.request_sender - .send(TaskRequest::SelectPeer { - block_height, - channel: sender, - }) - .await?; - - receiver - .await - .map(|peer_id| peer_id.map(|peer_id| peer_id.to_bytes().into())) - .map_err(|e| anyhow!("{}", e)) - } - pub async fn get_sealed_block_headers( &self, - peer_id: Vec, block_height_range: Range, - ) -> anyhow::Result>> { + ) -> anyhow::Result<(Vec, Option>)> { let (sender, receiver) = oneshot::channel(); - let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); if block_height_range.is_empty() { return Err(anyhow!( @@ -733,12 +709,14 @@ impl SharedState { self.request_sender .send(TaskRequest::GetSealedHeaders { block_height_range, - from_peer, channel: sender, }) .await?; - receiver.await.map_err(|e| anyhow!("{}", e)) + receiver + .await + .map(|(peer_id, headers)| (peer_id.to_bytes(), headers)) + .map_err(|e| anyhow!("{}", e)) } pub async fn get_transactions_from_peer( diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 7b8576d7edf..3fa8a550fb6 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -153,8 +153,6 @@ type SealedBlockBatch = Batch; enum ImportError { ConsensusError(anyhow::Error), ExecutionError(anyhow::Error), - NoSuitablePeer, - MissingBlockHeaders, MissingTransactions, BadBlockHeader, JoinError(JoinError), @@ -229,20 +227,8 @@ where let (shutdown_guard, mut shutdown_guard_recv) = tokio::sync::mpsc::channel::<()>(1); - let block_height = BlockHeight::from(*range.end()); - let peer = select_peer(block_height, p2p.as_ref()).await; - if peer.is_err() { - return 0 - } - let peer = peer.expect("Checked"); - - let block_stream = get_block_stream( - peer.clone(), - range.clone(), - params, - p2p.clone(), - consensus.clone(), - ); + let block_stream = + get_block_stream(range.clone(), params, p2p.clone(), consensus.clone()); let result = block_stream .map(move |stream_block_batch| { let shutdown_guard = shutdown_guard.clone(); @@ -324,15 +310,13 @@ fn get_block_stream< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, >( - peer_id: PeerId, range: RangeInclusive, params: &Config, p2p: Arc
<P>
, consensus: Arc, ) -> impl Stream>> + '_ { - let header_stream = - get_header_batch_stream(peer_id.clone(), range.clone(), params, p2p.clone()); + let header_stream = get_header_batch_stream(range.clone(), params, p2p.clone()); header_stream .map({ let consensus = consensus.clone(); @@ -347,13 +331,13 @@ fn get_block_stream< let results = results .into_iter() .map({ - let peer_id = peer_id.clone(); let consensus = consensus.clone(); let p2p = p2p.clone(); + let peer = peer.clone(); move |header| { check_sealed_header( &header, - peer_id.clone(), + peer.clone(), p2p.clone(), consensus.clone(), )?; @@ -363,7 +347,7 @@ fn get_block_stream< .take_while(|result| result.is_ok()) .filter_map(|result| result.ok()) .collect::>(); - let batch = Batch::new(peer, range, results); + let batch = Batch::new(peer.clone(), range, results); Result::<_, ImportError>::Ok(batch) } }) @@ -408,7 +392,6 @@ fn get_block_stream< } fn get_header_batch_stream( - peer: PeerId, range: RangeInclusive, params: &Config, p2p: Arc
<P>
, @@ -418,9 +401,8 @@ fn get_header_batch_stream( } = params; let ranges = range_chunks(range, *header_batch_size); futures::stream::iter(ranges).then(move |range| { - let peer = peer.clone(); let p2p = p2p.clone(); - async move { get_headers_batch(peer, range, p2p).await } + async move { get_headers_batch(range, p2p).await } }) } @@ -489,24 +471,10 @@ async fn wait_for_notify_or_shutdown( matches!(r, futures::future::Either::Left(_)) } -async fn select_peer
<P>
(block_height: BlockHeight, p2p: &P) -> Result -where - P: PeerToPeerPort + Send + Sync + 'static, -{ - tracing::debug!("getting peer for block height {}", block_height); - let res = p2p.select_peer(block_height).await; - match res { - Ok(Some(peer_id)) => Ok(peer_id), - Ok(None) => Err(ImportError::NoSuitablePeer), - Err(e) => Err(e.into()), - } -} - async fn get_sealed_block_headers
<P>
( - peer: PeerId, range: Range, p2p: &P, -) -> Result, ImportError> +) -> Result>, ImportError> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -515,17 +483,21 @@ where range.start, range.end ); - let range = peer.clone().bind(range); - let res = p2p.get_sealed_block_headers(range).await; - let SourcePeer { data: headers, .. } = res; - match headers { - Ok(Some(headers)) => Ok(headers), - Ok(None) => { - report_peer(p2p, peer.clone(), PeerReportReason::MissingBlockHeaders); - Err(ImportError::MissingBlockHeaders) + let res = p2p + .get_sealed_block_headers(range) + .await + .trace_err("Failed to get headers"); + let sorted_headers = match res { + Ok(sourced_headers) => { + let sourced = sourced_headers.map(|headers| match headers { + None => vec![], + Some(headers) => headers, + }); + Ok(sourced) } - Err(e) => Err(e.into()), - } + Err(e) => Err(ImportError::Other(e)), + }; + sorted_headers } async fn get_transactions
<P>
( @@ -549,7 +521,6 @@ where } async fn get_headers_batch
<P>
( - peer_id: PeerId, range: RangeInclusive, p2p: Arc
<P>
, ) -> Result @@ -564,11 +535,15 @@ where let start = *range.start(); let end = *range.end() + 1; let range = start..end; - let headers = - get_sealed_block_headers(peer_id.clone(), range.clone(), p2p.as_ref()).await?; - let headers = headers.into_iter(); + let result = get_sealed_block_headers(range.clone(), p2p.as_ref()).await; + let sourced_headers = result?; + let SourcePeer { + peer_id, + data: headers, + } = sourced_headers; let heights = range.clone().map(BlockHeight::from); let headers = headers + .into_iter() .zip(heights) .take_while(move |(header, expected_height)| { let height = header.entity.height(); diff --git a/crates/services/sync/src/import/test_helpers.rs b/crates/services/sync/src/import/test_helpers.rs index 94f39b15330..ba948deea92 100644 --- a/crates/services/sync/src/import/test_helpers.rs +++ b/crates/services/sync/src/import/test_helpers.rs @@ -16,19 +16,33 @@ use fuel_core_types::{ }, fuel_types::BlockHeight, }; +use rand::{ + rngs::StdRng, + Rng, + SeedableRng, +}; pub use counts::{ Count, SharedCounts, }; +use fuel_core_types::services::p2p::PeerId; pub use pressure_block_importer::PressureBlockImporter; pub use pressure_consensus::PressureConsensus; pub use pressure_peer_to_peer::PressurePeerToPeer; -pub fn empty_header(h: BlockHeight) -> SealedBlockHeader { +pub fn random_peer() -> PeerId { + let mut rng = StdRng::seed_from_u64(0xF00DF00D); + let bytes = rng.gen::<[u8; 32]>().to_vec(); + let peer_id = PeerId::from(bytes); + peer_id +} + +pub fn empty_header>(i: I) -> SealedBlockHeader { let mut header = BlockHeader::default(); - header.consensus.height = h; + let height = i.into(); + header.consensus.height = height; let transaction_tree = fuel_core_types::fuel_merkle::binary::in_memory::MerkleTree::new(); header.application.generated.transactions_root = transaction_tree.root().into(); diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 140bf452b68..23cf657170f 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -33,6 +33,13 @@ use std::{ time::Duration, }; +fn random_peer() -> PeerId { + let mut rng = StdRng::seed_from_u64(0xF00DF00D); + let bytes = rng.gen::<[u8; 32]>().to_vec(); + let peer_id = PeerId::from(bytes); + peer_id +} + pub struct PressurePeerToPeer { p2p: MockPeerToPeerPort, durations: [Duration; 2], @@ -45,24 +52,14 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.height_stream() } - async fn select_peer( - &self, - _block_height: BlockHeight, - ) -> anyhow::Result> { - let mut rng = StdRng::seed_from_u64(0xF00DF00D); - let bytes = rng.gen::<[u8; 32]>().to_vec(); - let peer_id = PeerId::from(bytes); - Ok(Some(peer_id)) - } - async fn get_sealed_block_headers( &self, - block_height_range: SourcePeer>, - ) -> SourcePeer>>> { + block_height_range: Range, + ) -> anyhow::Result>>> { self.counts.apply(|c| c.inc_headers()); tokio::time::sleep(self.durations[0]).await; self.counts.apply(|c| c.dec_headers()); - for _ in block_height_range.data.clone() { + for _ in block_height_range.clone() { self.counts.apply(|c| c.inc_blocks()); } self.p2p.get_sealed_block_headers(block_height_range).await @@ -104,14 +101,14 @@ impl PressurePeerToPeer { pub fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_headers().returning(|range| { - range.map(|range| { - 
let range = range - .clone() - .map(BlockHeight::from) - .map(empty_header) - .collect(); - Ok(Some(range)) - }) + let peer = random_peer(); + let headers = range + .clone() + .map(BlockHeight::from) + .map(empty_header) + .collect(); + let headers = peer.bind(Some(headers)); + Ok(headers) }); mock.expect_get_transactions() .returning(|_| Ok(Some(vec![]))); diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 6b561f658bf..a452e8be16b 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -1,7 +1,10 @@ #![allow(non_snake_case)] use crate::{ - import::test_helpers::empty_header, + import::test_helpers::{ + empty_header, + random_peer, + }, ports::{ MockBlockImporterPort, MockConsensusPort, @@ -10,6 +13,7 @@ use crate::{ }, }; use fuel_core_types::services::p2p::Transactions; + // use test_case::test_case; use super::*; @@ -35,18 +39,13 @@ async fn test_import_0_to_5() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -82,18 +81,13 @@ async fn test_import_3_to_5() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -129,18 +123,13 @@ async fn import__signature_fails_on_header_5_only() { .times(1) .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -178,18 +167,13 @@ async fn import__signature_fails_on_header_4_only() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(0) @@ -217,14 +201,14 @@ 
async fn import__signature_fails_on_header_4_only() { async fn import__header_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|range| range.map(|_| Ok(Some(Vec::new())))); + .returning(|_| { + let peer = random_peer(); + let headers = Some(Vec::new()); + let headers = peer.bind(headers); + Ok(headers) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -244,14 +228,14 @@ async fn import__header_not_found() { async fn import__header_response_incomplete() { // given let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|range| range.map(|_| Ok(None))); + .returning(|_| { + let peer = random_peer(); + let headers = None; + let headers = peer.bind(headers); + Ok(headers) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -271,14 +255,15 @@ async fn import__header_response_incomplete() { async fn import__header_5_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|range| range.map(|_| Ok(Some(vec![empty_header(4.into())])))); + .returning(|_| { + let peer = random_peer(); + let headers = Some(vec![empty_header(4)]); + let headers = peer.bind(headers); + Ok(headers) + }); + p2p.expect_get_transactions_2() .times(1) .returning(|block_ids| { @@ -305,14 +290,14 @@ async fn import__header_5_not_found() { async fn import__header_4_not_found() { // given let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|range| range.map(|_| Ok(Some(vec![empty_header(5.into())])))); + .returning(|_| { + let peer = random_peer(); + let headers = Some(vec![empty_header(5)]); + let headers = peer.bind(headers); + Ok(headers) + }); p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); @@ -343,15 +328,13 @@ async fn import__transactions_not_found() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -385,15 +368,13 @@ async fn import__transactions_not_found_for_header_4() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = 
random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); let mut height = 3; p2p.expect_get_transactions_2() @@ -437,15 +418,13 @@ async fn import__transactions_not_found_for_header_5() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -472,14 +451,9 @@ async fn import__transactions_not_found_for_header_5() { async fn import__p2p_error() { // given let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|range| range.map(|_| Err(anyhow::anyhow!("Some network error")))); + .returning(|_| Err(anyhow::anyhow!("Some network error"))); p2p.expect_get_transactions_2().times(0); let state = State::new(3, 5).into(); @@ -510,15 +484,13 @@ async fn import__p2p_error_on_4_transactions() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -538,47 +510,6 @@ async fn import__p2p_error_on_4_transactions() { assert_eq!((State::new(3, None), false), res); } -// #[tokio::test] -// async fn import__p2p_error_on_5_transactions() { -// // given -// let mut p2p = MockPeerToPeerPort::default(); -// p2p.expect_select_peer().times(1).returning(|_| { -// let bytes = vec![1u8, 2, 3, 4, 5]; -// let peer_id = bytes.into(); -// Ok(Some(peer_id)) -// }); -// p2p.expect_get_sealed_block_headers() -// .times(1) -// .returning(|_| { -// Ok(peer_sourced_headers(Some(vec![ -// empty_header(4.into()), -// empty_header(5.into()), -// ]))) -// }); -// let mut height = 3; -// p2p.expect_get_transactions().times(2).returning(move |_| { -// height += 1; -// if height == 5 { -// Err(anyhow::anyhow!("Some network error")) -// } else { -// Ok(Some(vec![])) -// } -// }); -// -// let state = State::new(3, 5).into(); -// let mocks = Mocks { -// p2p, -// consensus_port: DefaultMocks::times([2]), -// executor: DefaultMocks::times([1]), -// }; -// -// // when -// let res = test_import_inner(state, mocks, None).await; -// -// // then -// assert_eq!((State::new(4, None), false), res); -// } - #[tokio::test] async fn import__consensus_error_on_4() { // given @@ -599,15 +530,13 @@ async fn import__consensus_error_on_4() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - 
range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2().times(0); @@ -645,15 +574,13 @@ async fn import__consensus_error_on_5() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -691,15 +618,13 @@ async fn import__execution_error_on_header_4() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -749,15 +674,13 @@ async fn import__execution_error_on_header_5() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|_| Ok(Some(vec![empty_header(4.into()), empty_header(5.into())]))) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(1) @@ -834,19 +757,14 @@ async fn import__can_work_in_two_loops() { .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - p2p.expect_select_peer().times(2).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers() .times(2) .returning(move |range| { state.apply(|s| s.observe(6)); - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() .times(2) @@ -979,7 +897,6 @@ async fn import__missing_transactions_sends_peer_report() { struct PeerReportTestBuilder { shared_peer_id: Vec, get_sealed_headers: Option>>, - // get_transactions: Option>>, get_transactions_2: Option>>, check_sealed_header: Option, block_count: u32, @@ -991,7 +908,6 @@ impl PeerReportTestBuilder { Self { shared_peer_id: vec![1, 2, 3, 4], get_sealed_headers: None, - // get_transactions: None, get_transactions_2: None, check_sealed_header: None, block_count: 1, @@ -1086,22 +1002,19 @@ impl PeerReportTestBuilder { let mut p2p = MockPeerToPeerPort::default(); let peer_id = self.shared_peer_id.clone(); - p2p.expect_select_peer().times(1).returning(move |_| { - let peer_id = peer_id.clone(); - Ok(Some(peer_id.clone().into())) - }); - if let 
Some(get_headers) = self.get_sealed_headers.clone() { - p2p.expect_get_sealed_block_headers() - .returning(move |range| range.map(|_| Ok(get_headers.clone()))); + p2p.expect_get_sealed_block_headers().returning(move |_| { + let peer: PeerId = peer_id.clone().into(); + let headers = peer.bind(get_headers.clone()); + Ok(headers) + }); } else { p2p.expect_get_sealed_block_headers() .returning(move |range| { - range.map(|range| { - let headers = - range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer: PeerId = peer_id.clone().into(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); } @@ -1226,19 +1139,13 @@ impl DefaultMocks for MockPeerToPeerPort { let mut p2p = MockPeerToPeerPort::default(); let mut t = t.into_iter().cycle(); - p2p.expect_select_peer().times(1).returning(|_| { - let bytes = vec![1u8, 2, 3, 4, 5]; - let peer_id = bytes.into(); - Ok(Some(peer_id)) - }); - p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - range.map(|range| { - let headers = range.clone().map(|h| empty_header(h.into())).collect(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2() diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 7367f4e8d34..cf591814bd6 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -45,17 +45,11 @@ pub trait PeerToPeerPort { /// Stream of newly observed block heights. fn height_stream(&self) -> BoxStream; - /// Get a peer based on the block height - async fn select_peer( - &self, - block_height: BlockHeight, - ) -> anyhow::Result>; - /// Request a range of sealed block headers from the network. async fn get_sealed_block_headers( &self, - block_height_range: SourcePeer>, - ) -> SourcePeer>>>; + block_height_range: Range, + ) -> anyhow::Result>>>; /// Request transactions from the network for the given block /// and source peer. 
diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 6648695dfd3..896d011b941 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -2,17 +2,17 @@ use fuel_core_services::{ stream::IntoBoxStream, Service, }; -use fuel_core_types::services::p2p::{ - PeerId, - Transactions, -}; +use fuel_core_types::services::p2p::Transactions; use futures::{ stream, StreamExt, }; use crate::{ - import::test_helpers::empty_header, + import::test_helpers::{ + empty_header, + random_peer, + }, ports::{ MockBlockImporterPort, MockConsensusPort, @@ -38,19 +38,11 @@ async fn test_new_service() { }) .into_boxed() }); - p2p.expect_select_peer().times(1).returning(move |_| { - let peer_id: PeerId = vec![1, 2, 3, 4, 5].into(); - Ok(Some(peer_id)) - }); p2p.expect_get_sealed_block_headers().returning(|range| { - range.map(|range| { - let headers = range - .clone() - .map(BlockHeight::from) - .map(empty_header) - .collect::>(); - Ok(Some(headers)) - }) + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect::>()); + let headers = peer.bind(headers); + Ok(headers) }); p2p.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; From c0b4d1b94eb9f943774abb6e1159c12e6e4e3e50 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:09:32 -0400 Subject: [PATCH 73/87] WIP --- crates/fuel-core/src/database/sealed_block.rs | 9 --- crates/fuel-core/src/service/adapters/p2p.rs | 9 --- crates/fuel-core/src/service/adapters/sync.rs | 23 +------- crates/services/p2p/src/codecs/postcard.rs | 18 ------ crates/services/p2p/src/p2p_service.rs | 11 ---- crates/services/p2p/src/ports.rs | 7 --- .../p2p/src/request_response/messages.rs | 13 +--- crates/services/p2p/src/service.rs | 59 +------------------ .../test_helpers/pressure_peer_to_peer.rs | 18 +----- crates/services/sync/src/ports.rs | 13 +--- 10 files changed, 6 insertions(+), 174 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index 0d25c8f4d17..daddaf80dc9 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -128,15 +128,6 @@ impl Database { } } - pub fn get_transactions_on_block( - &self, - block_id: &BlockId, - ) -> StorageResult>> { - Ok(self - .get_sealed_block_by_id(block_id)? - .map(|Sealed { entity: block, .. 
}| block.into_inner().1)) - } - pub fn get_transactions_on_blocks( &self, block_height_range: Range, diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index 5accfa12b10..aed7e74a051 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ -8,11 +8,9 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::BlockId, SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::Transactions, }; @@ -40,13 +38,6 @@ impl P2pDb for Database { self.get_sealed_block_headers(block_height_range) } - fn get_transactions( - &self, - block_id: &BlockId, - ) -> StorageResult>> { - self.get_transactions_on_block(block_id) - } - fn get_transactions_2( &self, block_height_range: Range, diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index d38e38c5c88..e0f427fbd34 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -12,14 +12,10 @@ use fuel_core_sync::ports::{ }; use fuel_core_types::{ blockchain::{ - primitives::{ - BlockId, - DaBlockHeight, - }, + primitives::DaBlockHeight, SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::{ peer_reputation::{ @@ -68,23 +64,6 @@ impl PeerToPeerPort for P2PAdapter { } } - async fn get_transactions( - &self, - block: SourcePeer, - ) -> anyhow::Result>> { - let SourcePeer { - peer_id, - data: block, - } = block; - if let Some(service) = &self.service { - service - .get_transactions_from_peer(peer_id.into(), block) - .await - } else { - Err(anyhow::anyhow!("No P2P service available")) - } - } - async fn get_transactions_2( &self, range: SourcePeer>, diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index b1cea65b353..3444122e803 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -204,15 +204,6 @@ impl RequestResponseConverter for PostcardCodec { Ok(ResponseMessage::SealedBlock(Box::new(response))) } - NetworkResponse::Transactions(tx_bytes) => { - let response = if let Some(tx_bytes) = tx_bytes { - Some(self.deserialize(tx_bytes)?) - } else { - None - }; - - Ok(ResponseMessage::Transactions(response)) - } NetworkResponse::Transactions2(tx_bytes) => { let response = if let Some(tx_bytes) = tx_bytes { Some(self.deserialize(tx_bytes)?) @@ -246,15 +237,6 @@ impl RequestResponseConverter for PostcardCodec { Ok(NetworkResponse::Block(response)) } - OutboundResponse::Transactions(transactions) => { - let response = if let Some(transactions) = transactions { - Some(self.serialize(transactions.as_ref())?) - } else { - None - }; - - Ok(NetworkResponse::Transactions(response)) - } OutboundResponse::Transactions2(transactions) => { let response = if let Some(transactions) = transactions { Some(self.serialize(transactions.as_ref())?) 
diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 727c737293a..7cc06e5fda1 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -598,17 +598,6 @@ impl FuelP2PService { ); } } - ( - Some(ResponseChannelItem::Transactions(channel)), - Ok(ResponseMessage::Transactions(transactions)), - ) => { - if channel.send(transactions).is_err() { - debug!( - "Failed to send through the channel for {:?}", - request_id - ); - } - } ( Some(ResponseChannelItem::Transactions2(channel)), Ok(ResponseMessage::Transactions2(transactions)), diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 46ff1d1534c..9a6897a157e 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -2,11 +2,9 @@ use fuel_core_services::stream::BoxStream; use fuel_core_storage::Result as StorageResult; use fuel_core_types::{ blockchain::{ - primitives::BlockId, SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::Transactions, }; @@ -28,11 +26,6 @@ pub trait P2pDb: Send + Sync { block_height_range: Range, ) -> StorageResult>; - fn get_transactions( - &self, - block_id: &BlockId, - ) -> StorageResult>>; - fn get_transactions_2( &self, block_height_range: Range, diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 875f9bd2756..6c98ffd329e 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -5,11 +5,9 @@ use std::{ use fuel_core_types::{ blockchain::{ - primitives::BlockId, SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::Transactions, }; @@ -18,10 +16,7 @@ use serde::{ Deserialize, Serialize, }; -use serde_with::{ - serde_as, - FromInto, -}; +use serde_with::serde_as; use thiserror::Error; use tokio::sync::oneshot; @@ -40,12 +35,10 @@ pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::( // Server Peer: `RequestMessage` (receive request) -> `OutboundResponse` -> `NetworkResponse` (send response) // Client Peer: `NetworkResponse` (receive response) -> `ResponseMessage(data)` -> `ResponseChannelItem(channel, data)` (handle response) -#[serde_as] #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)] pub enum RequestMessage { Block(BlockHeight), SealedHeaders(Range), - Transactions(#[serde_as(as = "FromInto<[u8; 32]>")] BlockId), Transactions2(Range), } @@ -54,7 +47,6 @@ pub enum RequestMessage { pub enum ResponseMessage { SealedBlock(Box>), SealedHeaders(Option>), - Transactions(Option>), Transactions2(Option>), } @@ -63,7 +55,6 @@ pub enum ResponseMessage { pub enum ResponseChannelItem { Block(oneshot::Sender>), SealedHeaders(oneshot::Sender<(PeerId, Option>)>), - Transactions(oneshot::Sender>>), Transactions2(oneshot::Sender>>), } @@ -73,7 +64,6 @@ pub enum ResponseChannelItem { pub enum NetworkResponse { Block(Option>), Headers(Option>), - Transactions(Option>), Transactions2(Option>), } @@ -83,7 +73,6 @@ pub enum NetworkResponse { pub enum OutboundResponse { Block(Option>), SealedHeaders(Option>), - Transactions(Option>>), Transactions2(Option>>), } diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index a9ad4ff8505..64f77a9e911 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -35,7 +35,6 @@ use fuel_core_types::{ blockchain::{ block::Block, 
consensus::ConsensusVote, - primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -99,11 +98,6 @@ enum TaskRequest { block_height_range: Range, channel: oneshot::Sender<(PeerId, Option>)>, }, - GetTransactions { - block_id: BlockId, - from_peer: PeerId, - channel: oneshot::Sender>>, - }, GetTransactions2 { block_height_range: Range, from_peer: PeerId, @@ -492,11 +486,6 @@ where .get_peer_id_with_height(&block_height); let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } - Some(TaskRequest::GetTransactions { block_id, from_peer, channel }) => { - let request_msg = RequestMessage::Transactions(block_id); - let channel_item = ResponseChannelItem::Transactions(channel); - let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); - } Some(TaskRequest::GetTransactions2 { block_height_range, from_peer, channel }) => { let request_msg = RequestMessage::Transactions2(block_height_range); let channel_item = ResponseChannelItem::Transactions2(channel); @@ -560,19 +549,8 @@ where } } } - RequestMessage::Transactions(block_id) => { - match self.db.get_transactions(&block_id) { - Ok(maybe_transactions) => { - let response = maybe_transactions.map(Arc::new); - let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); - }, - Err(e) => { - tracing::error!("Failed to get transactions for block {:?}: {:?}", block_id, e); - let response = None; - let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); - return Err(e.into()) - } - } + RequestMessage::Transactions(_block_id) => { + todo!() } RequestMessage::Transactions2(range) => { match self.db.get_transactions_2(range.clone()) { @@ -719,25 +697,6 @@ impl SharedState { .map_err(|e| anyhow!("{}", e)) } - pub async fn get_transactions_from_peer( - &self, - peer_id: Vec, - block_id: BlockId, - ) -> anyhow::Result>> { - let (sender, receiver) = oneshot::channel(); - let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); - - self.request_sender - .send(TaskRequest::GetTransactions { - block_id, - from_peer, - channel: sender, - }) - .await?; - - receiver.await.map_err(|e| anyhow!("{}", e)) - } - pub async fn get_transactions_2_from_peer( &self, peer_id: Vec, @@ -915,13 +874,6 @@ pub mod tests { unimplemented!() } - fn get_transactions( - &self, - _block_id: &BlockId, - ) -> StorageResult>> { - unimplemented!() - } - fn get_transactions_2( &self, _block_height_range: Range, @@ -1044,13 +996,6 @@ pub mod tests { todo!() } - fn get_transactions( - &self, - _block_id: &BlockId, - ) -> StorageResult>> { - todo!() - } - fn get_transactions_2( &self, _block_height_range: Range, diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 23cf657170f..5783ddb4f3d 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -11,11 +11,7 @@ use crate::{ }; use fuel_core_services::stream::BoxStream; use fuel_core_types::{ - blockchain::{ - primitives::BlockId, - SealedBlockHeader, - }, - fuel_tx::Transaction, + blockchain::SealedBlockHeader, fuel_types::BlockHeight, services::p2p::{ PeerId, @@ -65,16 +61,6 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.get_sealed_block_headers(block_height_range).await } - async fn get_transactions( - &self, - block_id: SourcePeer, - ) -> anyhow::Result>> { - self.counts.apply(|c| 
c.inc_transactions()); - tokio::time::sleep(self.durations[1]).await; - self.counts.apply(|c| c.dec_transactions()); - self.p2p.get_transactions(block_id).await - } - async fn get_transactions_2( &self, block_ids: SourcePeer>, @@ -110,8 +96,6 @@ impl PressurePeerToPeer { let headers = peer.bind(Some(headers)); Ok(headers) }); - mock.expect_get_transactions() - .returning(|_| Ok(Some(vec![]))); mock.expect_get_transactions_2().returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| Transactions::default()).collect(); diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index cf591814bd6..99bd39ac4a6 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -3,14 +3,10 @@ use fuel_core_services::stream::BoxStream; use fuel_core_types::{ blockchain::{ - primitives::{ - BlockId, - DaBlockHeight, - }, + primitives::DaBlockHeight, SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::{ PeerId, @@ -51,13 +47,6 @@ pub trait PeerToPeerPort { block_height_range: Range, ) -> anyhow::Result>>>; - /// Request transactions from the network for the given block - /// and source peer. - async fn get_transactions( - &self, - block_id: SourcePeer, - ) -> anyhow::Result>>; - /// Request transactions from the network for the given block /// and source peer. async fn get_transactions_2( From d782a8af2359faffc45ca7b4fab250a39f32eb5f Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:11:24 -0400 Subject: [PATCH 74/87] Clean up --- crates/services/p2p/src/request_response/messages.rs | 1 - crates/services/sync/src/import.rs | 5 ++--- crates/services/sync/src/import/test_helpers.rs | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 6c98ffd329e..9a01ceb9cc2 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -16,7 +16,6 @@ use serde::{ Deserialize, Serialize, }; -use serde_with::serde_as; use thiserror::Error; use tokio::sync::oneshot; diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 3fa8a550fb6..f24960b8fcb 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -487,7 +487,7 @@ where .get_sealed_block_headers(range) .await .trace_err("Failed to get headers"); - let sorted_headers = match res { + match res { Ok(sourced_headers) => { let sourced = sourced_headers.map(|headers| match headers { None => vec![], @@ -496,8 +496,7 @@ where Ok(sourced) } Err(e) => Err(ImportError::Other(e)), - }; - sorted_headers + } } async fn get_transactions
<P>
( diff --git a/crates/services/sync/src/import/test_helpers.rs b/crates/services/sync/src/import/test_helpers.rs index ba948deea92..1329ec82d9f 100644 --- a/crates/services/sync/src/import/test_helpers.rs +++ b/crates/services/sync/src/import/test_helpers.rs @@ -35,8 +35,7 @@ pub use pressure_peer_to_peer::PressurePeerToPeer; pub fn random_peer() -> PeerId { let mut rng = StdRng::seed_from_u64(0xF00DF00D); let bytes = rng.gen::<[u8; 32]>().to_vec(); - let peer_id = PeerId::from(bytes); - peer_id + PeerId::from(bytes) } pub fn empty_header>(i: I) -> SealedBlockHeader { From c49c1c99a7adbc939b0bea497596262e6855f7e7 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:18:57 -0400 Subject: [PATCH 75/87] Clean up --- crates/fuel-core/src/database/sealed_block.rs | 1 - crates/services/p2p/src/service.rs | 3 --- .../import/test_helpers/pressure_peer_to_peer.rs | 13 +------------ 3 files changed, 1 insertion(+), 16 deletions(-) diff --git a/crates/fuel-core/src/database/sealed_block.rs b/crates/fuel-core/src/database/sealed_block.rs index daddaf80dc9..7b9f337fa20 100644 --- a/crates/fuel-core/src/database/sealed_block.rs +++ b/crates/fuel-core/src/database/sealed_block.rs @@ -23,7 +23,6 @@ use fuel_core_types::{ SealedBlock, SealedBlockHeader, }, - fuel_tx::Transaction, fuel_types::BlockHeight, services::p2p::Transactions, }; diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 64f77a9e911..de5b6c24630 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -549,9 +549,6 @@ where } } } - RequestMessage::Transactions(_block_id) => { - todo!() - } RequestMessage::Transactions2(range) => { match self.db.get_transactions_2(range.clone()) { Ok(maybe_transactions) => { diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index 5783ddb4f3d..f12ca02c7cc 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -1,6 +1,7 @@ use crate::{ import::test_helpers::{ empty_header, + random_peer, SharedCounts, }, ports::{ @@ -19,23 +20,11 @@ use fuel_core_types::{ Transactions, }, }; -use rand::{ - prelude::StdRng, - Rng, - SeedableRng, -}; use std::{ ops::Range, time::Duration, }; -fn random_peer() -> PeerId { - let mut rng = StdRng::seed_from_u64(0xF00DF00D); - let bytes = rng.gen::<[u8; 32]>().to_vec(); - let peer_id = PeerId::from(bytes); - peer_id -} - pub struct PressurePeerToPeer { p2p: MockPeerToPeerPort, durations: [Duration; 2], From 45b8376c3965a7f5b6766b3977c14c53214ba75b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:20:23 -0400 Subject: [PATCH 76/87] Clean up --- crates/services/p2p/src/p2p_service.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 7cc06e5fda1..32a615e1225 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1662,10 +1662,6 @@ mod tests { let _ = node_b.send_response_msg(*request_id, OutboundResponse::SealedHeaders(Some(sealed_headers))); } - RequestMessage::Transactions(_) => { - let transactions = (0..5).map(|_| Transaction::default_test_tx()).collect(); - let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); - } RequestMessage::Transactions2(_) => { let transactions = 
(0..5).map(|_| Transaction::default_test_tx()).collect(); let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); From e60a1cd4dd8ae34f77ca2a1d18315cd21633e0ea Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:27:03 -0400 Subject: [PATCH 77/87] Clean up --- crates/services/p2p/src/codecs/postcard.rs | 2 +- crates/services/p2p/src/p2p_service.rs | 21 ++------------------- 2 files changed, 3 insertions(+), 20 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 3444122e803..93c8f210a6a 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -280,7 +280,7 @@ mod tests { #[test] fn test_request_size_fits() { - let m = RequestMessage::Transactions(BlockId::default()); + let m = RequestMessage::Transactions2(BlockId::default()); assert!(postcard::to_stdvec(&m).unwrap().len() <= MAX_REQUEST_SIZE); } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 32a615e1225..8d8142f1b46 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1604,22 +1604,6 @@ mod tests { } }); } - RequestMessage::Transactions(_) => { - let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions(tx_orchestrator)).is_ok()); - let tx_test_end = tx_test_end.clone(); - - tokio::spawn(async move { - let response_message = rx_orchestrator.await; - - if let Ok(Some(transactions)) = response_message { - let _ = tx_test_end.send(transactions.len() == 5).await; - } else { - tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); - let _ = tx_test_end.send(false).await; - } - }); - } RequestMessage::Transactions2(_) => { let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions2(tx_orchestrator)).is_ok()); @@ -1664,7 +1648,7 @@ mod tests { } RequestMessage::Transactions2(_) => { let transactions = (0..5).map(|_| Transaction::default_test_tx()).collect(); - let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); + let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions2(Some(Arc::new(transactions)))); } } } @@ -1678,8 +1662,7 @@ mod tests { #[tokio::test] #[instrument] async fn request_response_works_with_transactions() { - request_response_works_with(RequestMessage::Transactions(BlockId::default())) - .await + request_response_works_with(RequestMessage::Transactions2(2..6)).await } #[tokio::test] From 895a7c9545c1dcf7786c8eb2f272193d91b80901 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:39:42 -0400 Subject: [PATCH 78/87] Clean up --- crates/services/p2p/src/codecs/postcard.rs | 4 +--- crates/services/p2p/src/p2p_service.rs | 9 ++++++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 93c8f210a6a..3b32a869eb0 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -274,13 +274,11 @@ impl ProtocolName for MessageExchangePostcardProtocol { #[cfg(test)] mod tests { - use fuel_core_types::blockchain::primitives::BlockId; - use super::*; #[test] fn test_request_size_fits() { - let m = 
RequestMessage::Transactions2(BlockId::default()); + let m = RequestMessage::Transactions2(2..6); assert!(postcard::to_stdvec(&m).unwrap().len() <= MAX_REQUEST_SIZE); } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 8d8142f1b46..334062d5b85 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -696,7 +696,6 @@ mod tests { BlockHeader, PartialBlockHeader, }, - primitives::BlockId, SealedBlock, SealedBlockHeader, }, @@ -704,7 +703,10 @@ mod tests { Transaction, TransactionBuilder, }, - services::p2p::GossipsubMessageAcceptance, + services::p2p::{ + GossipsubMessageAcceptance, + Transactions, + }, }; use futures::{ future::join_all, @@ -1647,7 +1649,8 @@ mod tests { let _ = node_b.send_response_msg(*request_id, OutboundResponse::SealedHeaders(Some(sealed_headers))); } RequestMessage::Transactions2(_) => { - let transactions = (0..5).map(|_| Transaction::default_test_tx()).collect(); + let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); + let transactions = vec![Transactions(txs)]; let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions2(Some(Arc::new(transactions)))); } } From bde0022cba1aa576c4c83fd17eda5f8435c2e1a4 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 00:53:58 -0400 Subject: [PATCH 79/87] Update test --- crates/services/p2p/src/p2p_service.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 334062d5b85..523eda4e619 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1606,7 +1606,7 @@ mod tests { } }); } - RequestMessage::Transactions2(_) => { + RequestMessage::Transactions2(_range) => { let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions2(tx_orchestrator)).is_ok()); let tx_test_end = tx_test_end.clone(); @@ -1615,7 +1615,8 @@ mod tests { let response_message = rx_orchestrator.await; if let Ok(Some(transactions)) = response_message { - let _ = tx_test_end.send(transactions.len() == 5).await; + let check = transactions.len() == 1 && transactions[0].0.len() == 5; + let _ = tx_test_end.send(check).await; } else { tracing::error!("Orchestrator failed to receive a message: {:?}", response_message); let _ = tx_test_end.send(false).await; From b6c82416020be758053271772e9377ba3646ad0e Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 01:01:07 -0400 Subject: [PATCH 80/87] Rename --- crates/fuel-core/src/service/adapters/p2p.rs | 2 +- crates/fuel-core/src/service/adapters/sync.rs | 4 +- crates/services/p2p/src/codecs/postcard.rs | 10 ++-- crates/services/p2p/src/p2p_service.rs | 14 ++--- crates/services/p2p/src/ports.rs | 2 +- .../p2p/src/request_response/messages.rs | 10 ++-- crates/services/p2p/src/service.rs | 24 ++++---- crates/services/sync/src/import.rs | 2 +- .../test_helpers/pressure_peer_to_peer.rs | 6 +- crates/services/sync/src/import/tests.rs | 58 +++++++++---------- crates/services/sync/src/ports.rs | 2 +- crates/services/sync/src/service/tests.rs | 2 +- 12 files changed, 67 insertions(+), 69 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/p2p.rs b/crates/fuel-core/src/service/adapters/p2p.rs index aed7e74a051..aa3e0766d70 100644 --- a/crates/fuel-core/src/service/adapters/p2p.rs +++ b/crates/fuel-core/src/service/adapters/p2p.rs @@ 
-38,7 +38,7 @@ impl P2pDb for Database { self.get_sealed_block_headers(block_height_range) } - fn get_transactions_2( + fn get_transactions( &self, block_height_range: Range, ) -> StorageResult>> { diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index e0f427fbd34..1b63c8c25e1 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -64,7 +64,7 @@ impl PeerToPeerPort for P2PAdapter { } } - async fn get_transactions_2( + async fn get_transactions( &self, range: SourcePeer>, ) -> anyhow::Result>> { @@ -74,7 +74,7 @@ impl PeerToPeerPort for P2PAdapter { } = range; if let Some(service) = &self.service { service - .get_transactions_2_from_peer(peer_id.into(), range) + .get_transactions_from_peer(peer_id.into(), range) .await } else { Err(anyhow::anyhow!("No P2P service available")) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index 3b32a869eb0..f5ae53729b7 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -204,14 +204,14 @@ impl RequestResponseConverter for PostcardCodec { Ok(ResponseMessage::SealedBlock(Box::new(response))) } - NetworkResponse::Transactions2(tx_bytes) => { + NetworkResponse::Transactions(tx_bytes) => { let response = if let Some(tx_bytes) = tx_bytes { Some(self.deserialize(tx_bytes)?) } else { None }; - Ok(ResponseMessage::Transactions2(response)) + Ok(ResponseMessage::Transactions(response)) } NetworkResponse::Headers(headers_bytes) => { let response = headers_bytes @@ -237,14 +237,14 @@ impl RequestResponseConverter for PostcardCodec { Ok(NetworkResponse::Block(response)) } - OutboundResponse::Transactions2(transactions) => { + OutboundResponse::Transactions(transactions) => { let response = if let Some(transactions) = transactions { Some(self.serialize(transactions.as_ref())?) 
} else { None }; - Ok(NetworkResponse::Transactions2(response)) + Ok(NetworkResponse::Transactions(response)) } OutboundResponse::SealedHeaders(maybe_headers) => { let response = maybe_headers @@ -278,7 +278,7 @@ mod tests { #[test] fn test_request_size_fits() { - let m = RequestMessage::Transactions2(2..6); + let m = RequestMessage::Transactions(2..6); assert!(postcard::to_stdvec(&m).unwrap().len() <= MAX_REQUEST_SIZE); } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 523eda4e619..fdb11c3511c 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -599,8 +599,8 @@ impl FuelP2PService { } } ( - Some(ResponseChannelItem::Transactions2(channel)), - Ok(ResponseMessage::Transactions2(transactions)), + Some(ResponseChannelItem::Transactions(channel)), + Ok(ResponseMessage::Transactions(transactions)), ) => { if channel.send(transactions).is_err() { debug!( @@ -1606,9 +1606,9 @@ mod tests { } }); } - RequestMessage::Transactions2(_range) => { + RequestMessage::Transactions(_range) => { let (tx_orchestrator, rx_orchestrator) = oneshot::channel(); - assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions2(tx_orchestrator)).is_ok()); + assert!(node_a.send_request_msg(None, request_msg.clone(), ResponseChannelItem::Transactions(tx_orchestrator)).is_ok()); let tx_test_end = tx_test_end.clone(); tokio::spawn(async move { @@ -1649,10 +1649,10 @@ mod tests { let _ = node_b.send_response_msg(*request_id, OutboundResponse::SealedHeaders(Some(sealed_headers))); } - RequestMessage::Transactions2(_) => { + RequestMessage::Transactions(_) => { let txs = (0..5).map(|_| Transaction::default_test_tx()).collect(); let transactions = vec![Transactions(txs)]; - let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions2(Some(Arc::new(transactions)))); + let _ = node_b.send_response_msg(*request_id, OutboundResponse::Transactions(Some(Arc::new(transactions)))); } } } @@ -1666,7 +1666,7 @@ mod tests { #[tokio::test] #[instrument] async fn request_response_works_with_transactions() { - request_response_works_with(RequestMessage::Transactions2(2..6)).await + request_response_works_with(RequestMessage::Transactions(2..6)).await } #[tokio::test] diff --git a/crates/services/p2p/src/ports.rs b/crates/services/p2p/src/ports.rs index 9a6897a157e..94862f9a64c 100644 --- a/crates/services/p2p/src/ports.rs +++ b/crates/services/p2p/src/ports.rs @@ -26,7 +26,7 @@ pub trait P2pDb: Send + Sync { block_height_range: Range, ) -> StorageResult>; - fn get_transactions_2( + fn get_transactions( &self, block_height_range: Range, ) -> StorageResult>>; diff --git a/crates/services/p2p/src/request_response/messages.rs b/crates/services/p2p/src/request_response/messages.rs index 9a01ceb9cc2..68a9eaa7574 100644 --- a/crates/services/p2p/src/request_response/messages.rs +++ b/crates/services/p2p/src/request_response/messages.rs @@ -38,7 +38,7 @@ pub(crate) const MAX_REQUEST_SIZE: usize = core::mem::size_of::( pub enum RequestMessage { Block(BlockHeight), SealedHeaders(Range), - Transactions2(Range), + Transactions(Range), } /// Final Response Message that p2p service sends to the Orchestrator @@ -46,7 +46,7 @@ pub enum RequestMessage { pub enum ResponseMessage { SealedBlock(Box>), SealedHeaders(Option>), - Transactions2(Option>), + Transactions(Option>), } /// Holds oneshot channels for specific responses @@ -54,7 +54,7 @@ pub enum ResponseMessage { pub enum ResponseChannelItem { 
Block(oneshot::Sender>), SealedHeaders(oneshot::Sender<(PeerId, Option>)>), - Transactions2(oneshot::Sender>>), + Transactions(oneshot::Sender>>), } /// Response that is sent over the wire @@ -63,7 +63,7 @@ pub enum ResponseChannelItem { pub enum NetworkResponse { Block(Option>), Headers(Option>), - Transactions2(Option>), + Transactions(Option>), } /// Initial state of the `ResponseMessage` prior to having its inner value serialized @@ -72,7 +72,7 @@ pub enum NetworkResponse { pub enum OutboundResponse { Block(Option>), SealedHeaders(Option>), - Transactions2(Option>>), + Transactions(Option>>), } #[derive(Debug, Error)] diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index de5b6c24630..29e9ca155b1 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -98,7 +98,7 @@ enum TaskRequest { block_height_range: Range, channel: oneshot::Sender<(PeerId, Option>)>, }, - GetTransactions2 { + GetTransactions { block_height_range: Range, from_peer: PeerId, channel: oneshot::Sender>>, @@ -486,9 +486,9 @@ where .get_peer_id_with_height(&block_height); let _ = self.p2p_service.send_request_msg(peer, request_msg, channel_item); } - Some(TaskRequest::GetTransactions2 { block_height_range, from_peer, channel }) => { - let request_msg = RequestMessage::Transactions2(block_height_range); - let channel_item = ResponseChannelItem::Transactions2(channel); + Some(TaskRequest::GetTransactions { block_height_range, from_peer, channel }) => { + let request_msg = RequestMessage::Transactions(block_height_range); + let channel_item = ResponseChannelItem::Transactions(channel); let _ = self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel_item); } Some(TaskRequest::RespondWithGossipsubMessageReport((message, acceptance))) => { @@ -549,16 +549,16 @@ where } } } - RequestMessage::Transactions2(range) => { - match self.db.get_transactions_2(range.clone()) { + RequestMessage::Transactions(range) => { + match self.db.get_transactions(range.clone()) { Ok(maybe_transactions) => { let response = maybe_transactions.map(Arc::new); - let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); + let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); }, Err(e) => { tracing::error!("Failed to get transactions for range {:?}: {:?}", range, e); let response = None; - let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions2(response)); + let _ = self.p2p_service.send_response_msg(request_id, OutboundResponse::Transactions(response)); return Err(e.into()) } } @@ -694,7 +694,7 @@ impl SharedState { .map_err(|e| anyhow!("{}", e)) } - pub async fn get_transactions_2_from_peer( + pub async fn get_transactions_from_peer( &self, peer_id: Vec, range: Range, @@ -702,7 +702,7 @@ impl SharedState { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(&peer_id).expect("Valid PeerId"); - let request = TaskRequest::GetTransactions2 { + let request = TaskRequest::GetTransactions { block_height_range: range, from_peer, channel: sender, @@ -871,7 +871,7 @@ pub mod tests { unimplemented!() } - fn get_transactions_2( + fn get_transactions( &self, _block_height_range: Range, ) -> StorageResult>> { @@ -993,7 +993,7 @@ pub mod tests { todo!() } - fn get_transactions_2( + fn get_transactions( &self, _block_height_range: Range, ) -> StorageResult>> { diff --git a/crates/services/sync/src/import.rs 
b/crates/services/sync/src/import.rs index f24960b8fcb..e2505c4f08e 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -508,7 +508,7 @@ where P: PeerToPeerPort + Send + Sync + 'static, { let range = peer_id.clone().bind(range); - let res = p2p.get_transactions_2(range).await; + let res = p2p.get_transactions(range).await; match res { Ok(Some(transactions)) => Ok(transactions), Ok(None) => { diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index f12ca02c7cc..3b3f16ada36 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -50,7 +50,7 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.get_sealed_block_headers(block_height_range).await } - async fn get_transactions_2( + async fn get_transactions( &self, block_ids: SourcePeer>, ) -> anyhow::Result>> { @@ -60,7 +60,7 @@ impl PeerToPeerPort for PressurePeerToPeer { tokio::time::sleep(self.durations[1]).await; self.counts .apply(|c| c.sub_transactions(transactions_count)); - self.p2p.get_transactions_2(block_ids).await + self.p2p.get_transactions(block_ids).await } fn report_peer( @@ -85,7 +85,7 @@ impl PressurePeerToPeer { let headers = peer.bind(Some(headers)); Ok(headers) }); - mock.expect_get_transactions_2().returning(|block_ids| { + mock.expect_get_transactions().returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index a452e8be16b..3b14c775cca 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -47,7 +47,7 @@ async fn test_import_0_to_5() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -89,7 +89,7 @@ async fn test_import_3_to_5() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -131,7 +131,7 @@ async fn import__signature_fails_on_header_5_only() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -175,7 +175,7 @@ async fn import__signature_fails_on_header_4_only() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(0) .returning(|block_ids| { let data = block_ids.data; @@ -264,7 +264,7 @@ async fn import__header_5_not_found() { Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -298,7 +298,7 @@ async fn import__header_4_not_found() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2().times(0); + p2p.expect_get_transactions().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -336,7 +336,7 @@ async fn import__transactions_not_found() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|_| Ok(None)); @@ -377,7 +377,7 @@ async fn 
import__transactions_not_found_for_header_4() { Ok(headers) }); let mut height = 3; - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(move |block_ids| { height += 1; @@ -426,12 +426,10 @@ async fn import__transactions_not_found_for_header_5() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() - .times(1) - .returning(move |_| { - let v = vec![Transactions::default()]; - Ok(Some(v)) - }); + p2p.expect_get_transactions().times(1).returning(move |_| { + let v = vec![Transactions::default()]; + Ok(Some(v)) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -454,7 +452,7 @@ async fn import__p2p_error() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| Err(anyhow::anyhow!("Some network error"))); - p2p.expect_get_transactions_2().times(0); + p2p.expect_get_transactions().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -492,7 +490,7 @@ async fn import__p2p_error_on_4_transactions() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|_| Err(anyhow::anyhow!("Some network error"))); @@ -538,7 +536,7 @@ async fn import__consensus_error_on_4() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2().times(0); + p2p.expect_get_transactions().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -582,7 +580,7 @@ async fn import__consensus_error_on_5() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -626,7 +624,7 @@ async fn import__execution_error_on_header_4() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -682,7 +680,7 @@ async fn import__execution_error_on_header_5() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(1) .returning(|block_ids| { let data = block_ids.data; @@ -766,7 +764,7 @@ async fn import__can_work_in_two_loops() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(2) .returning(|block_ids| { let data = block_ids.data; @@ -888,7 +886,7 @@ async fn import__missing_transactions_sends_peer_report() { // Given PeerReportTestBuilder::new() // When - .with_get_transactions_2(None) + .with_get_transactions(None) // Then .run_with_expected_reports([PeerReportReason::MissingTransactions]) .await; @@ -897,7 +895,7 @@ async fn import__missing_transactions_sends_peer_report() { struct PeerReportTestBuilder { shared_peer_id: Vec, get_sealed_headers: Option>>, - get_transactions_2: Option>>, + get_transactions: Option>>, check_sealed_header: Option, block_count: u32, debug: bool, @@ -908,7 +906,7 @@ impl PeerReportTestBuilder { Self { shared_peer_id: vec![1, 2, 3, 4], get_sealed_headers: None, - get_transactions_2: None, + get_transactions: None, check_sealed_header: None, block_count: 1, debug: false, @@ -937,11 +935,11 @@ impl PeerReportTestBuilder { // self // } - pub fn with_get_transactions_2( + pub fn with_get_transactions( mut self, get_transactions: Option>, ) -> Self { - self.get_transactions_2 = Some(get_transactions); + self.get_transactions = Some(get_transactions); self } @@ -1018,12 +1016,12 @@ impl PeerReportTestBuilder 
{ }); } - let transactions = self.get_transactions_2.clone(); + let transactions = self.get_transactions.clone(); if let Some(t) = transactions { - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .returning(move |_| Ok(t.clone())); } else { - p2p.expect_get_transactions_2().returning(|block_ids| { + p2p.expect_get_transactions().returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) @@ -1148,7 +1146,7 @@ impl DefaultMocks for MockPeerToPeerPort { Ok(headers) }); - p2p.expect_get_transactions_2() + p2p.expect_get_transactions() .times(t.next().unwrap()) .returning(|block_ids| { let data = block_ids.data; diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 99bd39ac4a6..86f8280489d 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -49,7 +49,7 @@ pub trait PeerToPeerPort { /// Request transactions from the network for the given block /// and source peer. - async fn get_transactions_2( + async fn get_transactions( &self, block_ids: SourcePeer>, ) -> anyhow::Result>>; diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 896d011b941..57440afae24 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -44,7 +44,7 @@ async fn test_new_service() { let headers = peer.bind(headers); Ok(headers) }); - p2p.expect_get_transactions_2().returning(|block_ids| { + p2p.expect_get_transactions().returning(|block_ids| { let data = block_ids.data; let v = data.into_iter().map(|_| Transactions::default()).collect(); Ok(Some(v)) From 07962aa1c70ccf4ac35e3dace5038685e9c64e5b Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 01:14:35 -0400 Subject: [PATCH 81/87] Clean up --- crates/types/src/services/p2p.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 3175efba5b0..98961432c52 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -74,14 +74,6 @@ impl SourcePeer { let data = f(self.data); SourcePeer { peer_id, data } } - - /// Asref - pub fn as_ref(&self) -> SourcePeer<&T> { - SourcePeer { - peer_id: self.peer_id.clone(), - data: &self.data, - } - } } impl GossipData { From 4ed6aefcfd2a5cc2ce6d8fa0fab5d2d8111e4f1d Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 11:33:28 -0400 Subject: [PATCH 82/87] Remove redundant test --- .../sync/src/import/back_pressure_tests.rs | 48 ------------------- crates/services/sync/src/import/tests.rs | 10 ---- 2 files changed, 58 deletions(-) diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index d141537489f..c58fc97032d 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -119,51 +119,3 @@ async fn test_back_pressure(input: Input, state: State, params: Config) -> Count import.import(&mut watcher).await.unwrap(); counts.apply(|c| c.max.clone()) } - -#[tokio::test(flavor = "multi_thread")] -async fn test_back_pressure_2() { - // input: Input, state: State, params: Config - let input = Input { - executes: Duration::from_millis(10), - ..Default::default() - }; - let state = State::new(None, 50); - let params = Config { - block_stream_buffer_size: 10, - header_batch_size: 10, - }; - let counts = 
SharedCounts::new(Default::default()); - let state = SharedMutex::new(state); - - let p2p = Arc::new(PressurePeerToPeer::new( - counts.clone(), - [input.headers, input.transactions], - )); - let executor = Arc::new(PressureBlockImporter::new(counts.clone(), input.executes)); - let consensus = Arc::new(PressureConsensus::new(counts.clone(), input.consensus)); - let notify = Arc::new(Notify::new()); - - let import = Import { - state, - notify, - params, - p2p, - executor, - consensus, - }; - - import.notify.notify_one(); - let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); - let mut watcher = shutdown.into(); - import.import(&mut watcher).await.unwrap(); - let counts = counts.apply(|c| c.max.clone()); - let expected = Count { - headers: 10, - consensus: 10, - transactions: 10, - executes: 1, - blocks: 21, - }; - - assert!(counts <= expected); -} diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 3b14c775cca..ed01e6551de 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -14,18 +14,8 @@ use crate::{ }; use fuel_core_types::services::p2p::Transactions; -// use test_case::test_case; - use super::*; -// #[test_case(State::new(None, 5), Mocks::times([6]) => (State::new(5, None), true) ; "executes 5")] -// #[test_case(State::new(3, 5), Mocks::times([2]) => (State::new(5, None), true) ; "executes 3 to 5")] -// #[tokio::test] -// async fn test_import(state: State, mocks: Mocks) -> (State, bool) { -// let state = SharedMutex::new(state); -// test_import_inner(state, mocks, None).await -// } - #[tokio::test] async fn test_import_0_to_5() { let mut consensus_port = MockConsensusPort::default(); From 0a035ed8167f84f3096bc2f383fe0fe2c6d31306 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 13:16:03 -0400 Subject: [PATCH 83/87] Parameterize tests + additional test --- crates/services/sync/src/import.rs | 29 ++-- crates/services/sync/src/import/tests.rs | 184 ++++++++++++++++++++--- 2 files changed, 173 insertions(+), 40 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index e2505c4f08e..320a6a79c26 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -409,11 +409,11 @@ fn get_header_batch_stream( fn range_chunks( range: RangeInclusive, chunk_size: u32, -) -> impl Iterator> { - let end = *range.end(); +) -> impl Iterator> { + let end = *range.end() + 1; range.step_by(chunk_size as usize).map(move |chunk_start| { let block_end = (chunk_start + chunk_size).min(end); - chunk_start..=block_end + chunk_start..block_end }) } @@ -520,7 +520,7 @@ where } async fn get_headers_batch

<P>(
-    range: RangeInclusive<u32>,
+    range: Range<u32>,
     p2p: Arc<P>
, ) -> Result where @@ -528,12 +528,9 @@ where { tracing::debug!( "getting header range from {} to {} inclusive", - range.start(), - range.end() + range.start, + range.end ); - let start = *range.start(); - let end = *range.end() + 1; - let range = start..end; let result = get_sealed_block_headers(range.clone(), p2p.as_ref()).await; let sourced_headers = result?; let SourcePeer { @@ -550,14 +547,12 @@ where }) .map(|(header, _)| header) .collect::>(); - if let Some(expected_len) = end.checked_sub(start) { - if headers.len() != expected_len as usize { - report_peer( - p2p.as_ref(), - peer_id.clone(), - PeerReportReason::MissingBlockHeaders, - ); - } + if headers.len() != range.len() as usize { + report_peer( + p2p.as_ref(), + peer_id.clone(), + PeerReportReason::MissingBlockHeaders, + ); } Ok(Batch::new(peer_id, range.clone(), headers)) } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index ed01e6551de..0e9c62a321f 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -45,6 +45,10 @@ async fn test_import_0_to_5() { Ok(Some(v)) }); + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; let mocks = Mocks { consensus_port, p2p, @@ -53,7 +57,7 @@ async fn test_import_0_to_5() { let state = State::new(None, 5); let state = SharedMutex::new(state); - let v = test_import_inner(state, mocks, None).await; + let v = test_import_inner(state, mocks, None, params).await; let expected = (State::new(5, None), true); assert_eq!(v, expected); } @@ -87,6 +91,10 @@ async fn test_import_3_to_5() { Ok(Some(v)) }); + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; let mocks = Mocks { consensus_port, p2p, @@ -95,11 +103,76 @@ async fn test_import_3_to_5() { let state = State::new(3, 5); let state = SharedMutex::new(state); - let v = test_import_inner(state, mocks, None).await; + let v = test_import_inner(state, mocks, None, params).await; let expected = (State::new(5, None), true); assert_eq!(v, expected); } +#[tokio::test] +async fn test_import_0_to_499() { + // The observed block height + let end = 499; + // The number of blocks in range 0..end + let n = end + 1; + // The number of batches per request + let header_batch_size = 10; + + let mut consensus_port = MockConsensusPort::default(); + + // Happens once for each header + let times = n; + consensus_port + .expect_check_sealed_header() + .times(times) + .returning(|_| Ok(true)); + + // Happens once for each batch + let times = n.div_ceil(header_batch_size); + consensus_port + .expect_await_da_height() + .times(times) + .returning(|_| Ok(())); + + let mut p2p = MockPeerToPeerPort::default(); + + // Happens once for each batch + let times = n.div_ceil(header_batch_size); + p2p.expect_get_sealed_block_headers() + .times(times) + .returning(|range| { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }); + + // Happens once for each batch + let times = n.div_ceil(header_batch_size); + p2p.expect_get_transactions() + .times(times) + .returning(|block_ids| { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }); + + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: header_batch_size as u32, + }; + let mocks = Mocks { + consensus_port, + p2p, + executor: DefaultMocks::times([n]), + }; + + let state = State::new(None, end as 
u32); + let state = SharedMutex::new(state); + let v = test_import_inner(state, mocks, None, params).await; + let expected = (State::new(end as u32, None), true); + assert_eq!(v, expected); +} + #[tokio::test] async fn import__signature_fails_on_header_5_only() { // given @@ -135,9 +208,13 @@ async fn import__signature_fails_on_header_5_only() { p2p, executor: DefaultMocks::times([1]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(4, None), false), res); @@ -179,9 +256,13 @@ async fn import__signature_fails_on_header_4_only() { p2p, executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -206,9 +287,13 @@ async fn import__header_not_found() { consensus_port: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -233,9 +318,13 @@ async fn import__header_response_incomplete() { consensus_port: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -268,9 +357,13 @@ async fn import__header_5_not_found() { consensus_port: DefaultMocks::times([1]), executor: DefaultMocks::times([1]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(4, None), false), res); @@ -296,9 +389,13 @@ async fn import__header_4_not_found() { consensus_port: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -336,9 +433,13 @@ async fn import__transactions_not_found() { consensus_port, executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -386,9 +487,13 @@ async fn import__transactions_not_found_for_header_4() { consensus_port, executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -427,9 +532,13 @@ async fn 
import__transactions_not_found_for_header_5() { consensus_port, executor: DefaultMocks::times([1]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(4, None), false), res); @@ -450,9 +559,13 @@ async fn import__p2p_error() { consensus_port: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -490,9 +603,13 @@ async fn import__p2p_error_on_4_transactions() { consensus_port, executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -534,9 +651,13 @@ async fn import__consensus_error_on_4() { p2p, executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -584,9 +705,13 @@ async fn import__consensus_error_on_5() { p2p, executor: DefaultMocks::times([1]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(4, None), false), res); @@ -640,9 +765,13 @@ async fn import__execution_error_on_header_4() { p2p, executor, }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -696,9 +825,13 @@ async fn import__execution_error_on_header_5() { p2p, executor, }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(4, None), false), res); @@ -720,9 +853,13 @@ async fn signature_always_fails() { p2p: DefaultMocks::times([0]), executor: DefaultMocks::times([0]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(state, mocks, None).await; + let res = test_import_inner(state, mocks, None, params).await; // then assert_eq!((State::new(3, None), false), res); @@ -768,9 +905,13 @@ async fn import__can_work_in_two_loops() { p2p, executor: DefaultMocks::times([3]), }; + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 10, + }; // when - let res = test_import_inner(s, mocks, Some(c)).await; + let res = test_import_inner(s, mocks, Some(c), params).await; // then assert_eq!((State::new(6, None), true), res); @@ -780,6 +921,7 @@ async fn test_import_inner( state: SharedMutex, mocks: Mocks, count: Option, + params: Config, ) -> (State, 
bool) { let notify = Arc::new(Notify::new()); let Mocks { @@ -787,10 +929,6 @@ async fn test_import_inner( mut p2p, executor, } = mocks; - let params = Config { - block_stream_buffer_size: 10, - header_batch_size: 10, - }; p2p.expect_report_peer().returning(|_, _| Ok(())); let p2p = Arc::new(p2p); From 59cc41c479f52c37552be39267e23022dc8451bc Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 13:17:18 -0400 Subject: [PATCH 84/87] Fix comment --- crates/services/sync/src/import/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 0e9c62a321f..b2360f1183b 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -112,9 +112,9 @@ async fn test_import_3_to_5() { async fn test_import_0_to_499() { // The observed block height let end = 499; - // The number of blocks in range 0..end + // The number of headers/blocks in range 0..end let n = end + 1; - // The number of batches per request + // The number of headers/blocks per batch let header_batch_size = 10; let mut consensus_port = MockConsensusPort::default(); From d5167be21418740b8c136aa53654b9768feac76f Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Fri, 29 Sep 2023 13:41:25 -0400 Subject: [PATCH 85/87] Fix --- crates/services/sync/src/import.rs | 2 +- crates/services/sync/src/import/tests.rs | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 320a6a79c26..7ec50e76df6 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -547,7 +547,7 @@ where }) .map(|(header, _)| header) .collect::>(); - if headers.len() != range.len() as usize { + if headers.len() != range.len() { report_peer( p2p.as_ref(), peer_id.clone(), diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index b2360f1183b..e87bfd1ebd0 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -16,6 +16,10 @@ use fuel_core_types::services::p2p::Transactions; use super::*; +fn div_ceil(divisor: usize, dividend: usize) -> usize { + (divisor + (dividend - 1)) / dividend +} + #[tokio::test] async fn test_import_0_to_5() { let mut consensus_port = MockConsensusPort::default(); @@ -127,7 +131,7 @@ async fn test_import_0_to_499() { .returning(|_| Ok(true)); // Happens once for each batch - let times = n.div_ceil(header_batch_size); + let times = div_ceil(n, header_batch_size); consensus_port .expect_await_da_height() .times(times) @@ -136,7 +140,7 @@ async fn test_import_0_to_499() { let mut p2p = MockPeerToPeerPort::default(); // Happens once for each batch - let times = n.div_ceil(header_batch_size); + let times = div_ceil(n, header_batch_size); p2p.expect_get_sealed_block_headers() .times(times) .returning(|range| { @@ -147,7 +151,7 @@ async fn test_import_0_to_499() { }); // Happens once for each batch - let times = n.div_ceil(header_batch_size); + let times = div_ceil(n, header_batch_size); p2p.expect_get_transactions() .times(times) .returning(|block_ids| { From 09bc753eea0e979817dd3245f4b1aca111d86ad7 Mon Sep 17 00:00:00 2001 From: Green Baneling Date: Mon, 2 Oct 2023 16:43:37 +0100 Subject: [PATCH 86/87] Remove `Result` from streams and use `Batch::is_err` (#1391) Simplification after the review process --- Cargo.lock | 1 - crates/services/sync/Cargo.toml | 1 - 
crates/services/sync/src/import.rs | 336 +++++++++-------------- crates/services/sync/src/import/tests.rs | 8 - crates/types/src/services/p2p.rs | 4 +- 5 files changed, 132 insertions(+), 218 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cafe49e7fb4..51172a543ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3115,7 +3115,6 @@ version = "0.20.4" dependencies = [ "anyhow", "async-trait", - "derive_more", "fuel-core-services", "fuel-core-trace", "fuel-core-types", diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index b14022eb2a6..f83ba4e90ab 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -12,7 +12,6 @@ version = { workspace = true } [dependencies] anyhow = { workspace = true } async-trait = { workspace = true } -derive_more = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true } futures = { workspace = true } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 7ec50e76df6..ec897af9849 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -2,7 +2,6 @@ //! This module contains the import task which is responsible for //! importing blocks from the network into the local blockchain. -use anyhow::anyhow; use fuel_core_services::{ SharedMutex, StateWatcher, @@ -25,7 +24,6 @@ use futures::{ stream::StreamExt, FutureExt, Stream, - TryStreamExt, }; use std::{ future::Future, @@ -35,10 +33,7 @@ use std::{ }, sync::Arc, }; -use tokio::{ - sync::Notify, - task::JoinError, -}; +use tokio::sync::Notify; use tracing::Instrument; use crate::{ @@ -149,22 +144,6 @@ impl Batch { type SealedHeaderBatch = Batch; type SealedBlockBatch = Batch; -#[derive(Debug, derive_more::Display)] -enum ImportError { - ConsensusError(anyhow::Error), - ExecutionError(anyhow::Error), - MissingTransactions, - BadBlockHeader, - JoinError(JoinError), - Other(anyhow::Error), -} - -impl From for ImportError { - fn from(value: anyhow::Error) -> Self { - ImportError::Other(value) - } -} - impl Import where P: PeerToPeerPort + Send + Sync + 'static, @@ -174,12 +153,12 @@ where #[tracing::instrument(skip_all)] /// Execute imports until a shutdown is requested. pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { - self.import_inner(shutdown).await.map_err(|e| anyhow!(e))?; + self.import_inner(shutdown).await?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - async fn import_inner(&self, shutdown: &StateWatcher) -> Result<(), ImportError> { + async fn import_inner(&self, shutdown: &StateWatcher) -> anyhow::Result<()> { // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. @@ -239,12 +218,14 @@ where let mut shutdown_signal = shutdown_signal.clone(); tokio::select! { // Stream a batch of blocks - blocks = stream_block_batch => blocks.map(Some), + blocks = stream_block_batch => Some(blocks), // If a shutdown signal is received during the stream, terminate early and // return an empty response - _ = shutdown_signal.while_started() => Ok(None) + _ = shutdown_signal.while_started() => None } - }).map(|task| task.map_err(ImportError::JoinError)?) + }).map(|task| { + task.trace_err("Failed to join the task").ok().flatten() + }) }) // Request up to `block_stream_buffer_size` transactions from the network. 
.buffered(params.block_stream_buffer_size) @@ -256,32 +237,42 @@ where tracing::info!("In progress import stream shutting down"); } }) - .into_scan_none_or_err() - .scan_none_or_err() + .into_scan_none() + .scan_none() + .into_scan_err() + .scan_err() .then(|batch| { async move { - let batch = batch?; - let error = batch.is_err().then(|| ImportError::MissingTransactions); let Batch { peer, + range, results, - .. } = batch; - let sealed_blocks = futures::stream::iter(results); - let res = sealed_blocks.then(|sealed_block| async { - execute_and_commit(executor.as_ref(), state, sealed_block).await - }).try_collect::>().await.and_then(|v| error.map_or(Ok(v), Err)); - match &res { - Ok(_) => { - report_peer(p2p.as_ref(), peer.clone(), PeerReportReason::SuccessfulBlockImport); - }, - Err(e) => { - // If this fails, then it means that consensus has approved a block that is invalid. - // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. - tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); - }, - }; - res + + let mut done = vec![]; + for sealed_block in results { + let res = execute_and_commit(executor.as_ref(), state, sealed_block).await; + + match &res { + Ok(_) => { + done.push(()); + }, + Err(e) => { + // If this fails, then it means that consensus has approved a block that is invalid. + // This would suggest a more serious issue than a bad peer, e.g. a fork or an out-of-date client. + tracing::error!("Failed to execute and commit block from peer {:?}: {:?}", peer, e); + break; + }, + }; + } + + let batch = Batch::new(peer.clone(), range, done); + + if !batch.is_err() { + report_peer(p2p, peer, PeerReportReason::SuccessfulBlockImport); + } + + batch } .instrument(tracing::debug_span!("execute_and_commit")) .in_current_span() @@ -289,14 +280,10 @@ where // Continue the stream unless an error occurs. .into_scan_err() .scan_err() - // Count the number of successfully executed blocks and - // find any errors. - // Fold the stream into a count and any errors. - .fold(0usize, |count, result| async move { - match result { - Ok(batch) => count + batch.len(), - Err(_) => count - } + // Count the number of successfully executed blocks. + // Fold the stream into a count. + .fold(0usize, |count, batch| async move { + count + batch.results.len() }) .await; @@ -314,80 +301,53 @@ fn get_block_stream< params: &Config, p2p: Arc

, consensus: Arc, -) -> impl Stream>> + '_ -{ +) -> impl Stream> + '_ { let header_stream = get_header_batch_stream(range.clone(), params, p2p.clone()); header_stream .map({ let consensus = consensus.clone(); let p2p = p2p.clone(); - move |header_batch| { - let header_batch = header_batch?; + move |header_batch: SealedHeaderBatch| { let Batch { peer, range, results, } = header_batch; - let results = results + let checked_headers = results .into_iter() - .map({ - let consensus = consensus.clone(); - let p2p = p2p.clone(); - let peer = peer.clone(); - move |header| { - check_sealed_header( - &header, - peer.clone(), - p2p.clone(), - consensus.clone(), - )?; - Result::<_, ImportError>::Ok(header) - } + .take_while(|header| { + check_sealed_header(header, peer.clone(), &p2p, &consensus) }) - .take_while(|result| result.is_ok()) - .filter_map(|result| result.ok()) .collect::>(); - let batch = Batch::new(peer.clone(), range, results); - Result::<_, ImportError>::Ok(batch) + Batch::new(peer, range, checked_headers) } }) - .map({ + .map(move |headers| { let consensus = consensus.clone(); - move |valid_headers_batch| { - let consensus = consensus.clone(); - async move { - let valid_headers = valid_headers_batch?; - if let Some(header) = valid_headers.results.last() { - await_da_height(header, consensus.as_ref()).await? - }; - Result::<_, ImportError>::Ok(valid_headers) - } - } - }) - .map({ let p2p = p2p.clone(); - move |headers| { - let p2p = p2p.clone(); - async move { - let headers = headers.await?; - let Batch { - peer, - range, - results, - } = headers; - if results.is_empty() { - let batch = SealedBlockBatch::new(peer, range, vec![]); - Ok(batch) - } else { - let headers = - SealedHeaderBatch::new(peer.clone(), range.clone(), results); - let batch = get_blocks(p2p, headers).await?; - Ok(batch) - } + async move { + let Batch { + peer, + range, + results, + } = headers; + if results.is_empty() { + SealedBlockBatch::new(peer, range, vec![]) + } else { + await_da_height( + results + .last() + .expect("We checked headers are not empty above"), + &consensus, + ) + .await; + let headers = + SealedHeaderBatch::new(peer.clone(), range.clone(), results); + get_blocks(&p2p, headers).await } - .instrument(tracing::debug_span!("consensus_and_transactions")) - .in_current_span() } + .instrument(tracing::debug_span!("consensus_and_transactions")) + .in_current_span() }) } @@ -395,14 +355,14 @@ fn get_header_batch_stream( range: RangeInclusive, params: &Config, p2p: Arc

<P>,
-) -> impl Stream<Item = Result<SealedHeaderBatch, ImportError>> {
+) -> impl Stream<Item = SealedHeaderBatch> {
     let Config {
         header_batch_size, ..
     } = params;
     let ranges = range_chunks(range, *header_batch_size);
     futures::stream::iter(ranges).then(move |range| {
         let p2p = p2p.clone();
-        async move { get_headers_batch(range, p2p).await }
+        async move { get_headers_batch(range, &p2p).await }
     })
 }
@@ -423,34 +383,27 @@ fn check_sealed_header<
 >(
     header: &SealedBlockHeader,
     peer_id: PeerId,
-    p2p: Arc<P>,
-    consensus: Arc<C>,
-) -> Result<(), ImportError> {
+    p2p: &Arc<P>
, + consensus: &Arc, +) -> bool { let validity = consensus .check_sealed_header(header) - .map_err(ImportError::ConsensusError) - .trace_err("Failed to check consensus on header")?; - if validity { - Ok(()) - } else { - report_peer( - p2p.as_ref(), - peer_id.clone(), - PeerReportReason::BadBlockHeader, - ); - Err(ImportError::BadBlockHeader) + .trace_err("Failed to check consensus on header") + .unwrap_or(false); + if !validity { + report_peer(p2p, peer_id.clone(), PeerReportReason::BadBlockHeader); } + validity } async fn await_da_height( header: &SealedBlockHeader, - consensus: &C, -) -> Result<(), ImportError> { - consensus + consensus: &Arc, +) { + let _ = consensus .await_da_height(&header.entity.da_height) .await - .map_err(ImportError::ConsensusError)?; - Ok(()) + .trace_err("Failed to wait for DA layer to sync"); } /// Waits for a notify or shutdown signal. @@ -473,8 +426,8 @@ async fn wait_for_notify_or_shutdown( async fn get_sealed_block_headers

<P>(
     range: Range<u32>,
-    p2p: &P,
+    p2p: &Arc<P>
, +) -> SourcePeer> where P: PeerToPeerPort + Send + Sync + 'static, { @@ -483,46 +436,36 @@ where range.start, range.end ); - let res = p2p - .get_sealed_block_headers(range) + p2p.get_sealed_block_headers(range) .await - .trace_err("Failed to get headers"); - match res { - Ok(sourced_headers) => { - let sourced = sourced_headers.map(|headers| match headers { - None => vec![], - Some(headers) => headers, - }); - Ok(sourced) - } - Err(e) => Err(ImportError::Other(e)), - } + .trace_err("Failed to get headers") + .unwrap_or_default() + .map(|inner| inner.unwrap_or_default()) } async fn get_transactions

<P>(
     peer_id: PeerId,
     range: Range<u32>,
-    p2p: &P,
+    p2p: &Arc<P>
, +) -> Option> where P: PeerToPeerPort + Send + Sync + 'static, { let range = peer_id.clone().bind(range); - let res = p2p.get_transactions(range).await; + let res = p2p + .get_transactions(range) + .await + .trace_err("Failed to get transactions"); match res { - Ok(Some(transactions)) => Ok(transactions), - Ok(None) => { + Ok(Some(transactions)) => Some(transactions), + _ => { report_peer(p2p, peer_id.clone(), PeerReportReason::MissingTransactions); - Err(ImportError::MissingTransactions) + None } - Err(e) => Err(e.into()), } } -async fn get_headers_batch

<P>(
-    range: Range<u32>,
-    p2p: Arc<P>,
-) -> Result<SealedHeaderBatch, ImportError>
+async fn get_headers_batch<P>(range: Range<u32>, p2p: &Arc<P>) -> SealedHeaderBatch
 where
     P: PeerToPeerPort + Send + Sync + 'static,
 {
@@ -531,8 +474,7 @@ where
         range.start,
         range.end
     );
-    let result = get_sealed_block_headers(range.clone(), p2p.as_ref()).await;
-    let sourced_headers = result?;
+    let sourced_headers = get_sealed_block_headers(range.clone(), p2p).await;
     let SourcePeer {
         peer_id,
         data: headers,
@@ -548,16 +490,12 @@ where
         .map(|(header, _)| header)
         .collect::<Vec<_>>();
     if headers.len() != range.len() {
-        report_peer(
-            p2p.as_ref(),
-            peer_id.clone(),
-            PeerReportReason::MissingBlockHeaders,
-        );
+        report_peer(p2p, peer_id.clone(), PeerReportReason::MissingBlockHeaders);
     }
-    Ok(Batch::new(peer_id, range.clone(), headers))
+    Batch::new(peer_id, range, headers)
 }
-fn report_peer<P>(p2p: &P, peer_id: PeerId, reason: PeerReportReason)
+fn report_peer<P>(p2p: &Arc<P>, peer_id: PeerId, reason: PeerReportReason)
 where
     P: PeerToPeerPort + Send + Sync + 'static,
 {
@@ -571,10 +509,7 @@ where
 /// Get blocks correlating to the headers from a specific peer
 #[tracing::instrument(skip(p2p, headers))]
-async fn get_blocks<P>(
-    p2p: Arc<P>,
-    headers: SealedHeaderBatch,
-) -> Result<SealedBlockBatch, ImportError>
+async fn get_blocks<P>(p2p: &Arc<P>
, headers: SealedHeaderBatch) -> SealedBlockBatch where P: PeerToPeerPort + Send + Sync + 'static, { @@ -583,12 +518,14 @@ where peer, range, } = headers; - let transaction_data = - get_transactions(peer.clone(), range.clone(), p2p.as_ref()).await?; - let iter = headers.into_iter().zip(transaction_data); + let Some(transaction_data) = get_transactions(peer.clone(), range.clone(), p2p).await + else { + return Batch::new(peer, range, vec![]) + }; + + let iter = headers.into_iter().zip(transaction_data.into_iter()); let mut blocks = vec![]; for (block_header, transactions) in iter { - let block_header = block_header.clone(); let SealedBlockHeader { consensus, entity: header, @@ -601,14 +538,11 @@ where if let Some(block) = block { blocks.push(block); } else { - report_peer( - p2p.as_ref(), - peer.clone(), - PeerReportReason::InvalidTransactions, - ); + report_peer(p2p, peer.clone(), PeerReportReason::InvalidTransactions); + break } } - Ok(Batch::new(peer, range, blocks)) + Batch::new(peer, range, blocks) } #[tracing::instrument( @@ -623,16 +557,13 @@ async fn execute_and_commit( executor: &E, state: &SharedMutex, block: SealedBlock, -) -> Result<(), ImportError> +) -> anyhow::Result<()> where E: BlockImporterPort + Send + Sync + 'static, { // Execute and commit the block. let height = *block.entity.header().height(); - let r = executor - .execute_and_commit(block) - .await - .map_err(ImportError::ExecutionError); + let r = executor.execute_and_commit(block).await; // If the block executed successfully, mark it as committed. if r.is_ok() { @@ -645,10 +576,12 @@ where /// Extra stream utilities. trait StreamUtil: Sized { - fn into_scan_none_or_err(self) -> ScanNoneErr { - ScanNoneErr(self) + /// Scan the stream for `None`. + fn into_scan_none(self) -> ScanNone { + ScanNone(self) } + /// Scan the stream for errors. fn into_scan_err(self) -> ScanErr { ScanErr(self) } @@ -657,34 +590,25 @@ trait StreamUtil: Sized { impl StreamUtil for S {} struct ScanErr(S); -struct ScanNoneErr(S); +struct ScanNone(S); -impl ScanNoneErr { - /// Scan the stream for `None` or errors. - fn scan_none_or_err<'a, T: 'a>( - self, - ) -> impl Stream> + 'a +impl ScanNone { + fn scan_none<'a, T: 'a>(self) -> impl Stream + 'a where - S: Stream, ImportError>> + Send + 'a, + S: Stream> + Send + 'a, { - let stream = self.0.boxed(); - futures::stream::unfold((false, stream), |(mut is_err, mut stream)| async move { - if is_err { - None - } else { - let result = stream.next().await?; - is_err = result.is_err(); - result.transpose().map(|result| (result, (is_err, stream))) - } + let stream = self.0.boxed::<'a>(); + futures::stream::unfold((false, stream), |(_, mut stream)| async move { + let element = stream.next().await?; + element.map(|e| (e, (false, stream))) }) } } impl ScanErr { - /// Scan the stream for errors. 
- fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a + fn scan_err<'a, T: 'a>(self) -> impl Stream> + 'a where - S: Stream> + Send + 'a, + S: Stream> + Send + 'a, { let stream = self.0.boxed::<'a>(); futures::stream::unfold((false, stream), |(mut err, mut stream)| async move { diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index e87bfd1ebd0..b97f27958a6 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -1059,14 +1059,6 @@ impl PeerReportTestBuilder { self } - // pub fn with_get_transactions( - // mut self, - // get_transactions: Option>, - // ) -> Self { - // self.get_transactions = Some(get_transactions); - // self - // } - pub fn with_get_transactions( mut self, get_transactions: Option>, diff --git a/crates/types/src/services/p2p.rs b/crates/types/src/services/p2p.rs index 98961432c52..758ca83df2d 100644 --- a/crates/types/src/services/p2p.rs +++ b/crates/types/src/services/p2p.rs @@ -54,7 +54,7 @@ pub struct GossipData { /// Transactions gossiped by peers for inclusion into a block pub type TransactionGossipData = GossipData; -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] /// The source of some network data. pub struct SourcePeer { /// The source of the data. @@ -112,7 +112,7 @@ pub struct BlockHeightHeartbeatData { } /// Opaque peer identifier. -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Default, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct PeerId(Vec); From 4e40e157d7a413422fc94a630c0d49317ddb0187 Mon Sep 17 00:00:00 2001 From: Brandon Vrooman Date: Mon, 2 Oct 2023 17:22:32 -0400 Subject: [PATCH 87/87] Review feedback --- crates/services/p2p/src/codecs/postcard.rs | 3 ++- crates/services/p2p/src/p2p_service.rs | 6 ++++-- crates/services/sync/src/import.rs | 3 +-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/crates/services/p2p/src/codecs/postcard.rs b/crates/services/p2p/src/codecs/postcard.rs index f5ae53729b7..930a8c876a9 100644 --- a/crates/services/p2p/src/codecs/postcard.rs +++ b/crates/services/p2p/src/codecs/postcard.rs @@ -278,7 +278,8 @@ mod tests { #[test] fn test_request_size_fits() { - let m = RequestMessage::Transactions(2..6); + let arbitrary_range = 2..6; + let m = RequestMessage::Transactions(arbitrary_range); assert!(postcard::to_stdvec(&m).unwrap().len() <= MAX_REQUEST_SIZE); } } diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index fdb11c3511c..0fe5a156a02 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1666,7 +1666,8 @@ mod tests { #[tokio::test] #[instrument] async fn request_response_works_with_transactions() { - request_response_works_with(RequestMessage::Transactions(2..6)).await + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::Transactions(arbitrary_range)).await } #[tokio::test] @@ -1678,7 +1679,8 @@ mod tests { #[tokio::test] #[instrument] async fn request_response_works_with_sealed_headers_range_inclusive() { - request_response_works_with(RequestMessage::SealedHeaders(2..6)).await + let arbitrary_range = 2..6; + request_response_works_with(RequestMessage::SealedHeaders(arbitrary_range)).await } #[tokio::test] diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 
ec897af9849..5e35ff755f1 100644
--- a/crates/services/sync/src/import.rs
+++ b/crates/services/sync/src/import.rs
@@ -341,8 +341,7 @@ fn get_block_stream<
                     &consensus,
                 )
                 .await;
-                let headers =
-                    SealedHeaderBatch::new(peer.clone(), range.clone(), results);
+                let headers = SealedHeaderBatch::new(peer, range, results);
                 get_blocks(&p2p, headers).await
             }
         }
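
Note on the batching arithmetic relied on by the sync changes above: `range_chunks` (added in PATCH 83) turns an inclusive block-height range into half-open per-request batches, and the test helper `div_ceil` (PATCH 85) predicts how many batches a range produces. The standalone sketch below mirrors those two helpers against std only; the `main` harness and the concrete 0..=499 / batch-size-10 numbers are illustrative (they correspond to `test_import_0_to_499`) and are not part of the patch series itself.

use std::ops::{Range, RangeInclusive};

// Mirrors `range_chunks` from PATCH 83: split an inclusive height range
// into half-open chunks of at most `chunk_size` heights each.
fn range_chunks(
    range: RangeInclusive<u32>,
    chunk_size: u32,
) -> impl Iterator<Item = Range<u32>> {
    let end = *range.end() + 1;
    range.step_by(chunk_size as usize).map(move |chunk_start| {
        let block_end = (chunk_start + chunk_size).min(end);
        chunk_start..block_end
    })
}

// Mirrors the test-side `div_ceil` from PATCH 85: ceiling division, i.e.
// the number of batches needed for `divisor` items of size `dividend`.
fn div_ceil(divisor: usize, dividend: usize) -> usize {
    (divisor + (dividend - 1)) / dividend
}

fn main() {
    // Illustrative check only: 500 headers (heights 0..=499) with a batch
    // size of 10 yield 50 batches: 0..10, 10..20, ..., 490..500.
    let batches: Vec<Range<u32>> = range_chunks(0..=499, 10).collect();
    assert_eq!(batches.len(), div_ceil(500, 10));
    assert_eq!(batches.first(), Some(&(0..10)));
    assert_eq!(batches.last(), Some(&(490..500)));
}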