Fixed block response limit check (paritytech#9692)
* Fixed block response limit check

* Fixed start block detection and added a test

* Missing test
arkpar authored and GopherJ committed Sep 14, 2021
1 parent 91061a7 commit 647b6fb
Showing 5 changed files with 45 additions and 12 deletions.
5 changes: 3 additions & 2 deletions client/network/src/block_request_handler.rs
@@ -62,7 +62,7 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig {
 name: generate_protocol_name(protocol_id).into(),
 max_request_size: 1024 * 1024,
 max_response_size: 16 * 1024 * 1024,
-request_timeout: Duration::from_secs(40),
+request_timeout: Duration::from_secs(20),
 inbound_queue: None,
 }
 }
@@ -355,7 +355,8 @@ impl<B: BlockT> BlockRequestHandler<B> {
 indexed_body,
 };

-total_size += block_data.body.len();
+total_size += block_data.body.iter().map(|ex| ex.len()).sum::<usize>();
+total_size += block_data.indexed_body.iter().map(|ex| ex.len()).sum::<usize>();
 blocks.push(block_data);

 if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES {
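This hunk is the core of the "response limit check" fix: the old code added `block_data.body.len()` — the number of extrinsics — to `total_size`, while the new code sums the encoded byte length of every extrinsic (and of the indexed body), so the `MAX_BODY_BYTES` cap is compared against actual bytes. A minimal, self-contained sketch of the two accountings, assuming a body is a plain `Vec<Vec<u8>>` of encoded extrinsics (illustrative only, not the handler's real types):

// Illustrative sketch only: compare the old and new size accounting.
fn main() {
    let body: Vec<Vec<u8>> = vec![vec![0u8; 32 * 1024]; 32]; // 32 extrinsics, 32 KiB each

    // Old accounting: `body.len()` is the *count* of extrinsics, not their size.
    let old_size = body.len();

    // New accounting: sum the byte length of every extrinsic.
    let new_size: usize = body.iter().map(|ex| ex.len()).sum();

    assert_eq!(old_size, 32);             // only 32 "bytes" counted before the fix
    assert_eq!(new_size, 32 * 32 * 1024); // ~1 MiB actually counted after the fix
}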
2 changes: 1 addition & 1 deletion client/network/src/protocol.rs
@@ -629,7 +629,7 @@ impl<B: BlockT> Protocol<B> {
 } else {
 None
 },
-receipt: if !block_data.message_queue.is_empty() {
+receipt: if !block_data.receipt.is_empty() {
 Some(block_data.receipt)
 } else {
 None
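The protocol.rs change corrects what looks like a copy-paste slip: the `receipt` field was gated on whether `message_queue` was empty, so a receipt could be dropped (or sent needlessly) depending on an unrelated field. A minimal sketch of the gating pattern, assuming plain `Vec<u8>` fields (illustrative only):

// Illustrative only: gate each optional field on *its own* emptiness, not a neighbour's.
fn to_option(bytes: Vec<u8>) -> Option<Vec<u8>> {
    if !bytes.is_empty() { Some(bytes) } else { None }
}

fn main() {
    let receipt = vec![1u8, 2, 3];
    let message_queue: Vec<u8> = Vec::new();

    // Old behaviour: `receipt` presence decided by `message_queue.is_empty()` -> dropped here.
    let old = if !message_queue.is_empty() { Some(receipt.clone()) } else { None };
    // New behaviour: `receipt` presence decided by its own emptiness.
    let new = to_option(receipt.clone());

    assert_eq!(old, None);
    assert_eq!(new, Some(receipt));
}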
19 changes: 11 additions & 8 deletions client/network/src/protocol/sync.rs
@@ -71,7 +71,7 @@ mod state;
 mod warp;

 /// Maximum blocks to request in a single packet.
-const MAX_BLOCKS_TO_REQUEST: usize = 128;
+const MAX_BLOCKS_TO_REQUEST: usize = 64;

 /// Maximum blocks to store in the import queue.
 const MAX_IMPORTING_BLOCKS: usize = 2048;
@@ -1055,12 +1055,14 @@ impl<B: BlockT> ChainSync<B> {
 self.pending_requests.add(who);
 if let Some(request) = request {
 match &mut peer.state {
-PeerSyncState::DownloadingNew(start_block) => {
+PeerSyncState::DownloadingNew(_) => {
 self.blocks.clear_peer_download(who);
-let start_block = *start_block;
 peer.state = PeerSyncState::Available;
-validate_blocks::<B>(&blocks, who, Some(request))?;
-self.blocks.insert(start_block, blocks, who.clone());
+if let Some(start_block) =
+validate_blocks::<B>(&blocks, who, Some(request))?
+{
+self.blocks.insert(start_block, blocks, who.clone());
+}
 self.drain_blocks()
 },
 PeerSyncState::DownloadingStale(_) => {
@@ -2314,13 +2316,14 @@ where
 }

 /// Validate that the given `blocks` are correct.
+/// Returns the number of the first block in the sequence.
 ///
-/// It is expected that `blocks` are in asending order.
+/// It is expected that `blocks` are in ascending order.
 fn validate_blocks<Block: BlockT>(
 blocks: &Vec<message::BlockData<Block>>,
 who: &PeerId,
 request: Option<BlockRequest<Block>>,
-) -> Result<(), BadPeer> {
+) -> Result<Option<NumberFor<Block>>, BadPeer> {
 if let Some(request) = request {
 if Some(blocks.len() as _) > request.max {
 debug!(
@@ -2414,7 +2417,7 @@ fn validate_blocks<Block: BlockT>(
 }
 }

-Ok(())
+Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number()))
 }

 #[cfg(test)]
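Together, these sync.rs hunks change the "start block detection": `validate_blocks` now reports the number of the first block actually present in the (ascending) response, and the `DownloadingNew` branch inserts the blocks at that reported number instead of the start block it remembered from the request. A simplified sketch of the new return value, assuming a pared-down `BlockData` whose header is just an optional block number (the real code reads `*header.number()`):

// Simplified stand-in for message::BlockData: only the optional header number matters here.
struct BlockData {
    header: Option<u64>,
}

// Mirrors `Ok(blocks.first().and_then(|b| b.header.as_ref()).map(|h| *h.number()))`:
// the start block is taken from the first block the peer actually sent.
fn first_block_number(blocks: &[BlockData]) -> Option<u64> {
    blocks.first().and_then(|b| b.header)
}

fn main() {
    let response = vec![BlockData { header: Some(100) }, BlockData { header: Some(101) }];
    assert_eq!(first_block_number(&response), Some(100));

    // An empty (but otherwise valid) response yields no start block, so nothing is inserted.
    assert_eq!(first_block_number(&[]), None);
}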
2 changes: 1 addition & 1 deletion client/network/src/protocol/sync/blocks.rs
@@ -192,7 +192,7 @@ impl<B: BlockT> BlockCollection<B> {
 for r in ranges {
 self.blocks.remove(&r);
 }
-trace!(target: "sync", "Drained {} blocks", drained.len());
+trace!(target: "sync", "Drained {} blocks from {:?}", drained.len(), from);
 drained
 }
29 changes: 29 additions & 0 deletions client/network/test/src/sync.rs
@@ -1193,3 +1193,32 @@ fn syncs_indexed_blocks() {
 .unwrap()
 .is_some());
 }
+
+#[test]
+fn syncs_huge_blocks() {
+use sp_core::storage::well_known_keys::HEAP_PAGES;
+use sp_runtime::codec::Encode;
+use substrate_test_runtime_client::BlockBuilderExt;
+
+sp_tracing::try_init_simple();
+let mut net = TestNet::new(2);
+
+// Increase heap space for bigger blocks.
+net.peer(0).generate_blocks(1, BlockOrigin::Own, |mut builder| {
+builder.push_storage_change(HEAP_PAGES.to_vec(), Some(256u64.encode())).unwrap();
+builder.build().unwrap().block
+});
+
+net.peer(0).generate_blocks(32, BlockOrigin::Own, |mut builder| {
+// Add 32 extrinsics 32k each = 1MiB total
+for _ in 0..32 {
+let ex = Extrinsic::IncludeData([42u8; 32 * 1024].to_vec());
+builder.push(ex).unwrap();
+}
+builder.build().unwrap().block
+});
+
+net.block_until_sync();
+assert_eq!(net.peer(0).client.info().best_number, 33);
+assert_eq!(net.peer(1).client.info().best_number, 33);
+}
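The payload sizes chosen in this test are what make it exercise the response limit (a rough check, using only the constants visible in this diff): each block carries 32 extrinsics of 32 KiB, roughly 1 MiB per block, and 32 such blocks total roughly 32 MiB, which cannot fit in a single 16 MiB response, so the range has to be served across several correctly sized responses:

// Back-of-the-envelope check of the test's payload, illustrative only.
fn main() {
    let per_extrinsic = 32 * 1024;        // 32 KiB of IncludeData per extrinsic
    let per_block = 32 * per_extrinsic;   // 32 extrinsics -> ~1 MiB per block
    let total = 32 * per_block;           // 32 big blocks  -> ~32 MiB overall
    let max_response = 16 * 1024 * 1024;  // max_response_size from the handler config above

    assert!(per_block < max_response);    // a single block still fits in one response
    assert!(total > max_response);        // but the whole range does not
    println!("per_block = {per_block}, total = {total}, max_response = {max_response}");
}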
