Support block gap created by fast sync (paritytech#5703)
This is part 2 of
paritytech#5406 (comment),
properly handling the block gap generated during fast sync.

Although paritytech#5406 remains unresolved due to the known issues in paritytech#5663, I
decided to open this PR sooner rather than later to speed up the overall
progress. I've tested fast sync locally with this PR, and it appears to be
working well. (I was doing a fast sync from a discontinued archive node
locally, so the issue highlighted in
paritytech#5663 (comment)
was conveniently bypassed.)

Once the edge cases in paritytech#5663 are addressed, we can move forward by
removing the `body` attribute from the `LightState` block request and
completing the work on paritytech#5406. The changes in this PR are incremental, so
reviewing it commit by commit should provide the best clarity.

cc @dmitry-markin

---------

Co-authored-by: Bastian Köcher <git@kchr.de>
2 people authored and Krayt78 committed Dec 18, 2024
1 parent 1152215 commit 0a5d6ba
Showing 2 changed files with 83 additions and 30 deletions.
13 changes: 13 additions & 0 deletions prdoc/pr_5703.prdoc
@@ -0,0 +1,13 @@
title: Properly handle block gap created by fast sync

doc:
  - audience: Node Dev
    description: |
      Implements support for handling block gaps generated during fast sync. This includes managing the creation,
      updating, and removal of block gaps.
      Note that this feature is not fully activated until the `body` attribute is removed from the `LightState`
      block request in chain sync, which will occur after the issue #5406 is resolved.

crates:
  - name: sc-client-db
    bump: patch
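For orientation, the gap bookkeeping in this PR revolves around the `BlockGap` record that the diff below encodes and stores under `meta_keys::BLOCK_GAP` in the `META` column. The following is a minimal, self-contained sketch of the shapes involved; field and variant names are taken from the diff, while the real definitions live elsewhere in the Substrate codebase, are generic over the block number, and derive SCALE `Encode`/`Decode`:

```rust
// Sketch only: shapes inferred from the diff below, not the actual definitions.

/// What is missing for the blocks inside the gap.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum BlockGapType {
    /// Neither headers nor bodies are stored (the gap left behind by warp sync).
    MissingHeaderAndBody,
    /// Headers are stored but bodies are not (the gap left behind by fast sync).
    MissingBody,
}

/// Inclusive range of block numbers that still need data.
#[derive(Debug, Clone, Copy)]
struct BlockGap<N> {
    start: N,
    end: N,
    gap_type: BlockGapType,
}

fn main() {
    // A fast-sync style gap: headers for blocks 100..=200 exist, bodies do not.
    let gap = BlockGap { start: 100u64, end: 200, gap_type: BlockGapType::MissingBody };
    println!("{gap:?}");
}
```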
100 changes: 70 additions & 30 deletions substrate/client/db/src/lib.rs
@@ -1486,6 +1486,7 @@ impl<Block: BlockT> Backend<Block> {
.map(|(n, _)| n)
.unwrap_or(Zero::zero());
let existing_header = number <= highest_leaf && self.blockchain.header(hash)?.is_some();
let existing_body = pending_block.body.is_some();

// blocks are keyed by number + hash.
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?;
@@ -1677,6 +1678,23 @@ impl<Block: BlockT> Backend<Block> {
children,
);
}
}

let should_check_block_gap = !existing_header || !existing_body;

if should_check_block_gap {
let insert_new_gap =
|transaction: &mut Transaction<DbHash>,
new_gap: BlockGap<NumberFor<Block>>,
block_gap: &mut Option<BlockGap<NumberFor<Block>>>| {
transaction.set(columns::META, meta_keys::BLOCK_GAP, &new_gap.encode());
transaction.set(
columns::META,
meta_keys::BLOCK_GAP_VERSION,
&BLOCK_GAP_CURRENT_VERSION.encode(),
);
block_gap.replace(new_gap);
};

if let Some(mut gap) = block_gap {
match gap.gap_type {
@@ -1695,43 +1713,65 @@ impl<Block: BlockT> Backend<Block> {
block_gap = None;
debug!(target: "db", "Removed block gap.");
} else {
block_gap = Some(gap);
insert_new_gap(&mut transaction, gap, &mut block_gap);
debug!(target: "db", "Update block gap. {block_gap:?}");
transaction.set(
columns::META,
meta_keys::BLOCK_GAP,
&gap.encode(),
);
transaction.set(
columns::META,
meta_keys::BLOCK_GAP_VERSION,
&BLOCK_GAP_CURRENT_VERSION.encode(),
);
}
block_gap_updated = true;
},
BlockGapType::MissingBody => {
unreachable!("Unsupported block gap. TODO: https://github.com/paritytech/polkadot-sdk/issues/5406")
// Gap increased when syncing the header chain during fast sync.
if number == gap.end + One::one() && !existing_body {
gap.end += One::one();
utils::insert_number_to_key_mapping(
&mut transaction,
columns::KEY_LOOKUP,
number,
hash,
)?;
insert_new_gap(&mut transaction, gap, &mut block_gap);
debug!(target: "db", "Update block gap. {block_gap:?}");
block_gap_updated = true;
// Gap decreased when downloading the full blocks.
} else if number == gap.start && existing_body {
gap.start += One::one();
if gap.start > gap.end {
transaction.remove(columns::META, meta_keys::BLOCK_GAP);
transaction.remove(columns::META, meta_keys::BLOCK_GAP_VERSION);
block_gap = None;
debug!(target: "db", "Removed block gap.");
} else {
insert_new_gap(&mut transaction, gap, &mut block_gap);
debug!(target: "db", "Update block gap. {block_gap:?}");
}
block_gap_updated = true;
}
},
}
} else if operation.create_gap &&
number > best_num + One::one() &&
self.blockchain.header(parent_hash)?.is_none()
{
let gap = BlockGap {
start: best_num + One::one(),
end: number - One::one(),
gap_type: BlockGapType::MissingHeaderAndBody,
};
transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode());
transaction.set(
columns::META,
meta_keys::BLOCK_GAP_VERSION,
&BLOCK_GAP_CURRENT_VERSION.encode(),
);
block_gap = Some(gap);
block_gap_updated = true;
debug!(target: "db", "Detected block gap {block_gap:?}");
} else if operation.create_gap {
if number > best_num + One::one() &&
self.blockchain.header(parent_hash)?.is_none()
{
let gap = BlockGap {
start: best_num + One::one(),
end: number - One::one(),
gap_type: BlockGapType::MissingHeaderAndBody,
};
insert_new_gap(&mut transaction, gap, &mut block_gap);
block_gap_updated = true;
debug!(target: "db", "Detected block gap (warp sync) {block_gap:?}");
} else if number == best_num + One::one() &&
self.blockchain.header(parent_hash)?.is_some() &&
!existing_body
{
let gap = BlockGap {
start: number,
end: number,
gap_type: BlockGapType::MissingBody,
};
insert_new_gap(&mut transaction, gap, &mut block_gap);
block_gap_updated = true;
debug!(target: "db", "Detected block gap (fast sync) {block_gap:?}");
}
}
}

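To summarize the new `MissingBody` handling above: on each block import the gap grows by one at the top when only a header arrives just past `gap.end`, shrinks by one at the bottom once the full body for `gap.start` arrives, and is removed from the `META` column once `start` overtakes `end`. Below is a rough standalone sketch of that rule, using plain integers instead of `NumberFor<Block>` and omitting the database transaction; the `existing_body` flag mirrors `pending_block.body.is_some()` in the diff:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Gap {
    start: u64,
    end: u64,
}

/// Returns the updated gap, or `None` once the gap has been fully filled.
fn update_missing_body_gap(gap: Gap, number: u64, existing_body: bool) -> Option<Gap> {
    if number == gap.end + 1 && !existing_body {
        // A header-only block imported just past the gap widens it at the top.
        Some(Gap { end: gap.end + 1, ..gap })
    } else if number == gap.start && existing_body {
        // A full block imported at the bottom of the gap narrows it.
        let start = gap.start + 1;
        if start > gap.end {
            None // Gap fully filled; the metadata entry would be removed.
        } else {
            Some(Gap { start, ..gap })
        }
    } else {
        Some(gap) // Unrelated import; the gap is left untouched.
    }
}

fn main() {
    // A single-block gap: the header of block 10 is stored, its body is not.
    let gap = Gap { start: 10, end: 10 };
    // Importing header-only block 11 widens the gap.
    assert_eq!(update_missing_body_gap(gap, 11, false), Some(Gap { start: 10, end: 11 }));
    // Downloading the body of block 10 closes the single-block gap.
    assert_eq!(update_missing_body_gap(gap, 10, true), None);
}
```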
