Skip to content

Commit

Permalink
Merge pull request #28 from rustaceanrob/better-reorg
Browse files Browse the repository at this point in the history
chain: better reorg behavior for CBF
  • Loading branch information
rustaceanrob authored Jun 30, 2024
2 parents 13e711a + e05b667 commit 9be7c3f
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 3 deletions.
10 changes: 7 additions & 3 deletions src/chain/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -422,12 +422,16 @@ impl Chain {
.send_dialog("Valid reorganization found".into())
.await;
let reorged = self.header_chain.extend(&uncommon);
let removed_hashes = &reorged
.iter()
.map(|disconnect| disconnect.header.block_hash())
.collect::<Vec<BlockHash>>();
self.clear_compact_filter_queue();
self.cf_header_chain.remove(removed_hashes);
self.filter_chain.remove(removed_hashes);
self.dialog
.send_data(NodeMessage::BlocksDisconnected(reorged))
.await;
self.clear_compact_filter_queue();
self.clear_filter_headers().await;
self.clear_filters().await;
self.flush_over_height(stem).await;
Ok(())
} else {
Expand Down
10 changes: 10 additions & 0 deletions src/filters/cfheader_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ type Queue = Option<Vec<QueuedCFHeader>>;
#[derive(Debug)]
pub(crate) struct CFHeaderChain {
anchor_checkpoint: HeaderCheckpoint,
// The block-hash-to-filter-hash mapping is the only relationship we need to track
hash_chain: HashMap<BlockHash, FilterHash>,
merged_queue: Queue,
prev_stophash_request: Option<BlockHash>,
Expand Down Expand Up @@ -102,6 +103,7 @@ impl CFHeaderChain {
self.attempt_merge().await
}

// If enough peers have responded, insert those block hashes and filter hashes into a map.
async fn attempt_merge(&mut self) -> AppendAttempt {
let queue = self.merged_queue.as_ref().unwrap();
if self.current_quorum.ge(&self.quorum_required) {
Expand Down Expand Up @@ -147,6 +149,14 @@ impl CFHeaderChain {
self.hash_chain.clear();
}

// A reorganization invalidated these block hashes: drop their filter
// headers from the map and reset the previous-header cursor so header
// linking restarts cleanly after the reorg.
pub(crate) fn remove(&mut self, hashes: &[BlockHash]) {
    hashes.iter().for_each(|block_hash| {
        self.hash_chain.remove(block_hash);
    });
    self.prev_header = None;
}

// Keyed lookup: the filter hash we recorded for `block`, or `None` if no
// entry exists for that block hash.
pub(crate) fn hash_at(&self, block: &BlockHash) -> Option<&FilterHash> {
    self.hash_chain.get(block)
}
Expand Down
11 changes: 11 additions & 0 deletions src/filters/filter_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,13 @@ use crate::chain::checkpoints::HeaderCheckpoint;

const INITIAL_BUFFER_SIZE: usize = 20_000;

// Block filters can be 300 bytes or more. Assuming that some users may
// run the node for extended periods of time, there is little advantage to
// actually storing them. Instead we keep track of the filters we have seen
// by saving their block hash.
#[derive(Debug)]
pub(crate) struct FilterChain {
// Checkpoint the chain is anchored to; presumably filters below it are never tracked — TODO confirm
anchor_checkpoint: HeaderCheckpoint,
// Because we are checking the filters on the fly, we don't actually store them;
// only the block hashes of filters we have already checked.
chain: HashSet<BlockHash>,
// Stop hash of the most recent filter request, if any was made
prev_stophash_request: Option<BlockHash>,
}
Expand All @@ -26,6 +30,13 @@ impl FilterChain {
self.chain.insert(hash);
}

// A reorganization invalidated these blocks, so forget that their
// filters were ever checked.
pub(crate) fn remove(&mut self, hashes: &[BlockHash]) {
    hashes.iter().for_each(|block_hash| {
        self.chain.remove(block_hash);
    });
}

// Forget every block hash whose filter we have checked.
pub(crate) async fn clear_cache(&mut self) {
    self.chain.clear()
}
Expand Down

0 comments on commit 9be7c3f

Please sign in to comment.