From e05b66717044541baf9a12175724a426347882c2 Mon Sep 17 00:00:00 2001 From: Rob N Date: Sun, 30 Jun 2024 11:43:44 -1000 Subject: [PATCH] chain: better reorg behavior for CBF --- src/chain/chain.rs | 10 +++++++--- src/filters/cfheader_chain.rs | 10 ++++++++++ src/filters/filter_chain.rs | 11 +++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/src/chain/chain.rs b/src/chain/chain.rs index 9837ce5..a7eacff 100644 --- a/src/chain/chain.rs +++ b/src/chain/chain.rs @@ -422,12 +422,16 @@ impl Chain { .send_dialog("Valid reorganization found".into()) .await; let reorged = self.header_chain.extend(&uncommon); + let removed_hashes = &reorged + .iter() + .map(|disconnect| disconnect.header.block_hash()) + .collect::<Vec<BlockHash>>(); + self.clear_compact_filter_queue(); + self.cf_header_chain.remove(removed_hashes); + self.filter_chain.remove(removed_hashes); self.dialog .send_data(NodeMessage::BlocksDisconnected(reorged)) .await; - self.clear_compact_filter_queue(); - self.clear_filter_headers().await; - self.clear_filters().await; self.flush_over_height(stem).await; Ok(()) } else { diff --git a/src/filters/cfheader_chain.rs b/src/filters/cfheader_chain.rs index 6c74d2e..6e4f5da 100644 --- a/src/filters/cfheader_chain.rs +++ b/src/filters/cfheader_chain.rs @@ -55,6 +55,7 @@ type Queue = Option<Vec<QueuedCFHeader>>; #[derive(Debug)] pub(crate) struct CFHeaderChain { anchor_checkpoint: HeaderCheckpoint, + // We only really care about this relationship hash_chain: HashMap<BlockHash, FilterHash>, merged_queue: Queue, prev_stophash_request: Option<BlockHash>, @@ -102,6 +103,7 @@ impl CFHeaderChain { self.attempt_merge().await } + // If enough peers have responded, insert those block hashes and filter hashes into a map. 
async fn attempt_merge(&mut self) -> AppendAttempt { let queue = self.merged_queue.as_ref().unwrap(); if self.current_quorum.ge(&self.quorum_required) { @@ -147,6 +149,14 @@ impl CFHeaderChain { self.hash_chain.clear(); } + // Some blocks got reorganized, so we remove them as well as the previous header + pub(crate) fn remove(&mut self, hashes: &[BlockHash]) { + for hash in hashes { + self.hash_chain.remove(hash); + } + self.prev_header = None; + } + pub(crate) fn hash_at(&self, block: &BlockHash) -> Option<&FilterHash> { self.hash_chain.get(block) } diff --git a/src/filters/filter_chain.rs b/src/filters/filter_chain.rs index 47687b1..1f928bf 100644 --- a/src/filters/filter_chain.rs +++ b/src/filters/filter_chain.rs @@ -6,9 +6,13 @@ use crate::chain::checkpoints::HeaderCheckpoint; const INITIAL_BUFFER_SIZE: usize = 20_000; +// Block filters can be 300 bytes or more. Assuming that some users may +// run the node for extended periods of time, there is little advantage to actually +// storing them. Instead we keep track of the filters we have seen by saving their block hash. #[derive(Debug)] pub(crate) struct FilterChain { anchor_checkpoint: HeaderCheckpoint, + // Because we are checking the filters on the fly, we don't actually store them chain: HashSet<BlockHash>, prev_stophash_request: Option<BlockHash>, } @@ -26,6 +30,13 @@ impl FilterChain { self.chain.insert(hash); } + // Some blocks got invalidated, so we remove them from our "chain" + pub(crate) fn remove(&mut self, hashes: &[BlockHash]) { + for hash in hashes { + self.chain.remove(hash); + } + } + pub(crate) async fn clear_cache(&mut self) { self.chain.clear() }