Lazy load origin children (kaspanet#518)
* Lazy load origin children

* remove redundant collect

* Delete level-0 relations for blocks which only belong to higher proof levels

* Comments

* Edit comment
someone235 authored Aug 15, 2024
1 parent 5ebd9fe commit 6bf1c75
Showing 3 changed files with 52 additions and 22 deletions.
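The headline change below (parents_builder.rs) swaps an eager fetch of origin's children for a lazily initialized cache that is only populated on the rare fallback path. The following standalone sketch (not part of the diff) illustrates the Option::get_or_insert_with pattern in isolation; load_children is a hypothetical stand-in for the relations-store lookup, not the crate's API.

// Illustrative sketch only: lazy, at-most-once loading via Option::get_or_insert_with.
fn load_children() -> Vec<u64> {
    println!("expensive load happens at most once");
    vec![1, 2, 3]
}

fn main() {
    let mut origin_children: Option<Vec<u64>> = None;
    for round in 0..3 {
        // The closure runs only on the first call; later calls reuse the cached value.
        // The explicit &Vec<_> annotation downgrades the &mut reference returned by
        // get_or_insert_with to a shared one, the same trick the real code uses.
        let children: &Vec<u64> = origin_children.get_or_insert_with(load_children);
        println!("round {round}: {} children", children.len());
    }
}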
9 changes: 9 additions & 0 deletions consensus/src/model/services/reachability.rs
@@ -17,6 +17,7 @@ pub trait ReachabilityService {
fn is_any_dag_ancestor_result(&self, list: &mut impl Iterator<Item = Hash>, queried: Hash) -> Result<bool>;
fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash;
fn get_chain_parent(&self, this: Hash) -> Hash;
+ fn has_reachability_data(&self, this: Hash) -> bool;
}

impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for T {
@@ -56,6 +57,10 @@ impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for T {
fn get_chain_parent(&self, this: Hash) -> Hash {
self.get_parent(this).unwrap()
}
+
+ fn has_reachability_data(&self, this: Hash) -> bool {
+ self.has(this).unwrap()
+ }
}

/// Multi-threaded reachability service imp
@@ -108,6 +113,10 @@ impl<T: ReachabilityStoreReader + ?Sized> ReachabilityService for MTReachability
fn get_chain_parent(&self, this: Hash) -> Hash {
self.store.read().get_parent(this).unwrap()
}
+
+ fn has_reachability_data(&self, this: Hash) -> bool {
+ self.store.read().has(this).unwrap()
+ }
}

impl<T: ReachabilityStoreReader + ?Sized> MTReachabilityService<T> {
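The new has_reachability_data is a thin convenience wrapper over the store reader's has. Below is a hedged sketch of the blanket-impl shape used above; Hash, the two traits, and MemoryStore are simplified stand-ins, not the crate's real types.

use std::collections::HashSet;

type Hash = u64; // stand-in for the real 32-byte hash type

// Simplified store-reader trait; the real trait returns a StoreResult.
trait ReachabilityStoreReaderLike {
    fn has(&self, hash: Hash) -> Result<bool, String>;
}

trait ReachabilityServiceLike {
    fn has_reachability_data(&self, this: Hash) -> bool;
}

// Blanket impl: every store reader automatically provides the service method.
impl<T: ReachabilityStoreReaderLike + ?Sized> ReachabilityServiceLike for T {
    fn has_reachability_data(&self, this: Hash) -> bool {
        self.has(this).unwrap()
    }
}

struct MemoryStore(HashSet<Hash>);

impl ReachabilityStoreReaderLike for MemoryStore {
    fn has(&self, hash: Hash) -> Result<bool, String> {
        Ok(self.0.contains(&hash))
    }
}

fn main() {
    let store = MemoryStore([1, 2, 3].into_iter().collect());
    assert!(store.has_reachability_data(2));
    assert!(!store.has_reachability_data(9));
}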
31 changes: 25 additions & 6 deletions consensus/src/pipeline/pruning_processor/processor.rs
@@ -268,6 +268,12 @@ impl PruningProcessor {
.chain(data.ghostdag_blocks.iter().map(|gd| gd.hash))
.chain(proof.iter().flatten().map(|h| h.hash))
.collect();
+ let keep_level_zero_relations: BlockHashSet = std::iter::empty()
+ .chain(data.anticone.iter().copied())
+ .chain(data.daa_window_blocks.iter().map(|th| th.header.hash))
+ .chain(data.ghostdag_blocks.iter().map(|gd| gd.hash))
+ .chain(proof[0].iter().map(|h| h.hash))
+ .collect();
let keep_headers: BlockHashSet = self.past_pruning_points();

info!("Header and Block pruning: waiting for consensus write permissions...");
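The keep_level_zero_relations set added above is built exactly like keep_relations, except that only the level-0 slice of the proof contributes. A toy sketch of the chained-iterator union follows (illustrative only; u64 hashes stand in for real block hashes):

use std::collections::HashSet;

type Hash = u64;

fn main() {
    let anticone = vec![1, 2];
    let daa_window = vec![2, 3];
    let ghostdag_blocks = vec![4];
    // Only the first (level-0) proof vector participates, unlike keep_relations,
    // which flattens all proof levels.
    let proof: Vec<Vec<Hash>> = vec![vec![5, 6], vec![7]];

    let keep_level_zero: HashSet<Hash> = std::iter::empty()
        .chain(anticone.iter().copied())
        .chain(daa_window.iter().copied())
        .chain(ghostdag_blocks.iter().copied())
        .chain(proof[0].iter().copied())
        .collect();

    let expected: HashSet<Hash> = [1, 2, 3, 4, 5, 6].into_iter().collect();
    assert_eq!(keep_level_zero, expected);
}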
@@ -281,16 +287,16 @@
{
let mut counter = 0;
let mut batch = WriteBatch::default();
- for kept in keep_relations.iter().copied() {
+ for kept in keep_level_zero_relations.iter().copied() {
let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else {
continue;
};
- if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) {
+ if ghostdag.unordered_mergeset().any(|h| !keep_level_zero_relations.contains(&h)) {
let mut mutable_ghostdag: ExternalGhostdagData = ghostdag.as_ref().into();
- mutable_ghostdag.mergeset_blues.retain(|h| keep_relations.contains(h));
- mutable_ghostdag.mergeset_reds.retain(|h| keep_relations.contains(h));
- mutable_ghostdag.blues_anticone_sizes.retain(|k, _| keep_relations.contains(k));
- if !keep_relations.contains(&mutable_ghostdag.selected_parent) {
+ mutable_ghostdag.mergeset_blues.retain(|h| keep_level_zero_relations.contains(h));
+ mutable_ghostdag.mergeset_reds.retain(|h| keep_level_zero_relations.contains(h));
+ mutable_ghostdag.blues_anticone_sizes.retain(|k, _| keep_level_zero_relations.contains(k));
+ if !keep_level_zero_relations.contains(&mutable_ghostdag.selected_parent) {
mutable_ghostdag.selected_parent = ORIGIN;
}
counter += 1;
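For kept blocks whose GHOSTDAG mergeset still references soon-to-be-pruned blocks, the loop above rewrites the GHOSTDAG data against the new keep set. A simplified, self-contained sketch of that trimming step follows (the struct only mirrors the touched fields of ExternalGhostdagData; ORIGIN is a stand-in sentinel, not the real constant):

use std::collections::{HashMap, HashSet};

type Hash = u64;
const ORIGIN: Hash = 0; // stand-in for the real origin sentinel

// Hypothetical mirror of the fields touched by the pruning pass.
struct GhostdagData {
    selected_parent: Hash,
    mergeset_blues: Vec<Hash>,
    mergeset_reds: Vec<Hash>,
    blues_anticone_sizes: HashMap<Hash, u64>,
}

// Drop every reference to a block outside the keep set; if the selected
// parent itself is pruned, fall back to the origin sentinel.
fn trim_to_keep_set(gd: &mut GhostdagData, keep: &HashSet<Hash>) {
    gd.mergeset_blues.retain(|h| keep.contains(h));
    gd.mergeset_reds.retain(|h| keep.contains(h));
    gd.blues_anticone_sizes.retain(|k, _| keep.contains(k));
    if !keep.contains(&gd.selected_parent) {
        gd.selected_parent = ORIGIN;
    }
}

fn main() {
    let keep: HashSet<Hash> = [10, 11].into_iter().collect();
    let mut gd = GhostdagData {
        selected_parent: 99, // about to be pruned
        mergeset_blues: vec![10, 99],
        mergeset_reds: vec![11, 98],
        blues_anticone_sizes: [(10, 1), (99, 2)].into_iter().collect(),
    };
    trim_to_keep_set(&mut gd, &keep);
    assert_eq!(gd.selected_parent, ORIGIN);
    assert_eq!(gd.mergeset_blues, vec![10]);
}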
@@ -396,6 +402,19 @@ impl PruningProcessor {
// other parts of the code assume the existence of GD data etc.)
statuses_write.set_batch(&mut batch, current, StatusHeaderOnly).unwrap();
}
+
+ // Delete level-0 relations for blocks which only belong to higher proof levels.
+ // Note: it is also possible to delete level relations for level x > 0 for any block that only belongs
+ // to proof levels higher than x, but this requires maintaining such per level usage mapping.
+ // Since the main motivation of this deletion step is to reduce the
+ // number of origin's children in level 0, and this is not a bottleneck in any other
+ // level, we currently chose to only delete level-0 redundant relations.
+ if !keep_level_zero_relations.contains(&current) {
+ let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[0]);
+ relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option();
+ staging_level_relations.commit(&mut batch).unwrap();
+ self.ghostdag_stores[0].delete_batch(&mut batch, current).unwrap_option();
+ }
} else {
// Count only blocks which get fully pruned including DAG relations
counter += 1;
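The last hunk above implements the commit's second bullet: header-only blocks that are kept solely for higher proof levels lose their level-0 relations and level-0 GHOSTDAG entries, which is what keeps origin's level-0 children list small. A sketch of that decision against toy in-memory stores follows; the real code stages deletions in a WriteBatch through StagingRelationsStore rather than mutating maps directly.

use std::collections::{HashMap, HashSet};

type Hash = u64;

// Toy stand-ins for the level-0 relations and GHOSTDAG stores.
struct LevelZeroStores {
    relations: HashMap<Hash, Vec<Hash>>, // block -> parents at level 0
    ghostdag: HashSet<Hash>,             // blocks with level-0 GHOSTDAG data
}

// For a header-only block: keep its level-0 entries only if the block is
// referenced by level-0 consumers (anticone, DAA window, GHOSTDAG data,
// or the level-0 proof); otherwise delete them.
fn prune_level_zero_entries(stores: &mut LevelZeroStores, block: Hash, keep_level_zero: &HashSet<Hash>) {
    if !keep_level_zero.contains(&block) {
        stores.relations.remove(&block);
        stores.ghostdag.remove(&block);
    }
}

fn main() {
    let keep_level_zero: HashSet<Hash> = [1].into_iter().collect();
    let mut stores = LevelZeroStores {
        relations: [(1, vec![0]), (2, vec![0])].into_iter().collect(),
        ghostdag: [1, 2].into_iter().collect(),
    };
    prune_level_zero_entries(&mut stores, 1, &keep_level_zero); // kept
    prune_level_zero_entries(&mut stores, 2, &keep_level_zero); // deleted
    assert!(stores.relations.contains_key(&1) && !stores.relations.contains_key(&2));
}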
34 changes: 18 additions & 16 deletions consensus/src/processes/parents_builder.rs
@@ -10,8 +10,6 @@ use crate::model::{
stores::{headers::HeaderStoreReader, reachability::ReachabilityStoreReader, relations::RelationsStoreReader},
};

- use super::reachability::ReachabilityResultExtensions;
-
#[derive(Clone)]
pub struct ParentsManager<T: HeaderStoreReader, U: ReachabilityStoreReader, V: RelationsStoreReader> {
max_block_level: BlockLevel,
@@ -52,10 +50,7 @@ impl<T: HeaderStoreReader, U: ReachabilityStoreReader, V: RelationsStoreReader>
.expect("at least one of the parents is expected to be in the future of the pruning point");
direct_parent_headers.swap(0, first_parent_in_future_of_pruning_point);

- let origin_children = self.relations_service.get_children(ORIGIN).unwrap().read().iter().copied().collect_vec();
- let origin_children_headers =
- origin_children.iter().copied().map(|parent| self.headers_store.get_header(parent).unwrap()).collect_vec();
-
+ let mut origin_children_headers = None;
let mut parents = Vec::with_capacity(self.max_block_level as usize);

for block_level in 0..self.max_block_level {
@@ -97,11 +92,7 @@ impl<T: HeaderStoreReader, U: ReachabilityStoreReader, V: RelationsStoreReader>
};

for (i, parent) in grandparents.into_iter().enumerate() {
- let is_in_origin_children_future = self
- .reachability_service
- .is_any_dag_ancestor_result(&mut origin_children.iter().copied(), parent)
- .unwrap_option()
- .is_some_and(|r| r);
+ let has_reachability_data = self.reachability_service.has_reachability_data(parent);

// Reference blocks are the blocks that are used in reachability queries to check if
// a candidate is in the future of another candidate. In most cases this is just the
@@ -110,13 +101,24 @@
// If we make sure to add a parent in the future of the pruning point first, we can
// know that any pruned candidate that is in the past of some blocks in the pruning
// point anticone should be a parent (in the relevant level) of one of
- // the virtual genesis children in the pruning point anticone. So we can check which
- // virtual genesis children have this block as parent and use those block as
+ // the origin children in the pruning point anticone. So we can check which
+ // origin children have this block as parent and use those block as
// reference blocks.
- let reference_blocks = if is_in_origin_children_future {
+ let reference_blocks = if has_reachability_data {
smallvec![parent]
} else {
- let mut reference_blocks = SmallVec::with_capacity(origin_children.len());
+ // Here we explicitly declare the type because otherwise Rust would make it mutable.
+ let origin_children_headers: &Vec<_> = origin_children_headers.get_or_insert_with(|| {
+ self.relations_service
+ .get_children(ORIGIN)
+ .unwrap()
+ .read()
+ .iter()
+ .copied()
+ .map(|parent| self.headers_store.get_header(parent).unwrap())
+ .collect_vec()
+ });
+ let mut reference_blocks = SmallVec::with_capacity(origin_children_headers.len());
for child_header in origin_children_headers.iter() {
if self.parents_at_level(child_header, block_level).contains(&parent) {
reference_blocks.push(child_header.hash);
@@ -133,7 +135,7 @@
continue;
}

- if !is_in_origin_children_future {
+ if !has_reachability_data {
continue;
}


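Tying the parents_builder.rs change together: a grandparent that already has reachability data serves as its own reference block, and only otherwise are origin's children loaded (once, lazily) and filtered down to those that list the grandparent among their parents at the current level. A simplified sketch of that selection, with hypothetical header and lookup types:

use std::collections::HashSet;

type Hash = u64;

// Hypothetical, simplified stand-ins for the real header and services.
struct Header {
    hash: Hash,
    parents_at_level: Vec<Hash>, // parents at the level being processed
}

fn reference_blocks_for(
    parent: Hash,
    has_reachability_data: impl Fn(Hash) -> bool,
    origin_children: &mut Option<Vec<Header>>,
    load_origin_children: impl FnOnce() -> Vec<Header>,
) -> Vec<Hash> {
    if has_reachability_data(parent) {
        // The parent itself can answer reachability queries.
        vec![parent]
    } else {
        // Lazily load origin's children only on this (rare) fallback path,
        // then keep the children that reference `parent` at this level.
        let children: &Vec<Header> = origin_children.get_or_insert_with(load_origin_children);
        children.iter().filter(|c| c.parents_at_level.contains(&parent)).map(|c| c.hash).collect()
    }
}

fn main() {
    let reachable: HashSet<Hash> = [7].into_iter().collect();
    let mut cache: Option<Vec<Header>> = None;
    let load = || vec![Header { hash: 100, parents_at_level: vec![8] }];

    assert_eq!(reference_blocks_for(7, |h| reachable.contains(&h), &mut cache, load), vec![7]);
    assert_eq!(reference_blocks_for(8, |h| reachable.contains(&h), &mut cache, load), vec![100]);
}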