This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Remove discarded blocks and states from database by default #11983

Merged
Changes from 17 commits
Commits (25)
bc5ba8e
1.Add pruning param "canonical" in sc-cli.
hzy1919 Aug 5, 2022
d762b63
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Aug 5, 2022
e425634
Update tests in sc-state-db.
hzy1919 Aug 8, 2022
70b21b8
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Aug 8, 2022
124f586
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Aug 8, 2022
e2c5674
Update tests in sc-state-db.
hzy1919 Aug 8, 2022
7bff3cb
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Aug 19, 2022
da0c426
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 1, 2022
c519937
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 8, 2022
3f03c82
1.Add a new value `AllWithNonFinalized` in `enum BlocksPruning` which…
hzy1919 Sep 9, 2022
940d9db
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 9, 2022
458e078
Make some corresponding adjustments based on the content in the conve…
hzy1919 Sep 10, 2022
a228701
Update client/db/src/lib.rs
hzy1919 Sep 14, 2022
6ae25a1
Apply suggestions from code review.
hzy1919 Sep 14, 2022
54a5a2b
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 15, 2022
9e3e8eb
1.Change `blocks_pruning` to be like `state_pruning` .
hzy1919 Sep 21, 2022
ac58113
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 22, 2022
4c3810c
Fmt and add some doc.
hzy1919 Sep 24, 2022
f4ea09b
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 24, 2022
2b0c25c
Update client/cli/src/params/pruning_params.rs
hzy1919 Sep 25, 2022
6cbc62e
Update client/cli/src/params/pruning_params.rs
hzy1919 Sep 25, 2022
df5ffec
Update doc.
hzy1919 Sep 25, 2022
cb62c23
Change `new_test_with_tx_storage` to take `BlocksPruning`.
hzy1919 Sep 25, 2022
79afe8b
Merge branch 'master' into Remove-discarded-blocks-and-states-from-da…
hzy1919 Sep 25, 2022
3dff0f0
Fmt
hzy1919 Sep 25, 2022
2 changes: 1 addition & 1 deletion bin/node/cli/benches/block_production.rs
@@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
trie_cache_maximum_size: Some(64 * 1024 * 1024),
state_pruning: Some(PruningMode::ArchiveAll),
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepAll,
chain_spec: spec,
wasm_method: WasmExecutionMethod::Compiled {
instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite,
2 changes: 1 addition & 1 deletion bin/node/cli/benches/transaction_pool.rs
@@ -68,7 +68,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase {
database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
trie_cache_maximum_size: Some(64 * 1024 * 1024),
state_pruning: Some(PruningMode::ArchiveAll),
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepAll,
chain_spec: spec,
wasm_method: WasmExecutionMethod::Interpreted,
// NOTE: we enforce the use of the native runtime to make the errors more debuggable
2 changes: 1 addition & 1 deletion bin/node/testing/src/bench.rs
@@ -392,7 +392,7 @@ impl BenchDb {
trie_cache_maximum_size: Some(16 * 1024 * 1024),
state_pruning: Some(PruningMode::ArchiveAll),
source: database_type.into_settings(dir.into()),
blocks_pruning: sc_client_db::BlocksPruning::All,
blocks_pruning: sc_client_db::BlocksPruning::KeepAll,
};
let task_executor = TaskExecutor::new();

4 changes: 2 additions & 2 deletions client/cli/src/config.rs
@@ -251,11 +251,11 @@ pub trait CliConfiguration<DCV: DefaultConfigurationValues = ()>: Sized {
/// Get the block pruning mode.
///
/// By default this is retrieved from `block_pruning` if it is available. Otherwise its
/// `BlocksPruning::All`.
/// `BlocksPruning::KeepFinalized`.
fn blocks_pruning(&self) -> Result<BlocksPruning> {
self.pruning_params()
.map(|x| x.blocks_pruning())
.unwrap_or_else(|| Ok(BlocksPruning::All))
.unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized))
}

/// Get the chain ID (string).
25 changes: 18 additions & 7 deletions client/cli/src/params/pruning_params.rs
@@ -32,11 +32,12 @@ pub struct PruningParams {
pub state_pruning: Option<String>,
Contributor: The doc for this param should be updated with the new final option.

/// Specify the number of finalized blocks to keep in the database.
///
/// Default is to keep all blocks.
/// Default is to keep all of finalized blocks.
Contributor: Here we should add that 0 keeps all blocks. Generally this is not very intuitive. Maybe we should do something like state_pruning: use the same strings, 'archive' for 'keep_all' and 'final' for 'keep_finalized' (subject to change of names too).

Contributor Author: Hmm, it's really not very intuitive that 0 keeps all blocks. IMO it's a compromise approach for now. I agree with changing blocks_pruning to be like state_pruning.

hzy1919 marked this conversation as resolved.
/// 0 keep all blocks.
///
/// NOTE: only finalized blocks are subject for removal!
#[clap(alias = "keep-blocks", long, value_name = "COUNT")]
pub blocks_pruning: Option<u32>,
pub blocks_pruning: Option<String>,
Member: You also need to document the options available.

}

impl PruningParams {
@@ -46,19 +47,29 @@ impl PruningParams {
.as_ref()
.map(|s| match s.as_str() {
"archive" => Ok(PruningMode::ArchiveAll),
"archive-canonical" => Ok(PruningMode::ArchiveCanonical),
bc => bc
.parse()
.map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))
.map_err(|_| error::Error::Input("Invalid state pruning mode specified".to_string()))
.map(PruningMode::blocks_pruning),
})
.transpose()
}

/// Get the block pruning value from the parameters
pub fn blocks_pruning(&self) -> error::Result<BlocksPruning> {
Ok(match self.blocks_pruning {
Some(n) => BlocksPruning::Some(n),
None => BlocksPruning::All,
})
match self.blocks_pruning.as_ref() {
Some(bp) => {
match bp.as_str() {
"archive" => Ok(BlocksPruning::KeepAll),
"archive-canonical" => Ok(BlocksPruning::KeepFinalized),
bc => bc
.parse()
.map_err(|_| error::Error::Input("Invalid blocks pruning mode specified".to_string()))
.map(|n| BlocksPruning::Some(n)),
}
},
None => Ok(BlocksPruning::KeepFinalized),
}
}
}
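
For readers skimming this file's diff, here is a minimal, self-contained sketch (not part of the PR) of how the new string-valued `--blocks-pruning` flag resolves, mirroring the `blocks_pruning()` method above. `BlocksPruningSketch` and `parse_blocks_pruning` are hypothetical stand-ins for `sc_client_db::BlocksPruning` and the clap-backed parameter handling.

```rust
/// Hypothetical stand-in for `sc_client_db::BlocksPruning`, used only for this sketch.
#[derive(Debug, PartialEq)]
enum BlocksPruningSketch {
	/// Keep every imported block (`--blocks-pruning archive`).
	KeepAll,
	/// Keep all finalized blocks, prune displaced forks (`--blocks-pruning archive-canonical`).
	KeepFinalized,
	/// Keep only the last N finalized blocks (`--blocks-pruning <COUNT>`).
	Some(u32),
}

/// Mirrors `PruningParams::blocks_pruning` above: the two strings map to the new
/// variants, a bare number keeps that many finalized blocks, and omitting the flag
/// defaults to `KeepFinalized` instead of keeping everything.
fn parse_blocks_pruning(arg: Option<&str>) -> Result<BlocksPruningSketch, String> {
	match arg {
		Some("archive") => Ok(BlocksPruningSketch::KeepAll),
		Some("archive-canonical") => Ok(BlocksPruningSketch::KeepFinalized),
		Some(count) => count
			.parse()
			.map(BlocksPruningSketch::Some)
			.map_err(|_| "Invalid blocks pruning mode specified".to_string()),
		None => Ok(BlocksPruningSketch::KeepFinalized),
	}
}

fn main() {
	assert_eq!(parse_blocks_pruning(Some("archive")), Ok(BlocksPruningSketch::KeepAll));
	assert_eq!(parse_blocks_pruning(Some("1000")), Ok(BlocksPruningSketch::Some(1000)));
	// No flag: the new default keeps finalized blocks only, as announced in the PR title.
	assert_eq!(parse_blocks_pruning(None), Ok(BlocksPruningSketch::KeepFinalized));
}
```

In short, omitting the flag now behaves like `--blocks-pruning archive-canonical` rather than keeping every imported block.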
2 changes: 1 addition & 1 deletion client/db/benches/state_access.rs
@@ -122,7 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend<Block>
trie_cache_maximum_size,
state_pruning: Some(PruningMode::ArchiveAll),
source: DatabaseSource::ParityDb { path },
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepAll,
};

Backend::new(settings, 100).expect("Creates backend")
205 changes: 176 additions & 29 deletions client/db/src/lib.rs
@@ -320,10 +320,12 @@ pub struct DatabaseSettings {
}

/// Block pruning settings.
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BlocksPruning {
/// Keep full block history.
All,
/// Keep full block history, of every block that was ever imported.
KeepAll,
/// Keep full finalized block history.
KeepFinalized,
/// Keep N recent finalized blocks.
Some(u32),
}
@@ -1078,6 +1080,24 @@

Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
}
#[cfg(any(test, feature = "test-helpers"))]
pub fn new_test_with_tx_storage_2(blocks_pruning: BlocksPruning, canonicalization_delay: u64) -> Self {
let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS);
let db = sp_database::as_database(db);
let state_pruning = match blocks_pruning {
BlocksPruning::KeepAll => PruningMode::ArchiveAll ,
BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical ,
BlocksPruning::Some(n) => PruningMode::blocks_pruning(n) ,
};
let db_setting = DatabaseSettings {
trie_cache_maximum_size: Some(16 * 1024 * 1024),
state_pruning: Some(state_pruning),
source: DatabaseSource::Custom { db, require_create_flag: true },
blocks_pruning: blocks_pruning,
};

Self::new(db_setting, canonicalization_delay).expect("failed to create test-db")
}

/// Expose the Database that is used by this backend.
/// The second argument is the Column that stores the State.
@@ -1707,32 +1727,47 @@
finalized: NumberFor<Block>,
displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
) -> ClientResult<()> {
if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning {
// Always keep the last finalized block
let keep = std::cmp::max(blocks_pruning, 1);
if finalized >= keep.into() {
let number = finalized.saturating_sub(keep.into());
self.prune_block(transaction, BlockId::<Block>::number(number))?;
}
match self.blocks_pruning {
BlocksPruning::KeepAll => {},
BlocksPruning::Some(blocks_pruning) => {
// Always keep the last finalized block
let keep = std::cmp::max(blocks_pruning, 1);
if finalized >= keep.into() {
let number = finalized.saturating_sub(keep.into());
self.prune_block(transaction, BlockId::<Block>::number(number))?;
}
self.prune_displaced_branches(transaction, finalized, displaced)?;
},
BlocksPruning::KeepFinalized => {
self.prune_displaced_branches(transaction, finalized, displaced)?;
},
}
Ok(())
}

// Also discard all blocks from displaced branches
for h in displaced.leaves() {
let mut number = finalized;
let mut hash = *h;
// Follow displaced chains back until we reach a finalized block.
// Since leaves are discarded due to finality, they can't have parents
// that are canonical, but not yet finalized. So we stop deleting as soon as
// we reach canonical chain.
while self.blockchain.hash(number)? != Some(hash) {
let id = BlockId::<Block>::hash(hash);
match self.blockchain.header(id)? {
Some(header) => {
self.prune_block(transaction, id)?;
number = header.number().saturating_sub(One::one());
hash = *header.parent_hash();
},
None => break,
}
fn prune_displaced_branches(
&self,
transaction: &mut Transaction<DbHash>,
finalized: NumberFor<Block>,
displaced: &FinalizationOutcome<Block::Hash, NumberFor<Block>>,
) -> ClientResult<()> {
// Discard all blocks from displaced branches
for h in displaced.leaves() {
let mut number = finalized;
let mut hash = *h;
// Follow displaced chains back until we reach a finalized block.
// Since leaves are discarded due to finality, they can't have parents
// that are canonical, but not yet finalized. So we stop deleting as soon as
// we reach canonical chain.
while self.blockchain.hash(number)? != Some(hash) {
let id = BlockId::<Block>::hash(hash);
match self.blockchain.header(id)? {
Some(header) => {
self.prune_block(transaction, id)?;
number = header.number().saturating_sub(One::one());
hash = *header.parent_hash();
},
None => break,
}
}
}
@@ -1752,6 +1787,13 @@
columns::BODY,
id,
)?;
utils::remove_from_db(
transaction,
&*self.storage.db,
columns::KEY_LOOKUP,
columns::JUSTIFICATIONS,
id,
)?;
Contributor: Might be worth mentioning in the release notes that justifications are now pruned together with the block (in addition to the new parameters). But looks like a good thing to me 👍

if let Some(index) =
read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)?
{
@@ -2506,7 +2548,7 @@
trie_cache_maximum_size: Some(16 * 1024 * 1024),
state_pruning: Some(PruningMode::blocks_pruning(1)),
source: DatabaseSource::Custom { db: backing, require_create_flag: false },
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepFinalized,
},
0,
)
@@ -3209,6 +3251,111 @@
assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap());
assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
}

#[test]
fn prune_blocks_on_finalize_in_keep_all() {
let backend = Backend::<Block>::new_test_with_tx_storage_2(BlocksPruning::KeepAll, 0);
let mut blocks = Vec::new();
let mut prev_hash = Default::default();
for i in 0..5 {
let hash = insert_block(
&backend,
i,
prev_hash,
None,
Default::default(),
vec![i.into()],
None,
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}

let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap();
for i in 1..3 {
op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap();
}
backend.commit_operation(op).unwrap();

let bc = backend.blockchain();
assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap());
assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap());
assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap());
assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap());
assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());
}

#[test]
fn prune_blocks_on_finalize_with_fork_in_keep_all() {
let backend = Backend::<Block>::new_test_with_tx_storage_2(BlocksPruning::KeepAll, 10);
let mut blocks = Vec::new();
let mut prev_hash = Default::default();
for i in 0..5 {
let hash = insert_block(
&backend,
i,
prev_hash,
None,
Default::default(),
vec![i.into()],
None,
)
.unwrap();
blocks.push(hash);
prev_hash = hash;
}

// insert a fork at block 2
let fork_hash_root = insert_block(
&backend,
2,
blocks[1],
None,
sp_core::H256::random(),
vec![2.into()],
None,
)
.unwrap();
insert_block(
&backend,
3,
fork_hash_root,
None,
H256::random(),
vec![3.into(), 11.into()],
None,
)
.unwrap();

let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap();
op.mark_head(BlockId::Hash(blocks[4])).unwrap();
backend.commit_operation(op).unwrap();

let bc = backend.blockchain();
assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap());

for i in 1..5 {
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap();
op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap();
backend.commit_operation(op).unwrap();
}

assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap());
assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap());
assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap());
assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap());
assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap());

assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap());
assert_eq!(bc.info().best_number, 4);
for i in 0..5 {
assert!(bc.hash(i).unwrap().is_some());
}
}

#[test]
fn prune_blocks_on_finalize_with_fork() {
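
To summarize the behavioral change in client/db/src/lib.rs (a simplified sketch under assumed names, not code from the PR): on finalization the backend now dispatches on the configured `BlocksPruning` mode. `KeepAll` never removes anything, `KeepFinalized` only removes displaced forks, and `Some(n)` additionally drops finalized blocks that fall outside the retention window. The helper below ignores displaced-branch pruning and only computes which finalized block number, if any, falls out of the window.

```rust
/// Illustrative stand-in for the `BlocksPruning` enum added in this PR.
enum BlocksPruning {
	KeepAll,
	KeepFinalized,
	Some(u32),
}

/// Which finalized block number (if any) is pruned when block `finalized` becomes final.
/// Displaced (non-canonical) branches are pruned separately in every mode except
/// `KeepAll`; this sketch leaves them out for brevity.
fn pruned_on_finalize(mode: &BlocksPruning, finalized: u64) -> Option<u64> {
	match mode {
		// Archive node: every imported block is kept forever.
		BlocksPruning::KeepAll => None,
		// New default: all finalized blocks are kept, only displaced forks are dropped.
		BlocksPruning::KeepFinalized => None,
		// Windowed pruning: always keep at least the last finalized block.
		BlocksPruning::Some(keep) => {
			let keep = u64::from(*keep).max(1);
			(finalized >= keep).then(|| finalized - keep)
		},
	}
}

fn main() {
	assert_eq!(pruned_on_finalize(&BlocksPruning::Some(2), 10), Some(8));
	assert_eq!(pruned_on_finalize(&BlocksPruning::KeepFinalized, 10), None);
	assert_eq!(pruned_on_finalize(&BlocksPruning::KeepAll, 10), None);
}
```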
4 changes: 2 additions & 2 deletions client/service/test/src/client/mod.rs
@@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() {
DatabaseSettings {
trie_cache_maximum_size: Some(1 << 20),
state_pruning: Some(PruningMode::ArchiveAll),
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepAll,
source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
},
u64::MAX,
@@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() {
DatabaseSettings {
trie_cache_maximum_size: Some(1 << 20),
state_pruning: Some(PruningMode::blocks_pruning(1)),
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepFinalized,
source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 },
},
u64::MAX,
2 changes: 1 addition & 1 deletion client/service/test/src/lib.rs
@@ -237,7 +237,7 @@ fn node_config<
database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 },
trie_cache_maximum_size: Some(16 * 1024 * 1024),
state_pruning: Default::default(),
blocks_pruning: BlocksPruning::All,
blocks_pruning: BlocksPruning::KeepFinalized,
chain_spec: Box::new((*spec).clone()),
wasm_method: sc_service::config::WasmExecutionMethod::Interpreted,
wasm_runtime_overrides: Default::default(),