diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs
index 6186a45f3d646..b6106ee81e839 100644
--- a/client/state-db/src/lib.rs
+++ b/client/state-db/src/lib.rs
@@ -56,6 +56,8 @@ use std::{
 	fmt,
 };
 
+const LOG_TARGET: &str = "state-db";
+const LOG_TARGET_PIN: &str = "state-db::pin";
 const PRUNING_MODE: &[u8] = b"mode";
 const PRUNING_MODE_ARCHIVE: &[u8] = b"archive";
 const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical";
@@ -309,7 +311,7 @@ impl<BlockHash: Hash, Key: Hash, D: MetaDb> StateDbSync<BlockHash, Key, D> {
 		ref_counting: bool,
 		db: D,
 	) -> Result<StateDbSync<BlockHash, Key, D>, Error<D::Error>> {
-		trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
+		trace!(target: LOG_TARGET, "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting);
 
 		let non_canonical: NonCanonicalOverlay<BlockHash, Key> = NonCanonicalOverlay::new(&db)?;
 		let pruning: Option<RefWindow<BlockHash, Key, D>> = match mode {
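The two new constants give the crate a single definition of its log targets, so every module filters under the same names. A standalone sketch of how such target constants are consumed (not part of the diff; `env_logger` is assumed purely for the demo, while a Substrate node exposes the same filtering through its CLI, e.g. `-l state-db=trace`):

```rust
// Hypothetical demo crate, not the state-db code: shows how log targets
// defined as constants are filtered at runtime via the `log` facade.
const LOG_TARGET: &str = "state-db";
const LOG_TARGET_PIN: &str = "state-db::pin";

fn main() {
	// Honours RUST_LOG, e.g. `RUST_LOG=state-db=trace,state-db::pin=trace`.
	env_logger::init();

	log::trace!(target: LOG_TARGET, "StateDb settings: {:?}. Ref-counting: {}", "ArchiveCanonical", true);
	log::trace!(target: LOG_TARGET_PIN, "Pinned non-canon block: {:?}", "0xdeadbeef");
}
```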
{})", number, front_block_number, front_block_number + self.levels.len() as u64, @@ -284,7 +297,7 @@ impl NonCanonicalOverlay { if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { trace!( - target: "state-db", + target: LOG_TARGET, "Too many sibling blocks at #{number}: {:?}", level.blocks.iter().map(|b| &b.hash).collect::>() ); @@ -314,7 +327,15 @@ impl NonCanonicalOverlay { deleted: changeset.deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len()); + trace!( + target: LOG_TARGET, + "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", + number, + index, + hash, + journal_record.inserted.len(), + journal_record.deleted.len() + ); insert_values(&mut self.values, journal_record.inserted); Ok(commit) } @@ -368,7 +389,7 @@ impl NonCanonicalOverlay { hash: &BlockHash, commit: &mut CommitSet, ) -> Result { - trace!(target: "state-db", "Canonicalizing {:?}", hash); + trace!(target: LOG_TARGET, "Canonicalizing {:?}", hash); let level = match self.levels.pop_front() { Some(level) => level, None => return Err(StateDbError::InvalidBlock), @@ -432,7 +453,7 @@ impl NonCanonicalOverlay { .meta .inserted .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); - trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); + trace!(target: LOG_TARGET, "Discarding {} records", commit.meta.deleted.len()); let num = canonicalized.1; self.last_canonicalized = Some(canonicalized); @@ -479,7 +500,7 @@ impl NonCanonicalOverlay { }; // Check that it does not have any children if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { - log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); + log::debug!(target: LOG_TARGET, "Trying to remove block {:?} with children", hash); return None } let overlay = level.remove(index); @@ -502,7 +523,7 @@ impl NonCanonicalOverlay { pub fn pin(&mut self, hash: &BlockHash) { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { - trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Pinned non-canon block: {:?}", hash); } *refs += 1; } @@ -531,7 +552,11 @@ impl NonCanonicalOverlay { entry.get_mut().1 -= 1; if entry.get().1 == 0 { let (inserted, _) = entry.remove(); - trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash); + trace!( + target: LOG_TARGET_PIN, + "Discarding unpinned non-canon block: {:?}", + hash + ); discard_values(&mut self.values, inserted); self.parents.remove(&hash); } diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index d942fb2428b6a..16561bbe0ffd5 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,7 +26,7 @@ use crate::{ noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError, - DEFAULT_MAX_BLOCK_CONSTRAINT, + DEFAULT_MAX_BLOCK_CONSTRAINT, LOG_TARGET, }; use codec::{Decode, Encode}; use log::trace; @@ -79,14 +79,24 @@ impl DeathRowQueue { death_index: HashMap::new(), }; // read the journal - trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base); + trace!( + target: LOG_TARGET, + "Reading pruning journal for the memory queue. 
Pending #{}", + base, + ); loop { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(Error::Db)? { Some(record) => { let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + trace!( + target: LOG_TARGET, + "Pruning journal entry {} ({} inserted, {} deleted)", + block, + record.inserted.len(), + record.deleted.len(), + ); queue.import(base, block, record); }, None => break, @@ -107,7 +117,11 @@ impl DeathRowQueue { // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; let mut cache = VecDeque::with_capacity(cache_capacity); - trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base); + trace!( + target: LOG_TARGET, + "Reading pruning journal for the database-backed queue. Pending #{}", + base + ); DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?; Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last }) } @@ -115,13 +129,13 @@ impl DeathRowQueue { /// import a new block to the back of the queue fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord) { let JournalRecord { hash, inserted, deleted } = journal_record; - trace!(target: "state-db", "Importing {}, base={}", num, base); + trace!(target: LOG_TARGET, "Importing {}, base={}", num, base); match self { DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => { // If the new block continues cached range and there is space, load it directly into // cache. if num == base + cache.len() as u64 && cache.len() < *cache_capacity { - trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num); + trace!(target: LOG_TARGET, "Adding to DB backed cache {:?} (#{})", hash, num); cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); } *last = Some(num); @@ -306,6 +320,18 @@ impl RefWindow { }; let queue = if count_insertions { + // Highly scientific crafted number for deciding when to print the warning! + // + // Rocksdb doesn't support refcounting and requires that we load the entire pruning + // window into the memory. + if window_size > 1000 { + log::warn!( + target: LOG_TARGET, + "Large pruning window of {window_size} detected! THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. \ + Reduce the pruning window or switch your database to paritydb." + ); + } + DeathRowQueue::new_mem(&db, base)? } else { let last = match last_canonicalized_number {