Skip to content

Commit

Permalink
Rm integrity check from contracts & force_delayed_canonicalize (paritytech#35)
Browse files Browse the repository at this point in the history

* rm force_delayed_canonicalize fn

* rm integrity check
  • Loading branch information
kostekIV authored and lesniak43 committed Mar 11, 2024
1 parent 06be85f commit 803a52c
Show file tree
Hide file tree
Showing 2 changed files with 0 additions and 318 deletions.
251 changes: 0 additions & 251 deletions substrate/client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1390,56 +1390,6 @@ impl<Block: BlockT> Backend<Block> {
Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state })
}

// Performs forced canonicalization with a delay after importing a non-finalized block.
//
// Walks all block numbers from the last canonicalized block up to
// `best_number - canonicalization_delay` and canonicalizes each of them in the
// state-db, appending the resulting changes to `transaction`.
fn force_delayed_canonicalize(
    &self,
    transaction: &mut Transaction<DbHash>,
) -> ClientResult<()> {
    let best_canonical = match self.storage.state_db.last_canonicalized() {
        LastCanonicalized::None => 0,
        LastCanonicalized::Block(b) => b,
        // Nothing needs to be done when canonicalization is not happening.
        LastCanonicalized::NotCanonicalizing => return Ok(()),
    };

    let info = self.blockchain.info();
    // Reuse the already-fetched `info` instead of calling `self.blockchain.info()`
    // a second time (the original queried the backend twice for the same data).
    let best_number: u64 = info.best_number.saturated_into();

    for to_canonicalize in
        best_canonical + 1..=best_number.saturating_sub(self.canonicalization_delay)
    {
        let hash_to_canonicalize = sc_client_api::blockchain::HeaderBackend::hash(
            &self.blockchain,
            to_canonicalize.saturated_into(),
        )?
        .ok_or_else(|| {
            let best_hash = info.best_hash;

            sp_blockchain::Error::Backend(format!(
                "Can't canonicalize missing block number #{to_canonicalize} for best block {best_hash:?} (#{best_number})",
            ))
        })?;

        // If the state for this block is no longer available, stop without an
        // error (best-effort canonicalization).
        if !sc_client_api::Backend::have_state_at(
            self,
            hash_to_canonicalize,
            to_canonicalize.saturated_into(),
        ) {
            return Ok(())
        }

        trace!(target: "db", "Canonicalize block #{} ({:?})", to_canonicalize, hash_to_canonicalize);
        let commit = self.storage.state_db.canonicalize_block(&hash_to_canonicalize).map_err(
            sp_blockchain::Error::from_state_db::<
                sc_state_db::Error<sp_database::error::DatabaseError>,
            >,
        )?;
        apply_state_commit(transaction, commit);
    }

    Ok(())
}

fn try_commit_operation(&self, mut operation: BlockImportOperation<Block>) -> ClientResult<()> {
let mut transaction = Transaction::new();

Expand Down Expand Up @@ -1643,9 +1593,6 @@ impl<Block: BlockT> Backend<Block> {
operation.commit_state,
&mut current_transaction_justifications,
)?;
} else {
// canonicalize blocks which are old enough, regardless of finality.
self.force_delayed_canonicalize(&mut transaction)?
}

if !existing_header {
Expand Down Expand Up @@ -3980,204 +3927,6 @@ pub(crate) mod tests {
assert_eq!(backend.blockchain().info().best_hash, block2);
}

#[test]
fn force_delayed_canonicalize_waiting_for_blocks_to_be_finalized() {
    // Exercises forced delayed canonicalization on import of non-finalized blocks:
    // canonicalization must not happen until the db's own best block is far enough
    // ahead, and then kicks in regardless of finality.
    // NOTE(review): the second argument to `new_test_with_tx_storage` is presumably
    // the canonicalization delay — confirm against the constructor.
    let pruning_modes =
        [BlocksPruning::Some(10), BlocksPruning::KeepAll, BlocksPruning::KeepFinalized];

    // The behavior under test should hold for every pruning mode; only
    // `BlocksPruning::Some(_)` lets us observe `last_canonicalized` directly.
    for pruning_mode in pruning_modes {
        eprintln!("Running with pruning mode: {:?}", pruning_mode);

        let backend = Backend::<Block>::new_test_with_tx_storage(pruning_mode, 1);

        let genesis = insert_block(
            &backend,
            0,
            Default::default(),
            None,
            Default::default(),
            vec![],
            None,
        )
        .unwrap();

        // Import block #1 as a *normal* (non-best) block.
        let block1 = {
            let mut op = backend.begin_operation().unwrap();
            backend.begin_state_operation(&mut op, genesis).unwrap();
            let mut header = Header {
                number: 1,
                parent_hash: genesis,
                state_root: Default::default(),
                digest: Default::default(),
                extrinsics_root: Default::default(),
            };

            let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))];

            // Compute the real state root for the storage changes and patch it
            // into the header before committing.
            let (root, overlay) = op.old_state.storage_root(
                storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
                StateVersion::V1,
            );
            op.update_db_storage(overlay).unwrap();
            header.state_root = root.into();

            op.update_storage(storage, Vec::new()).unwrap();

            op.set_block_data(
                header.clone(),
                Some(Vec::new()),
                None,
                None,
                NewBlockState::Normal,
            )
            .unwrap();

            backend.commit_operation(op).unwrap();

            header.hash()
        };

        // Nothing should have been canonicalized beyond genesis yet.
        if matches!(pruning_mode, BlocksPruning::Some(_)) {
            assert_eq!(
                LastCanonicalized::Block(0),
                backend.storage.state_db.last_canonicalized()
            );
        }

        // This should not trigger any forced canonicalization as we didn't have imported any
        // best block by now.
        let block2 = {
            let mut op = backend.begin_operation().unwrap();
            backend.begin_state_operation(&mut op, block1).unwrap();
            let mut header = Header {
                number: 2,
                parent_hash: block1,
                state_root: Default::default(),
                digest: Default::default(),
                extrinsics_root: Default::default(),
            };

            let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 2]))];

            let (root, overlay) = op.old_state.storage_root(
                storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
                StateVersion::V1,
            );
            op.update_db_storage(overlay).unwrap();
            header.state_root = root.into();

            op.update_storage(storage, Vec::new()).unwrap();

            op.set_block_data(
                header.clone(),
                Some(Vec::new()),
                None,
                None,
                NewBlockState::Normal,
            )
            .unwrap();

            backend.commit_operation(op).unwrap();

            header.hash()
        };

        // Still only genesis canonicalized: no best block was imported.
        if matches!(pruning_mode, BlocksPruning::Some(_)) {
            assert_eq!(
                LastCanonicalized::Block(0),
                backend.storage.state_db.last_canonicalized()
            );
        }

        // This should also not trigger it yet, because we import a best block, but the best
        // block from the POV of the db is still at `0`.
        let block3 = {
            let mut op = backend.begin_operation().unwrap();
            backend.begin_state_operation(&mut op, block2).unwrap();
            let mut header = Header {
                number: 3,
                parent_hash: block2,
                state_root: Default::default(),
                digest: Default::default(),
                extrinsics_root: Default::default(),
            };

            let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 3]))];

            let (root, overlay) = op.old_state.storage_root(
                storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
                StateVersion::V1,
            );
            op.update_db_storage(overlay).unwrap();
            header.state_root = root.into();

            op.update_storage(storage, Vec::new()).unwrap();

            op.set_block_data(
                header.clone(),
                Some(Vec::new()),
                None,
                None,
                NewBlockState::Best,
            )
            .unwrap();

            backend.commit_operation(op).unwrap();

            header.hash()
        };

        // Now it should kick in.
        let block4 = {
            let mut op = backend.begin_operation().unwrap();
            backend.begin_state_operation(&mut op, block3).unwrap();
            let mut header = Header {
                number: 4,
                parent_hash: block3,
                state_root: Default::default(),
                digest: Default::default(),
                extrinsics_root: Default::default(),
            };

            let storage = vec![(vec![5, 5, 5], Some(vec![4, 5, 6, 4]))];

            let (root, overlay) = op.old_state.storage_root(
                storage.iter().map(|(k, v)| (k.as_slice(), v.as_ref().map(|v| &v[..]))),
                StateVersion::V1,
            );
            op.update_db_storage(overlay).unwrap();
            header.state_root = root.into();

            op.update_storage(storage, Vec::new()).unwrap();

            op.set_block_data(
                header.clone(),
                Some(Vec::new()),
                None,
                None,
                NewBlockState::Best,
            )
            .unwrap();

            backend.commit_operation(op).unwrap();

            header.hash()
        };

        // Importing best block #4 canonicalized up to block #2.
        if matches!(pruning_mode, BlocksPruning::Some(_)) {
            assert_eq!(
                LastCanonicalized::Block(2),
                backend.storage.state_db.last_canonicalized()
            );
        }

        // All four blocks must still be reachable by number.
        assert_eq!(block1, backend.blockchain().hash(1).unwrap().unwrap());
        assert_eq!(block2, backend.blockchain().hash(2).unwrap().unwrap());
        assert_eq!(block3, backend.blockchain().hash(3).unwrap().unwrap());
        assert_eq!(block4, backend.blockchain().hash(4).unwrap().unwrap());
    }
}

#[test]
fn test_pinned_blocks_on_finalize() {
let backend = Backend::<Block>::new_test_with_tx_storage(BlocksPruning::Some(1), 10);
Expand Down
67 changes: 0 additions & 67 deletions substrate/frame/contracts/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -437,73 +437,6 @@ pub mod pallet {
ContractInfo::<T>::process_deletion_queue_batch(remaining_weight)
.saturating_add(T::WeightInfo::on_process_deletion_queue_batch())
}

// Sanity-checks the pallet's configuration at build/startup time.
//
// Verifies that `MaxCodeLen` cannot exhaust the runtime's heap memory under
// the worst-case contract call stack, and that the debug buffer is large
// enough to hold a basic error message.
fn integrity_test() {
    Migration::<T>::integrity_test();

    // Total memory the runtime may use.
    let runtime_mem_limit: u32 = T::Schedule::get().limits.runtime_memory;
    // Per-contract wasmi value stack: 1Mb (the wasmi default).
    const VALUE_STACK_SIZE: u32 = 1024 * 1024;
    // Per-contract heap: normally 16 mempages of 64kb each = 1Mb.
    let heap_limit = T::Schedule::get().limits.max_memory_size();
    // Deepest possible call chain is `CallStack::size() + 1` frames.
    let call_depth = u32::try_from(T::CallStack::size().saturating_add(1))
        .expect("CallStack size is too big");

    // Derive the largest `MaxCodeLen` that cannot break the runtime heap limit.
    //
    // Worst case, decoding a Wasm blob blows it up x16 (a one-byte wasm
    // instruction occupies 16 bytes in wasmi), so we budget `MaxCodeLen * 16`.
    // The pallet additionally keeps the original blob around, adding one more
    // `MaxCodeLen` — 17 in total. The client's freeing-bump allocator can, in
    // extreme cases, inflate code allocations a further x4, for a combined
    // `17 * 4` multiplier. Each stack frame also needs its own heap and value
    // stack, and only half of the runtime memory is budgeted for the contract
    // call stack (the rest serves PoV and other facilities).
    //
    // Thus the invariant is:
    //
    // `(MaxCodeLen * 17 * 4 + MAX_STACK_SIZE + max_heap_size) * max_call_depth <
    // max_runtime_mem/2`
    //
    // which rearranges to the following bound on `MaxCodeLen`:
    let code_len_limit = runtime_mem_limit
        .saturating_div(2)
        .saturating_div(call_depth)
        .saturating_sub(heap_limit)
        .saturating_sub(VALUE_STACK_SIZE)
        .saturating_div(17 * 4);

    assert!(
        T::MaxCodeLen::get() < code_len_limit,
        "Given `CallStack` height {:?}, `MaxCodeLen` should be set less than {:?} \
        (current value is {:?}), to avoid possible runtime oom issues.",
        call_depth,
        code_len_limit,
        T::MaxCodeLen::get(),
    );

    // A simple error message must fit into the debug buffer.
    const DEBUG_BUF_MIN: u32 = 256;
    assert!(
        T::MaxDebugBufferLen::get() > DEBUG_BUF_MIN,
        "Debug buffer should have minimum size of {} (current setting is {})",
        DEBUG_BUF_MIN,
        T::MaxDebugBufferLen::get(),
    )
}
}

#[pallet::call]
Expand Down

0 comments on commit 803a52c

Please sign in to comment.