
Introduce block authorship soft deadline (#9663)
* Soft limit.

* Add soft deadline tests.

* cargo +nightly fmt --all

* Fix sc-service test.

* Improving tests
tomusdrw authored Oct 4, 2021
1 parent a30e1b2 commit 125092f
Showing 6 changed files with 231 additions and 25 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

186 changes: 178 additions & 8 deletions client/basic-authorship/src/basic_authorship.rs
@@ -286,6 +286,11 @@ where
}
}

/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;

impl<A, B, Block, C, PR> Proposer<B, Block, C, A, PR>
where
A: TransactionPool<Block = Block>,
@@ -309,11 +314,6 @@ where
block_size_limit: Option<usize>,
) -> Result<Proposal<Block, backend::TransactionFor<B, Block>, PR::Proof>, sp_blockchain::Error>
{
/// If the block is full we will attempt to push at most
/// this number of transactions before quitting for real.
/// It allows us to increase block utilization.
const MAX_SKIPPED_TRANSACTIONS: usize = 8;

let mut block_builder =
self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?;

@@ -336,6 +336,9 @@ where
}

// proceed with transactions
// We calculate soft deadline used only in case we start skipping transactions.
let now = (self.now)();
let soft_deadline = now + deadline.saturating_duration_since(now) / 2;
let block_timer = time::Instant::now();
let mut skipped = 0;
let mut unqueue_invalid = Vec::new();
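For context, the soft deadline computed in the hunk above lands halfway between the current time and the hard deadline handed to `propose`. A minimal standalone sketch of the same arithmetic (the two-second budget is an arbitrary illustrative value, not taken from the diff):

use std::time::{Duration, Instant};

fn main() {
    // Illustrative budget: pretend block authorship was given 2 seconds in total.
    let now = Instant::now();
    let deadline = now + Duration::from_secs(2);

    // Same formula as in the diff: halfway between `now` and `deadline`.
    // `saturating_duration_since` yields a zero duration if `deadline` already passed,
    // so the soft deadline can never end up later than the hard one.
    let soft_deadline = now + deadline.saturating_duration_since(now) / 2;

    assert!(soft_deadline <= deadline);
    // Roughly one second from `now` in this example.
    println!("time until soft deadline: {:?}", soft_deadline.saturating_duration_since(now));
}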
@@ -364,7 +367,8 @@ where
let mut hit_block_size_limit = false;

while let Some(pending_tx) = pending_iterator.next() {
if (self.now)() > deadline {
let now = (self.now)();
if now > deadline {
debug!(
"Consensus deadline reached when pushing block transactions, \
proceeding with proposing."
@@ -387,6 +391,13 @@ where
MAX_SKIPPED_TRANSACTIONS - skipped,
);
continue
} else if now < soft_deadline {
debug!(
"Transaction would overflow the block size limit, \
but we still have time before the soft deadline, so \
we will try a bit more."
);
continue
} else {
debug!("Reached block size limit, proceeding with proposing.");
hit_block_size_limit = true;
@@ -408,6 +419,11 @@ where
"Block seems full, but will try {} more transactions before quitting.",
MAX_SKIPPED_TRANSACTIONS - skipped,
);
} else if (self.now)() < soft_deadline {
debug!(
"Block seems full, but we still have time before the soft deadline, \
so we will try a bit more before quitting."
);
} else {
debug!("Block is full, proceed with proposing.");
break
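Taken together, the two new `soft_deadline` branches change what happens when a transaction fails with `ExhaustsResources` or would overflow the optional block size limit. A simplified sketch of the resulting decision, using an assumed helper name (`keep_scanning`) that does not exist in the diff; the real loop additionally records invalid transactions and logs each case:

use std::time::{Duration, Instant};

const MAX_SKIPPED_TRANSACTIONS: usize = 8;

// Decide whether to keep scanning the pool after a transaction did not fit.
fn keep_scanning(skipped: &mut usize, now: Instant, soft_deadline: Instant) -> bool {
    if *skipped < MAX_SKIPPED_TRANSACTIONS {
        // Still within the skip budget: ignore this transaction and try the next one.
        *skipped += 1;
        true
    } else if now < soft_deadline {
        // Skip budget exhausted, but there is still time before the soft deadline,
        // so keep trying a bit more to improve block utilization.
        true
    } else {
        // Past the soft deadline with no skips left: stop and propose the block as-is.
        false
    }
}

fn main() {
    let mut skipped = 0;
    let now = Instant::now();
    let soft_deadline = now + Duration::from_secs(1);
    assert!(keep_scanning(&mut skipped, now, soft_deadline));
}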
Expand Down Expand Up @@ -493,6 +509,7 @@ mod tests {
use sp_api::Core;
use sp_blockchain::HeaderBackend;
use sp_consensus::{BlockOrigin, Environment, Proposer};
use sp_core::Pair;
use sp_runtime::traits::NumberFor;
use substrate_test_runtime_client::{
prelude::*,
@@ -512,6 +529,19 @@ mod tests {
.into_signed_tx()
}

fn exhausts_resources_extrinsic_from(who: usize) -> Extrinsic {
let pair = AccountKeyring::numeric(who);
let transfer = Transfer {
// increase the amount to bump priority
amount: 1,
nonce: 0,
from: pair.public(),
to: Default::default(),
};
let signature = pair.sign(&transfer.encode()).into();
Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: true }
}

fn chain_event<B: BlockT>(header: B::Header) -> ChainEvent<B>
where
NumberFor<B>: From<u64>,
@@ -557,7 +587,7 @@ mod tests {
return value.1
}
let old = value.1;
let new = old + time::Duration::from_secs(2);
let new = old + time::Duration::from_secs(1);
*value = (true, new);
old
}),
@@ -730,8 +760,8 @@

// then
// block should have some extrinsics although we have some more in the pool.
assert_eq!(block.extrinsics().len(), expected_block_extrinsics);
assert_eq!(txpool.ready().count(), expected_pool_transactions);
assert_eq!(block.extrinsics().len(), expected_block_extrinsics);

block
};
@@ -744,6 +774,7 @@ mod tests {
.expect("there should be header"),
)),
);
assert_eq!(txpool.ready().count(), 7);

// let's create one block and import it
let block = propose_block(&client, 0, 2, 7);
@@ -757,6 +788,7 @@ mod tests {
.expect("there should be header"),
)),
);
assert_eq!(txpool.ready().count(), 5);

// now let's make sure that we can still make some progress
let block = propose_block(&client, 1, 2, 5);
@@ -849,4 +881,142 @@ mod tests {
// block size and thus, one less transaction should fit into the limit.
assert_eq!(block.extrinsics().len(), extrinsics_num - 2);
}

#[test]
fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() {
// given
let client = Arc::new(substrate_test_runtime_client::new());
let spawner = sp_core::testing::TaskExecutor::new();
let txpool = BasicPool::new_full(
Default::default(),
true.into(),
None,
spawner.clone(),
client.clone(),
);

block_on(
txpool.submit_at(
&BlockId::number(0),
SOURCE,
// add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources
(0..MAX_SKIPPED_TRANSACTIONS * 2)
.into_iter()
.map(|i| exhausts_resources_extrinsic_from(i))
// and some transactions that are okay.
.chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _)))
.collect(),
),
)
.unwrap();

block_on(
txpool.maintain(chain_event(
client
.header(&BlockId::Number(0u64))
.expect("header get error")
.expect("there should be header"),
)),
);
assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3);

let mut proposer_factory =
ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

let cell = Mutex::new(time::Instant::now());
let proposer = proposer_factory.init_with_now(
&client.header(&BlockId::number(0)).unwrap().unwrap(),
Box::new(move || {
let mut value = cell.lock();
let old = *value;
*value = old + time::Duration::from_secs(1);
old
}),
);

// when
// give it enough time so that deadline is never triggered.
let deadline = time::Duration::from_secs(900);
let block =
block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
.map(|r| r.block)
.unwrap();

// then block should have all non-exhaust resources extrinsics (+ the first one).
assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1);
}

#[test]
fn should_only_skip_up_to_some_limit_after_soft_deadline() {
// given
let client = Arc::new(substrate_test_runtime_client::new());
let spawner = sp_core::testing::TaskExecutor::new();
let txpool = BasicPool::new_full(
Default::default(),
true.into(),
None,
spawner.clone(),
client.clone(),
);

block_on(
txpool.submit_at(
&BlockId::number(0),
SOURCE,
(0..MAX_SKIPPED_TRANSACTIONS + 2)
.into_iter()
.map(|i| exhausts_resources_extrinsic_from(i))
// and some transactions that are okay.
.chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _)))
.collect(),
),
)
.unwrap();

block_on(
txpool.maintain(chain_event(
client
.header(&BlockId::Number(0u64))
.expect("header get error")
.expect("there should be header"),
)),
);
assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 2);

let mut proposer_factory =
ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None);

let deadline = time::Duration::from_secs(600);
let cell = Arc::new(Mutex::new((0, time::Instant::now())));
let cell2 = cell.clone();
let proposer = proposer_factory.init_with_now(
&client.header(&BlockId::number(0)).unwrap().unwrap(),
Box::new(move || {
let mut value = cell.lock();
let (called, old) = *value;
// add time after deadline is calculated internally (hence 1)
let increase = if called == 1 {
// we start after the soft_deadline should have already been reached.
deadline / 2
} else {
// but we make sure to never reach the actual deadline
time::Duration::from_millis(0)
};
*value = (called + 1, old + increase);
old
}),
);

let block =
block_on(proposer.propose(Default::default(), Default::default(), deadline, None))
.map(|r| r.block)
.unwrap();

// then the block should have no transactions despite some in the pool
assert_eq!(block.extrinsics().len(), 1);
assert!(
cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS,
"Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline"
);
}
}
1 change: 1 addition & 0 deletions client/service/test/Cargo.toml
@@ -12,6 +12,7 @@ repository = "https://github.com/paritytech/substrate/"
targets = ["x86_64-unknown-linux-gnu"]

[dependencies]
hex = "0.4"
hex-literal = "0.3.1"
tempfile = "3.1.0"
tokio = { version = "1.10.0", features = ["time"] }
41 changes: 30 additions & 11 deletions client/service/test/src/client/mod.rs
@@ -1759,12 +1759,21 @@ fn storage_keys_iter_works() {
let res: Vec<_> = client
.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None)
.unwrap()
.take(2)
.map(|x| x.0)
.take(8)
.map(|x| hex::encode(&x.0))
.collect();
assert_eq!(
res,
[hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]
[
"00c232cf4e70a5e343317016dc805bf80a6a8cd8ad39958d56f99891b07851e0",
"085b2407916e53a86efeb8b72dbe338c4b341dab135252f96b6ed8022209b6cb",
"0befda6e1ca4ef40219d588a727f1271",
"1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34",
"1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824",
"237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204",
"29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729",
"3a636f6465",
]
);

let res: Vec<_> = client
@@ -1774,15 +1783,19 @@
Some(&StorageKey(hex!("3a636f6465").to_vec())),
)
.unwrap()
.take(3)
.map(|x| x.0)
.take(7)
.map(|x| hex::encode(&x.0))
.collect();
assert_eq!(
res,
[
hex!("3a686561707061676573").to_vec(),
hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(),
hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(),
"3a686561707061676573",
"52008686cc27f6e5ed83a216929942f8bcd32a396f09664a5698f81371934b56",
"5348d72ac6cc66e5d8cbecc27b0e0677503b845fe2382d819f83001781788fd5",
"5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133",
"6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081",
"66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808",
"79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d",
]
);

Expand All @@ -1795,12 +1808,18 @@ fn storage_keys_iter_works() {
)),
)
.unwrap()
.take(1)
.map(|x| x.0)
.take(5)
.map(|x| hex::encode(x.0))
.collect();
assert_eq!(
res,
[hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]
[
"7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa",
"811ecfaadcf5f2ee1d67393247e2f71a1662d433e8ce7ff89fb0d4aa9561820b",
"a93d74caa7ec34ea1b04ce1e5c090245f867d333f0f88278a451e45299654dc5",
"a9ee1403384afbfc13f13be91ff70bfac057436212e53b9733914382ac942892",
"cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f",
]
);
}
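The rewritten assertions compare hex strings instead of raw byte vectors, which makes mismatches much easier to read in test output. As a reminder of what `hex::encode` produces (using the `hex = "0.4"` dependency added to this test crate's Cargo.toml above), a tiny sketch:

fn main() {
    // The well-known ":code" storage key, as raw bytes and as the lowercase
    // hex string that now appears literally in the assertions above.
    let key: &[u8] = b":code";
    assert_eq!(hex::encode(key), "3a636f6465");
}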

11 changes: 11 additions & 0 deletions primitives/keyring/src/sr25519.rs
@@ -89,9 +89,20 @@ impl Keyring {
pub fn public(self) -> Public {
self.pair().public()
}

pub fn to_seed(self) -> String {
format!("//{}", self)
}

/// Create a crypto `Pair` from a numeric value.
pub fn numeric(idx: usize) -> Pair {
Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed")
}

/// Get account id of a `numeric` account.
pub fn numeric_id(idx: usize) -> AccountId32 {
(*Self::numeric(idx).public().as_array_ref()).into()
}
}

impl From<Keyring> for &'static str {
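The new `numeric` constructor is what the `exhausts_resources_extrinsic_from` helper in the basic-authorship tests above relies on to derive a distinct, deterministic dev keypair per index. A minimal usage sketch (the `sp_keyring::AccountKeyring` alias for this `Keyring` is assumed here, as in those tests):

use sp_core::Pair as _; // brings `public()` into scope, as the new test import does
use sp_keyring::AccountKeyring;

fn main() {
    // Deterministic keypair derived from the seed "//42".
    let pair = AccountKeyring::numeric(42);
    // The matching 32-byte account id, convenient for test assertions.
    let id = AccountKeyring::numeric_id(42);
    println!("public: {:?}", pair.public());
    println!("account id: {:?}", id);
}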
