This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

babe: allow skipping over empty epochs #11727

Merged
merged 20 commits on Dec 24, 2022
34 changes: 32 additions & 2 deletions client/consensus/babe/src/lib.rs
@@ -1525,11 +1525,12 @@ where
if let Some(next_epoch_descriptor) = next_epoch_digest {
old_epoch_changes = Some((*epoch_changes).clone());

let viable_epoch = epoch_changes
let mut viable_epoch = epoch_changes
.viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))
.ok_or_else(|| {
ConsensusError::ClientImport(Error::<Block>::FetchEpoch(parent_hash).into())
})?;
})?
.into_cloned();

let epoch_config = next_config_digest
.map(Into::into)
@@ -1542,6 +1543,35 @@ where
log::Level::Info
};

if viable_epoch.as_ref().end_slot() <= slot {
// some epochs must have been skipped as our current slot
// fits outside the current epoch. we will figure out
// which epoch it belongs to and we will re-use the same
// data for that epoch
let mut epoch_data = viable_epoch.as_mut();
let skipped_epochs = (*slot - *epoch_data.start_slot) / epoch_data.duration;

let original_epoch_index = epoch_data.epoch_index;

// NOTE: notice that we are only updating a local copy of the `Epoch`, this
// makes it so that when we insert the next epoch into `EpochChanges` below
// (after incrementing it), it will use the correct epoch index and start slot.
// we do not update the original epoch that will be re-used because there might
// be other forks (that we haven't imported) where the epoch isn't skipped, and
// to import those forks we want to keep the original epoch data. not updating
// the original epoch works because when we search the tree for which epoch to
// use for a given slot, we will search in-depth with the predicate
// `epoch.start_slot <= slot` which will still match correctly without updating
// `start_slot` to the correct value as below.
epoch_data.epoch_index += skipped_epochs;
epoch_data.start_slot =
Slot::from(*epoch_data.start_slot + skipped_epochs * epoch_data.duration);

warn!(target: "babe", "👶 Epoch(s) skipped: from {} to {}",
original_epoch_index, epoch_data.epoch_index,
);
}

log!(target: "babe",
log_level,
"👶 New epoch {} launching at block {} (block slot {} >= start slot {}).",
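To make the fix-up above concrete, here is a minimal standalone sketch of the same arithmetic using plain u64 slot numbers in place of the `Slot` and `Epoch` types; `reanchor_epoch` is a hypothetical helper written for this example, not part of the crate.

// Sketch of the skipped-epoch fix-up: given the last known epoch and the
// slot of the block being imported, compute the index and start slot that
// the re-used epoch data should be anchored at.
fn reanchor_epoch(epoch_index: u64, start_slot: u64, duration: u64, slot: u64) -> (u64, u64) {
    // whole epochs between the known epoch's start and the imported slot
    let skipped_epochs = (slot - start_slot) / duration;
    (epoch_index + skipped_epochs, start_slot + skipped_epochs * duration)
}

fn main() {
    // epoch 2 starts at slot 13 with 6-slot epochs; a block at slot 26 lies
    // two whole epochs further on, so epoch 2's data is re-used as epoch 4,
    // which starts at slot 25
    assert_eq!(reanchor_epoch(2, 13, 6, 26), (4, 25));
}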
129 changes: 129 additions & 0 deletions client/consensus/babe/src/tests.rs
@@ -27,6 +27,7 @@ use rand_chacha::{rand_core::SeedableRng, ChaChaRng};
use sc_block_builder::{BlockBuilder, BlockBuilderProvider};
use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer};
use sc_consensus::{BoxBlockImport, BoxJustificationImport};
use sc_consensus_epochs::{EpochIdentifier, EpochIdentifierPosition};
use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging;
use sc_keystore::LocalKeystore;
use sc_network_test::{Block as TestBlock, *};
@@ -1069,3 +1070,131 @@ fn obsolete_blocks_aux_data_cleanup() {
// Present C4, C5
assert!(aux_data_check(&fork3_hashes, true));
}

#[test]
fn allows_skipping_epochs() {
let mut net = BabeTestNet::new(1);

let peer = net.peer(0);
let data = peer.data.as_ref().expect("babe link set up during initialization");

let client = peer.client().as_client();
let mut block_import = data.block_import.lock().take().expect("import set up during init");

let mut proposer_factory = DummyFactory {
client: client.clone(),
config: data.link.config.clone(),
epoch_changes: data.link.epoch_changes.clone(),
mutator: Arc::new(|_, _| ()),
};

let epoch_changes = data.link.epoch_changes.clone();
let epoch_length = data.link.config.epoch_length;

// we create all of the blocks in epoch 0 as well as a block in epoch 1
let blocks = propose_and_import_blocks(
&client,
&mut proposer_factory,
&mut block_import,
BlockId::Number(0),
epoch_length as usize + 1,
);

// the first block in epoch 0 (#1) announces both epoch 0 and 1 (this is a
// special genesis epoch)
let epoch0 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis0,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch0.epoch_index, 0);
assert_eq!(epoch0.start_slot, Slot::from(1));

let epoch1 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Genesis1,
hash: blocks[0],
number: 1,
})
.unwrap()
.clone();

assert_eq!(epoch1.epoch_index, 1);
assert_eq!(epoch1.start_slot, Slot::from(epoch_length + 1));

// the first block in epoch 1 (#7) announces epoch 2. we will be skipping
// this epoch and therefore re-using its data for epoch 3
let epoch2 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: blocks[epoch_length as usize],
number: epoch_length + 1,
})
.unwrap()
.clone();

assert_eq!(epoch2.epoch_index, 2);
assert_eq!(epoch2.start_slot, Slot::from(epoch_length * 2 + 1));

// we now author a block that belongs to epoch 3, thereby skipping epoch 2
let last_block = client.expect_header(BlockId::Hash(*blocks.last().unwrap())).unwrap();
let block = propose_and_import_block(
&last_block,
Some((epoch_length * 3 + 1).into()),
&mut proposer_factory,
&mut block_import,
);

// and the first block in epoch 3 (#8) announces epoch 4
let epoch4 = epoch_changes
.shared_data()
.epoch(&EpochIdentifier {
position: EpochIdentifierPosition::Regular,
hash: block,
number: epoch_length + 2,
})
.unwrap()
.clone();

assert_eq!(epoch4.epoch_index, 4);
assert_eq!(epoch4.start_slot, Slot::from(epoch_length * 4 + 1));

// if we try to get the epoch data for a slot in epoch 3
let epoch3 = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 3 + 2).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get back the data for epoch 2
assert_eq!(epoch3, epoch2);

// but if we try to get the epoch data for a slot in epoch 4
let epoch4_ = epoch_changes
.shared_data()
.epoch_data_for_child_of(
descendent_query(&*client),
&block,
epoch_length + 2,
(epoch_length * 4 + 1).into(),
|slot| Epoch::genesis(&data.link.config, slot),
)
.unwrap()
.unwrap();

// we get epoch 4 as expected
assert_eq!(epoch4, epoch4_);
}
49 changes: 29 additions & 20 deletions frame/babe/src/lib.rs
@@ -575,10 +575,19 @@ impl<T: Config> Pallet<T> {
// by the session module to be called before this.
debug_assert!(Self::initialized().is_some());

// Update epoch index
let epoch_index = EpochIndex::<T>::get()
.checked_add(1)
.expect("epoch indices will never reach 2^64 before the death of the universe; qed");
// Update epoch index.
//
// NOTE: we figure out the epoch index from the slot, which may not
// necessarily be contiguous if the chain was offline for more than
// `T::EpochDuration` slots. When skipping from epoch N to e.g. N+4, we
// will be using the randomness and authorities for that epoch that had
// been previously announced for epoch N+1, and the randomness collected
// during the current epoch (N) will be used for epoch N+5.
let epoch_index = sp_consensus_babe::epoch_index(
CurrentSlot::<T>::get(),
GenesisSlot::<T>::get(),
T::EpochDuration::get(),
);

EpochIndex::<T>::put(epoch_index);
Authorities::<T>::put(authorities);
@@ -625,11 +634,16 @@ impl<T: Config> Pallet<T> {
}
}

/// Finds the start slot of the current epoch. only guaranteed to
/// give correct results after `initialize` of the first block
/// in the chain (as its result is based off of `GenesisSlot`).
/// Finds the start slot of the current epoch.
///
/// Only guaranteed to give correct results after `initialize` of the first
/// block in the chain (as its result is based off of `GenesisSlot`).
pub fn current_epoch_start() -> Slot {
Self::epoch_start(EpochIndex::<T>::get())
sp_consensus_babe::epoch_start_slot(
EpochIndex::<T>::get(),
GenesisSlot::<T>::get(),
T::EpochDuration::get(),
)
}

/// Produces information about the current epoch.
@@ -653,9 +667,15 @@ impl<T: Config> Pallet<T> {
if u64 is not enough we should crash for safety; qed.",
);

let start_slot = sp_consensus_babe::epoch_start_slot(
next_epoch_index,
GenesisSlot::<T>::get(),
T::EpochDuration::get(),
);

Epoch {
epoch_index: next_epoch_index,
start_slot: Self::epoch_start(next_epoch_index),
start_slot,
duration: T::EpochDuration::get(),
authorities: NextAuthorities::<T>::get().to_vec(),
randomness: NextRandomness::<T>::get(),
@@ -667,17 +687,6 @@ impl<T: Config> Pallet<T> {
}
}

fn epoch_start(epoch_index: u64) -> Slot {
// (epoch_index * epoch_duration) + genesis_slot

const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.";

let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF);

epoch_start.checked_add(*GenesisSlot::<T>::get()).expect(PROOF).into()
}

fn deposit_consensus<U: Encode>(new: U) {
let log = DigestItem::Consensus(BABE_ENGINE_ID, new.encode());
<frame_system::Pallet<T>>::deposit_log(log)
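As a rough illustration of the NOTE above — deriving the epoch index from the current slot instead of incrementing the stored index — here is a small self-contained sketch mirroring the `epoch_index` computation with plain u64 slots; the genesis slot and epoch duration are made up for the example.

// Mirror of the epoch-index-from-slot computation used above.
fn epoch_index(slot: u64, genesis_slot: u64, epoch_duration: u64) -> u64 {
    slot.saturating_sub(genesis_slot) / epoch_duration
}

fn main() {
    let (genesis_slot, epoch_duration) = (100, 3);
    // contiguous progress: the first slot of the next epoch bumps the index by one
    assert_eq!(epoch_index(103, genesis_slot, epoch_duration), 1);
    // if the chain was offline and resumes four epochs later, the index jumps
    // straight to 4 instead of being incremented to 2
    assert_eq!(epoch_index(112, genesis_slot, epoch_duration), 4);
}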
31 changes: 31 additions & 0 deletions frame/babe/src/tests.rs
@@ -948,3 +948,34 @@ fn generate_equivocation_report_blob() {
println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode());
});
}

#[test]
fn skipping_over_epochs_works() {
let mut ext = new_test_ext(3);

ext.execute_with(|| {
let epoch_duration: u64 = <Test as Config>::EpochDuration::get();

// this sets the genesis slot to 100
let genesis_slot = 100;
go_to_block(1, genesis_slot);

// we will author all blocks from epoch #0 and arrive at a point where
// we are in epoch #1. we should already have the randomness ready that
// will be used in epoch #2
progress_to_block(epoch_duration + 1);
assert_eq!(EpochIndex::<Test>::get(), 1);

// genesis randomness is an array of zeros
let randomness_for_epoch_2 = NextRandomness::<Test>::get();
assert!(randomness_for_epoch_2 != [0; 32]);

// we will now create a block for a slot that is part of epoch #4.
// we should appropriately increment the epoch index as well as re-use
// the randomness from epoch #2 on epoch #4
go_to_block(System::block_number() + 1, genesis_slot + epoch_duration * 4);

assert_eq!(EpochIndex::<Test>::get(), 4);
assert_eq!(Randomness::<Test>::get(), randomness_for_epoch_2);
});
}
19 changes: 19 additions & 0 deletions primitives/consensus/babe/src/lib.rs
@@ -360,6 +360,25 @@ pub struct Epoch {
pub config: BabeEpochConfiguration,
}

/// Returns the epoch index the given slot belongs to.
pub fn epoch_index(slot: Slot, genesis_slot: Slot, epoch_duration: u64) -> u64 {
*slot.saturating_sub(genesis_slot) / epoch_duration
}

/// Returns the first slot at the given epoch index.
pub fn epoch_start_slot(epoch_index: u64, genesis_slot: Slot, epoch_duration: u64) -> Slot {
// (epoch_index * epoch_duration) + genesis_slot

const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \
if u64 is not enough we should crash for safety; qed.";

epoch_index
.checked_mul(epoch_duration)
.and_then(|slot| slot.checked_add(*genesis_slot))
.expect(PROOF)
.into()
}

sp_api::decl_runtime_apis! {
/// API necessary for block authorship with BABE.
#[api_version(2)]
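A short usage sketch of the two helpers above, assuming `Slot` comes from `sp_consensus_slots` (the same type used throughout the babe primitives); the genesis slot and epoch duration below are illustrative only.

use sp_consensus_babe::{epoch_index, epoch_start_slot};
use sp_consensus_slots::Slot;

fn main() {
    let genesis_slot = Slot::from(100);
    let epoch_duration = 6u64;

    // a slot a little over four epochs past genesis maps to epoch index 4...
    let slot = Slot::from(100 + 4 * 6 + 2);
    let index = epoch_index(slot, genesis_slot, epoch_duration);
    assert_eq!(index, 4);

    // ...and the start slot of that epoch can be recovered from the index
    assert_eq!(epoch_start_slot(index, genesis_slot, epoch_duration), Slot::from(124));
}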