diff --git a/Cargo.lock b/Cargo.lock
index cdcd32d1b312e..ab9eb76bc6b59 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4360,6 +4360,7 @@ name = "node-runtime"
 version = "2.0.1"
 dependencies = [
  "frame-benchmarking",
+ "frame-election-provider-support",
  "frame-executive",
  "frame-support",
  "frame-system",
@@ -5424,6 +5425,7 @@ dependencies = [
 "frame-system",
 "impl-trait-for-tuples",
 "lazy_static",
+ "log",
 "pallet-timestamp",
 "parity-scale-codec",
 "sp-application-crypto",
diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml
index e57944674fcc4..9b182c4085790 100644
--- a/bin/node/runtime/Cargo.toml
+++ b/bin/node/runtime/Cargo.toml
@@ -43,6 +43,7 @@ frame-benchmarking = { version = "3.1.0", default-features = false, path = "../.
 frame-support = { version = "3.0.0", default-features = false, path = "../../../frame/support" }
 frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" }
 frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true }
+frame-election-provider-support = { version = "3.0.0", default-features = false, path = "../../../frame/election-provider-support" }
 frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" }
 frame-try-runtime = { version = "0.9.0", default-features = false, path = "../../../frame/try-runtime", optional = true }
 pallet-assets = { version = "3.0.0", default-features = false, path = "../../../frame/assets" }
diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs
index 4532fe40c1b9e..e8ad0db781155 100644
--- a/bin/node/runtime/src/lib.rs
+++ b/bin/node/runtime/src/lib.rs
@@ -482,6 +482,7 @@ parameter_types! {
 	pub OffchainRepeat: BlockNumber = 5;
 }

+use frame_election_provider_support::onchain;
 impl pallet_staking::Config for Runtime {
 	const MAX_NOMINATIONS: u32 = MAX_NOMINATIONS;
 	type Currency = Balances;
@@ -505,6 +506,8 @@ impl pallet_staking::Config for Runtime {
 	type NextNewSession = Session;
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type ElectionProvider = ElectionProviderMultiPhase;
+	type GenesisElectionProvider =
+		onchain::OnChainSequentialPhragmen<pallet_election_provider_multi_phase::OnChainConfig<Self>>;
 	type WeightInfo = pallet_staking::weights::SubstrateWeight<Runtime>;
 }

@@ -515,7 +518,7 @@ parameter_types! {
 	// fallback: no need to do on-chain phragmen initially.
 	pub const Fallback: pallet_election_provider_multi_phase::FallbackStrategy =
-		pallet_election_provider_multi_phase::FallbackStrategy::OnChain;
+		pallet_election_provider_multi_phase::FallbackStrategy::Nothing;
 	pub SolutionImprovementThreshold: Perbill = Perbill::from_rational(1u32, 10_000);
diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs
index 236b975817ffd..770e20cb786e2 100644
--- a/frame/babe/src/mock.rs
+++ b/frame/babe/src/mock.rs
@@ -213,6 +213,7 @@ impl pallet_staking::Config for Test {
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type NextNewSession = Session;
 	type ElectionProvider = onchain::OnChainSequentialPhragmen<Self>;
+	type GenesisElectionProvider = Self::ElectionProvider;
 	type WeightInfo = ();
 }
diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs
index 0254525ce819d..2bb47a8778074 100644
--- a/frame/election-provider-multi-phase/src/lib.rs
+++ b/frame/election-provider-multi-phase/src/lib.rs
@@ -115,7 +115,23 @@
 //! good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to
 //! determine what needs to be done. The on-chain election is slow, and contains no balancing or
 //! reduction post-processing. See [`onchain::OnChainSequentialPhragmen`]. The
-//! [`FallbackStrategy::Nothing`] should probably only be used for testing, and returns an error.
+//! [`FallbackStrategy::Nothing`] just returns an error, and enables the [`Phase::Emergency`].
+//!
+//! ### Emergency Phase
+//!
+//! If a call to `T::ElectionProvider::elect` cannot return `Ok(_)` for any of the below reasons:
+//!
+//! 1. No signed or unsigned solution was submitted, and the fallback is `Nothing` or failed.
+//! 2. An internal error occurred.
+//!
+//! then the pallet proceeds to the [`Phase::Emergency`]. During this phase, any solution can be
+//! submitted from [`T::ForceOrigin`], without any checking. Once submitted, the forced solution
+//! is kept in [`QueuedSolution`] until the next call to `T::ElectionProvider::elect`, where it is
+//! returned and [`Phase`] goes back to `Off`.
+//!
+//! This implies that the user of this pallet (i.e. a staking pallet) should re-try calling
+//! `T::ElectionProvider::elect` in case of error, until `Ok(_)` is returned.
 //!
 //! ## Feasible Solution (correct solution)
 //!
@@ -269,7 +285,7 @@
 pub type CompactAccuracyOf<T> = <CompactOf<T> as CompactSolution>::Accuracy;
 pub type OnChainAccuracyOf<T> = <T as Config>::OnChainAccuracy;

 /// Wrapper type that implements the configurations needed for the on-chain backup.
-struct OnChainConfig<T: Config>(sp_std::marker::PhantomData<T>);
+pub struct OnChainConfig<T: Config>(sp_std::marker::PhantomData<T>);
 impl<T: Config> onchain::Config for OnChainConfig<T> {
 	type AccountId = T::AccountId;
 	type BlockNumber = T::BlockNumber;
@@ -312,9 +328,13 @@ pub enum Phase<Bn> {
 	/// advising validators not to bother running the unsigned offchain worker.
 	///
 	/// As validator nodes are free to edit their OCW code, they could simply ignore this advisory
-	/// and always compute their own solution. However, by default, when the unsigned phase is passive,
-	/// the offchain workers will not bother running.
+	/// and always compute their own solution. However, by default, when the unsigned phase is
+	/// passive, the offchain workers will not bother running.
 	Unsigned((bool, Bn)),
+	/// The emergency phase. This is enabled upon a failing call to `T::ElectionProvider::elect`.
+	/// After that, the only way to leave this phase is through a successful
+	/// `T::ElectionProvider::elect`.
+	Emergency,
 }

 impl<Bn> Default for Phase<Bn> {
@@ -324,6 +344,11 @@
 }

 impl<Bn: PartialEq + Eq> Phase<Bn> {
+	/// Whether the phase is emergency or not.
+	pub fn is_emergency(&self) -> bool {
+		matches!(self, Phase::Emergency)
+	}
+
 	/// Whether the phase is signed or not.
 	pub fn is_signed(&self) -> bool {
 		matches!(self, Phase::Signed)
 	}
@@ -582,7 +607,8 @@ pub mod pallet {
 		/// Configuration for the fallback
 		type Fallback: Get<FallbackStrategy>;

-		/// Origin that can set the minimum score.
+		/// Origin that can control this pallet. Note that any action taken by this origin (such
+		/// as providing an emergency solution) is not checked. Thus, it must be a trusted origin.
 		type ForceOrigin: EnsureOrigin<Self::Origin>;

 		/// The configuration of benchmarking.
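To make the emergency flow above concrete, here is a minimal, test-style sketch of the intended recovery path. It assumes the mock runtime used by this crate's own tests (`ExtBuilder`, `roll_to`, `MultiPhase`, `AccountId`), a fallback of `FallbackStrategy::Nothing`, root as the configured `ForceOrigin`, and a `Default` `ReadySolution` standing in for a real, off-chain-validated solution:

```rust
#[test]
fn emergency_phase_recovery_sketch() {
	ExtBuilder::default().build_and_execute(|| {
		// With `Fallback = Nothing` and no queued solution, `elect()` fails...
		roll_to(30);
		assert!(MultiPhase::elect().is_err());
		// ...and the pallet is now parked in the emergency phase.
		assert!(MultiPhase::current_phase().is_emergency());

		// `T::ForceOrigin` hands over a solution. It is NOT feasibility-checked,
		// so in practice it must be validated off-chain before submission.
		let solution: ReadySolution<AccountId> = Default::default();
		assert_ok!(MultiPhase::set_emergency_election_result(Origin::root(), solution));

		// The next `elect()` returns the forced solution, rotates the round, and
		// flips the phase back to `Off`.
		assert!(MultiPhase::elect().is_ok());
		assert!(MultiPhase::current_phase().is_off());
	});
}
```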
@@ -603,6 +629,13 @@ pub mod pallet { let remaining = next_election - now; let current_phase = Self::current_phase(); + log!( + trace, + "current phase {:?}, next election {:?}, metadata: {:?}", + current_phase, + next_election, + Self::snapshot_metadata() + ); match current_phase { Phase::Off if remaining <= signed_deadline && remaining > unsigned_deadline => { // NOTE: if signed-phase length is zero, second part of the if-condition fails. @@ -612,7 +645,7 @@ pub mod pallet { T::WeightInfo::on_initialize_open_signed().saturating_add(snap_weight) } Err(why) => { - // not much we can do about this at this point. + // Not much we can do about this at this point. log!(warn, "failed to open signed phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight @@ -623,13 +656,13 @@ pub mod pallet { Phase::Signed | Phase::Off if remaining <= unsigned_deadline && remaining > Zero::zero() => { - // determine if followed by signed or not. + // Determine if followed by signed or not. let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed { - // followed by a signed phase: close the signed phase, no need for snapshot. + // Followed by a signed phase: close the signed phase, no need for snapshot. // TODO: proper weight https://github.com/paritytech/substrate/pull/7910. (false, true, Weight::zero()) } else { - // no signed phase: create a new snapshot, definitely `enable` the unsigned + // No signed phase: create a new snapshot, definitely `enable` the unsigned // phase. (true, true, Weight::zero()) }; @@ -646,7 +679,7 @@ pub mod pallet { base_weight.saturating_add(snap_weight).saturating_add(signed_weight) } Err(why) => { - // not much we can do about this at this point. + // Not much we can do about this at this point. log!(warn, "failed to open unsigned phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight @@ -661,7 +694,7 @@ pub mod pallet { fn offchain_worker(now: T::BlockNumber) { use sp_runtime::offchain::storage_lock::{StorageLock, BlockAndTime}; - // create a lock with the maximum deadline of number of blocks in the unsigned phase. + // Create a lock with the maximum deadline of number of blocks in the unsigned phase. // This should only come useful in an **abrupt** termination of execution, otherwise the // guard will be dropped upon successful execution. let mut lock = StorageLock::>>::with_block_deadline( @@ -687,7 +720,7 @@ pub mod pallet { assert!(size_of::>() <= size_of::()); // ---------------------------- - // based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. + // Based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. let max_vote: usize = as CompactSolution>::LIMIT; // 1. Maximum sum of [ChainAccuracy; 16] must fit into `UpperOf`.. @@ -761,7 +794,7 @@ pub mod pallet { // Check score being an improvement, phase, and desired targets. Self::unsigned_pre_dispatch_checks(&solution).expect(error_message); - // ensure witness was correct. + // Ensure witness was correct. let SolutionOrSnapshotSize { voters, targets } = Self::snapshot_metadata().expect(error_message); @@ -772,7 +805,7 @@ pub mod pallet { let ready = Self::feasibility_check(solution, ElectionCompute::Unsigned).expect(error_message); - // store the newly received solution. + // Store the newly received solution. 
log!(info, "queued unsigned solution with score {:?}", ready.score); >::put(ready); Self::deposit_event(Event::SolutionStored(ElectionCompute::Unsigned)); @@ -794,6 +827,29 @@ pub mod pallet { >::set(maybe_next_score); Ok(()) } + + /// Set a solution in the queue, to be handed out to the client of this pallet in the next + /// call to `ElectionProvider::elect`. + /// + /// This can only be set by `T::ForceOrigin`, and only when the phase is `Emergency`. + /// + /// The solution is not checked for any feasibility and is assumed to be trustworthy, as any + /// feasibility check itself can in principle cause the election process to fail (due to + /// memory/weight constrains). + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn set_emergency_election_result( + origin: OriginFor, + solution: ReadySolution, + ) -> DispatchResult { + T::ForceOrigin::ensure_origin(origin)?; + ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + + // Note: we don't `rotate_round` at this point; the next call to + // `ElectionProvider::elect` will succeed and take care of that. + + >::put(solution); + Ok(()) + } } #[pallet::event] @@ -829,6 +885,8 @@ pub mod pallet { PreDispatchWeakSubmission, /// OCW submitted solution for wrong round OcwCallWrongEra, + /// The call is not allowed at this point. + CallNotAllowed, } #[pallet::origin] @@ -838,7 +896,7 @@ pub mod pallet { type Call = Call; fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { if let Call::submit_unsigned(solution, _) = call { - // discard solution not coming from the local OCW. + // Discard solution not coming from the local OCW. match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } _ => { @@ -860,10 +918,10 @@ pub mod pallet { solution.score[0].saturated_into() ), ) - // used to deduplicate unsigned solutions: each validator should produce one + // Used to deduplicate unsigned solutions: each validator should produce one // solution per round at most, and solutions are not propagate. .and_provides(solution.round) - // transaction should stay in the pool for the duration of the unsigned phase. + // Transaction should stay in the pool for the duration of the unsigned phase. .longevity(T::UnsignedPhase::get().saturated_into::()) // We don't propagate this. This can never be validated at a remote node. .propagate(false) @@ -950,14 +1008,14 @@ impl Pallet { log!(trace, "lock for offchain worker acquired."); match Self::current_phase() { Phase::Unsigned((true, opened)) if opened == now => { - // mine a new solution, cache it, and attempt to submit it + // Mine a new solution, cache it, and attempt to submit it let initial_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { Self::mine_check_save_submit() }); log!(debug, "initial offchain thread output: {:?}", initial_output); } Phase::Unsigned((true, opened)) if opened < now => { - // try and resubmit the cached solution, and recompute ONLY if it is not + // Try and resubmit the cached solution, and recompute ONLY if it is not // feasible. let resubmit_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { Self::restore_or_compute_then_maybe_submit() @@ -967,7 +1025,7 @@ impl Pallet { _ => {} } - // after election finalization, clear OCW solution storage. + // After election finalization, clear OCW solution storage. 
if >::events() .into_iter() .filter_map(|event_record| { @@ -1007,7 +1065,7 @@ impl Pallet { now: T::BlockNumber, ) -> Result { let weight = if need_snapshot { - // if not being followed by a signed phase, then create the snapshots. + // If not being followed by a signed phase, then create the snapshots. debug_assert!(Self::snapshot().is_none()); Self::create_snapshot()? } else { @@ -1037,13 +1095,13 @@ impl Pallet { let (desired_targets, w3) = T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; - // defensive-only + // Defensive-only. if targets.len() > target_limit || voters.len() > voter_limit { debug_assert!(false, "Snapshot limit has not been respected."); return Err(ElectionError::DataProvider("Snapshot too big for submission.")); } - // only write snapshot if all existed. + // Only write snapshot if all existed. >::put(SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32, @@ -1067,10 +1125,10 @@ impl Pallet { ) -> Result, FeasibilityError> { let RawSolution { compact, score, round } = solution; - // first, check round. + // First, check round. ensure!(Self::round() == round, FeasibilityError::InvalidRound); - // winners are not directly encoded in the solution. + // Winners are not directly encoded in the solution. let winners = compact.unique_targets(); let desired_targets = @@ -1081,7 +1139,7 @@ impl Pallet { // upon arrival, thus we would then remove it here. Given overlay it is cheap anyhow ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); - // ensure that the solution's score can pass absolute min-score. + // Ensure that the solution's score can pass absolute min-score. let submitted_score = solution.score.clone(); ensure!( Self::minimum_untrusted_score().map_or(true, |min_score| @@ -1090,7 +1148,7 @@ impl Pallet { FeasibilityError::UntrustedScoreTooLow ); - // read the entire snapshot. + // Read the entire snapshot. let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = Self::snapshot().ok_or(FeasibilityError::SnapshotUnavailable)?; @@ -1100,7 +1158,7 @@ impl Pallet { let target_at = helpers::target_at_fn::(&snapshot_targets); let voter_index = helpers::voter_index_fn_usize::(&cache); - // first, make sure that all the winners are sane. + // First, make sure that all the winners are sane. // OPTIMIZATION: we could first build the assignments, and then extract the winners directly // from that, as that would eliminate a little bit of duplicate work. For now, we keep them // separate: First extract winners separately from compact, and then assignments. This is @@ -1119,19 +1177,19 @@ impl Pallet { let _ = assignments .iter() .map(|ref assignment| { - // check that assignment.who is actually a voter (defensive-only). + // Check that assignment.who is actually a voter (defensive-only). // NOTE: while using the index map from `voter_index` is better than a blind linear // search, this *still* has room for optimization. Note that we had the index when // we did `compact -> assignment` and we lost it. Ideal is to keep the index around. - // defensive-only: must exist in the snapshot. + // Defensive-only: must exist in the snapshot. let snapshot_index = voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; - // defensive-only: index comes from the snapshot, must exist. + // Defensive-only: index comes from the snapshot, must exist. 
let (_voter, _stake, targets) = snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; - // check that all of the targets are valid based on the snapshot. + // Check that all of the targets are valid based on the snapshot. if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { return Err(FeasibilityError::InvalidVote); } @@ -1163,14 +1221,14 @@ impl Pallet { /// 1. Increment round. /// 2. Change phase to [`Phase::Off`] /// 3. Clear all snapshot data. - fn post_elect() { - // inc round + fn rotate_round() { + // Inc round. >::mutate(|r| *r = *r + 1); - // change phase + // Phase is off now. >::put(Phase::Off); - // kill snapshots + // Kill snapshots. Self::kill_snapshot(); } @@ -1220,10 +1278,18 @@ impl ElectionProvider for Pallet { type DataProvider = T::DataProvider; fn elect() -> Result<(Supports, Weight), Self::Error> { - let outcome_and_weight = Self::do_elect(); - // IMPORTANT: regardless of if election was `Ok` or `Err`, we shall do some cleanup. - Self::post_elect(); - outcome_and_weight + match Self::do_elect() { + Ok((supports, weight)) => { + // All went okay, put sign to be Off, clean snapshot, etc. + Self::rotate_round(); + Ok((supports, weight)) + } + Err(why) => { + log!(error, "Entering emergency mode: {:?}", why); + >::put(Phase::Emergency); + Err(why) + } + } } } @@ -1254,7 +1320,7 @@ mod feasibility_check { assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); - // for whatever reason it might be: + // For whatever reason it might be: >::kill(); assert_noop!( @@ -1307,7 +1373,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().targets.len(), 4); // ----------------------------------------------------^^ valid range is [0..3]. - // swap all votes from 3 to 4. This will ensure that the number of unique winners + // Swap all votes from 3 to 4. This will ensure that the number of unique winners // will still be 4, but one of the indices will be gibberish. Requirement is to make // sure 3 a winner, which we don't do here. solution @@ -1333,7 +1399,7 @@ mod feasibility_check { #[test] fn voter_indices() { - // should be caught in `compact.into_assignment`. + // Should be caught in `compact.into_assignment`. ExtBuilder::default().desired_targets(2).build_and_execute(|| { roll_to(::get() - ::get() - ::get()); assert!(MultiPhase::current_phase().is_signed()); @@ -1342,7 +1408,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); // ----------------------------------------------------^^ valid range is [0..7]. - // check that there is a index 7 in votes1, and flip to 8. + // Check that there is an index 7 in votes1, and flip to 8. assert!( solution .compact @@ -1369,7 +1435,7 @@ mod feasibility_check { assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); // ----------------------------------------------------^^ valid range is [0..7]. - // first, check that voter at index 7 (40) actually voted for 3 (40) -- this is self + // First, check that voter at index 7 (40) actually voted for 3 (40) -- this is self // vote. Then, change the vote to 2 (30). assert_eq!( solution @@ -1397,7 +1463,7 @@ mod feasibility_check { let mut solution = raw_solution(); assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); - // simply faff with the score. + // Simply faff with the score. 
solution.score[0] += 1; assert_noop!( @@ -1457,7 +1523,7 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert!(MultiPhase::snapshot().is_some()); - // we close when upstream tells us to elect. + // We close when upstream tells us to elect. roll_to(32); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert!(MultiPhase::snapshot().is_some()); @@ -1540,7 +1606,7 @@ mod tests { roll_to(30); assert!(MultiPhase::current_phase().is_off()); - // this module is now only capable of doing on-chain backup. + // This module is now only capable of doing on-chain backup. assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); @@ -1549,9 +1615,9 @@ mod tests { #[test] fn early_termination() { - // an early termination in the signed phase, with no queued solution. + // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { - // signed phase started at block 15 and will end at 25. + // Signed phase started at block 15 and will end at 25. roll_to(14); assert_eq!(MultiPhase::current_phase(), Phase::Off); @@ -1560,11 +1626,11 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); - // an unexpected call to elect. + // An unexpected call to elect. roll_to(20); MultiPhase::elect().unwrap(); - // we surely can't have any feasible solutions. This will cause an on-chain election. + // We surely can't have any feasible solutions. This will cause an on-chain election. assert_eq!( multi_phase_events(), vec![ @@ -1572,7 +1638,7 @@ mod tests { Event::ElectionFinalized(Some(ElectionCompute::OnChain)) ], ); - // all storage items must be cleared. + // All storage items must be cleared. assert_eq!(MultiPhase::round(), 2); assert!(MultiPhase::snapshot().is_none()); assert!(MultiPhase::snapshot_metadata().is_none()); @@ -1590,7 +1656,7 @@ mod tests { roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); - // zilch solutions thus far. + // Zilch solutions thus far. let (supports, _) = MultiPhase::elect().unwrap(); assert_eq!( @@ -1609,7 +1675,7 @@ mod tests { roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); - // zilch solutions thus far. + // Zilch solutions thus far. assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::NoFallbackConfigured); }) } @@ -1619,15 +1685,15 @@ mod tests { ExtBuilder::default().build_and_execute(|| { Targets::set((0..(TargetIndex::max_value() as AccountId) + 1).collect::>()); - // signed phase failed to open. + // Signed phase failed to open. roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Off); - // unsigned phase failed to open. + // Unsigned phase failed to open. roll_to(25); assert_eq!(MultiPhase::current_phase(), Phase::Off); - // on-chain backup works though. + // On-chain backup works though. roll_to(29); let (supports, _) = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); @@ -1642,7 +1708,7 @@ mod tests { let (solution, _) = MultiPhase::mine_solution(2).unwrap(); - // default solution has a score of [50, 100, 5000]. + // Default solution has a score of [50, 100, 5000]. 
 			assert_eq!(solution.score, [50, 100, 5000]);

 			<MinimumUntrustedScore<Runtime>>::put([49, 0, 0]);
diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs
index 752d94ce19085..fe8a1bd4a3951 100644
--- a/frame/grandpa/src/mock.rs
+++ b/frame/grandpa/src/mock.rs
@@ -219,6 +219,7 @@ impl pallet_staking::Config for Test {
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type NextNewSession = Session;
 	type ElectionProvider = onchain::OnChainSequentialPhragmen<Self>;
+	type GenesisElectionProvider = Self::ElectionProvider;
 	type WeightInfo = ();
 }
diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs
index 7230c1215afc9..b780662b92cd7 100644
--- a/frame/offences/benchmarking/src/mock.rs
+++ b/frame/offences/benchmarking/src/mock.rs
@@ -178,6 +178,7 @@ impl pallet_staking::Config for Test {
 	type NextNewSession = Session;
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type ElectionProvider = onchain::OnChainSequentialPhragmen<Self>;
+	type GenesisElectionProvider = Self::ElectionProvider;
 	type WeightInfo = ();
 }
diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml
index 44e1f2f67858b..efe7bc133fb4d 100644
--- a/frame/session/Cargo.toml
+++ b/frame/session/Cargo.toml
@@ -24,6 +24,7 @@ frame-support = { version = "3.0.0", default-features = false, path = "../suppor
 frame-system = { version = "3.0.0", default-features = false, path = "../system" }
 pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" }
 sp-trie = { version = "3.0.0", optional = true, default-features = false, path = "../../primitives/trie" }
+log = { version = "0.4.0", default-features = false }
 impl-trait-for-tuples = "0.2.1"

[dev-dependencies]
@@ -44,5 +45,6 @@ std = [
	"sp-staking/std",
	"pallet-timestamp/std",
	"sp-trie/std",
+	"log/std",
]
try-runtime = ["frame-support/try-runtime"]
diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs
index 87d1242812db2..591e54f067bb5 100644
--- a/frame/session/benchmarking/src/mock.rs
+++ b/frame/session/benchmarking/src/mock.rs
@@ -183,6 +183,7 @@ impl pallet_staking::Config for Test {
 	type NextNewSession = Session;
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type ElectionProvider = onchain::OnChainSequentialPhragmen<Self>;
+	type GenesisElectionProvider = Self::ElectionProvider;
 	type WeightInfo = ();
 }
diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs
index 8902ebe551f6c..3cfcbf98bf38c 100644
--- a/frame/session/src/historical/mod.rs
+++ b/frame/session/src/historical/mod.rs
@@ -124,10 +124,17 @@ impl<T: Config> ValidatorSetWithIdentification<T::AccountId> for Module<T> {

 /// Specialization of the crate-level `SessionManager` which returns the set of full identification
 /// when creating a new session.
-pub trait SessionManager<ValidatorId, FullIdentification>: crate::SessionManager<ValidatorId> {
+pub trait SessionManager<ValidatorId, FullIdentification>:
+	crate::SessionManager<ValidatorId>
+{
 	/// If there was a validator set change, it returns the set of new validators along with their
 	/// full identifications.
 	fn new_session(new_index: SessionIndex) -> Option<Vec<(ValidatorId, FullIdentification)>>;
+	fn new_session_genesis(
+		new_index: SessionIndex,
+	) -> Option<Vec<(ValidatorId, FullIdentification)>> {
+		<Self as SessionManager<ValidatorId, FullIdentification>>::new_session(new_index)
+	}
 	fn start_session(start_index: SessionIndex);
 	fn end_session(end_index: SessionIndex);
 }
@@ -136,19 +143,20 @@
 /// sets the historical trie root of the ending session.
 pub struct NoteHistoricalRoot<T, I>(sp_std::marker::PhantomData<(T, I)>);

-impl<T: Config, I> crate::SessionManager<(T::ValidatorId, T::FullIdentification)> for NoteHistoricalRoot<T, I>
-	where I: SessionManager<T::ValidatorId, T::FullIdentification>
-{
-	fn new_session(new_index: SessionIndex) -> Option<Vec<(T::ValidatorId, T::FullIdentification)>> {
-
+impl<T: Config, I: SessionManager<T::ValidatorId, T::FullIdentification>> NoteHistoricalRoot<T, I> {
+	fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option<Vec<(T::ValidatorId, T::FullIdentification)>> {
 		StoredRange::mutate(|range| {
 			range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1;
 		});

-		let new_validators_and_id = <I as SessionManager<_, _>>::new_session(new_index);
-		let new_validators = new_validators_and_id.as_ref().map(|new_validators| {
-			new_validators.iter().map(|(v, _id)| v.clone()).collect()
-		});
+		let new_validators_and_id = if is_genesis {
+			<I as SessionManager<_, _>>::new_session_genesis(new_index)
+		} else {
+			<I as SessionManager<_, _>>::new_session(new_index)
+		};
+		let new_validators_opt = new_validators_and_id
+			.as_ref()
+			.map(|new_validators| new_validators.iter().map(|(v, _id)| v.clone()).collect());

 		if let Some(new_validators) = new_validators_and_id {
 			let count = new_validators.len() as ValidatorCount;
@@ -166,7 +174,20 @@
 			}
 		}

-		new_validators
+		new_validators_opt
+	}
+}
+
+impl<T: Config, I> crate::SessionManager<(T::ValidatorId, T::FullIdentification)> for NoteHistoricalRoot<T, I>
+where
+	I: SessionManager<T::ValidatorId, T::FullIdentification>,
+{
+	fn new_session(new_index: SessionIndex) -> Option<Vec<(T::ValidatorId, T::FullIdentification)>> {
+		Self::do_new_session(new_index, false)
+	}
+
+	fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<(T::ValidatorId, T::FullIdentification)>> {
+		Self::do_new_session(new_index, true)
 	}

 	fn start_session(start_index: SessionIndex) {
diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs
index 547d29715d9c1..933aff02972f8 100644
--- a/frame/session/src/lib.rs
+++ b/frame/session/src/lib.rs
@@ -238,12 +238,19 @@ pub trait SessionManager<ValidatorId> {
 	/// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. In other
 	/// words, a new session must always be planned before an ongoing one can be finished.
 	fn new_session(new_index: SessionIndex) -> Option<Vec<ValidatorId>>;
+	/// Same as `new_session`, but this should only be called at genesis.
+	///
+	/// The session manager might decide to treat this in a different way. The default
+	/// implementation simply uses [`new_session`].
+	fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<ValidatorId>> {
+		Self::new_session(new_index)
+	}
 	/// End the session.
 	///
 	/// Because the session pallet can queue validator sets, the ending session can be lower than
 	/// the last new session index.
 	fn end_session(end_index: SessionIndex);
-	/// Start the session.
+	/// Start an already planned session.
 	///
 	/// The session starts being used for validation.
 	fn start_session(start_index: SessionIndex);
@@ -340,13 +347,9 @@
 pub struct TestSessionHandler;
 impl<AId> SessionHandler<AId> for TestSessionHandler {
 	const KEY_TYPE_IDS: &'static [KeyTypeId] = &[sp_runtime::key_types::DUMMY];
-
 	fn on_genesis_session<Ks: OpaqueKeys>(_: &[(AId, Ks)]) {}
-
 	fn on_new_session<Ks: OpaqueKeys>(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {}
-
 	fn on_before_session_ending() {}
-
 	fn on_disabled(_: usize) {}
 }
@@ -451,7 +454,7 @@ decl_storage! {
 			}
 		}

-		let initial_validators_0 = T::SessionManager::new_session(0)
+		let initial_validators_0 = T::SessionManager::new_session_genesis(0)
 			.unwrap_or_else(|| {
 				frame_support::print("No initial validator provided by `SessionManager`, use \
 					session config keys to generate initial validator set.");
@@ -459,7 +462,7 @@ decl_storage! {
 			});
 		assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!");

-		let initial_validators_1 = T::SessionManager::new_session(1)
+		let initial_validators_1 = T::SessionManager::new_session_genesis(1)
 			.unwrap_or_else(|| initial_validators_0.clone());
 		assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!");
@@ -548,7 +551,7 @@ decl_module! {
 		/// Actual cost depends on the length of `T::Keys::key_ids()` which is fixed.
 		/// - DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`
 		/// - DbWrites: `NextKeys`, `origin account`
-		/// - DbWrites per key id: `KeyOwnder`
+		/// - DbWrites per key id: `KeyOwner`
 		/// # </weight>
 		#[weight = T::WeightInfo::purge_keys()]
 		pub fn purge_keys(origin) {
@@ -573,17 +576,17 @@
 }

 impl<T: Config> Module<T> {
-	/// Move on to next session. Register new validator set and session keys. Changes
-	/// to the validator set have a session of delay to take effect. This allows for
-	/// equivocation punishment after a fork.
+	/// Move on to next session. Register the new validator set and session keys. Changes to the
+	/// validator set have a session of delay to take effect. This allows for equivocation
+	/// punishment after a fork.
 	pub fn rotate_session() {
 		let session_index = CurrentIndex::get();
+		log::trace!(target: "runtime::session", "rotating session {:?}", session_index);
 		let changed = QueuedChanged::get();

 		// Inform the session handlers that a session is going to end.
 		T::SessionHandler::on_before_session_ending();
-
 		T::SessionManager::end_session(session_index);

 		// Get queued session keys and validators.
diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs
index 800d3379d7e3c..2ad939e5b166c 100644
--- a/frame/staking/src/benchmarking.rs
+++ b/frame/staking/src/benchmarking.rs
@@ -91,7 +91,7 @@ pub fn create_validator_with_nominators<T: Config>(
 	ValidatorCount::<T>::put(1);

 	// Start a new Era
-	let new_validators = Staking::<T>::new_era(SessionIndex::one()).unwrap();
+	let new_validators = Staking::<T>::try_trigger_new_era(SessionIndex::one(), true).unwrap();

 	assert_eq!(new_validators.len(), 1);
 	assert_eq!(new_validators[0], v_stash, "Our validator was not selected!");
@@ -484,7 +484,8 @@ benchmarks! {
 		)?;
 		let session_index = SessionIndex::one();
 	}: {
-		let validators = Staking::<T>::new_era(session_index).ok_or("`new_era` failed")?;
+		let validators = Staking::<T>::try_trigger_new_era(session_index, true)
+			.ok_or("`new_era` failed")?;
 		assert!(validators.len() == v as usize);
 	}
@@ -500,7 +501,7 @@
 			None,
 		)?;
 		// Start a new Era
-		let new_validators = Staking::<T>::new_era(SessionIndex::one()).unwrap();
+		let new_validators = Staking::<T>::try_trigger_new_era(SessionIndex::one(), true).unwrap();
 		assert!(new_validators.len() == v as usize);
 		let current_era = CurrentEra::<T>::get().unwrap();
diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs
index 30c2a160e9e72..58ab459d1bf28 100644
--- a/frame/staking/src/lib.rs
+++ b/frame/staking/src/lib.rs
@@ -304,7 +304,7 @@ use sp_runtime::{
 	curve::PiecewiseLinear,
 	traits::{
 		Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion,
-		AtLeast32BitUnsigned,
+		AtLeast32BitUnsigned, Bounded,
 	},
 };
 use sp_staking::{
@@ -542,7 +542,7 @@ impl<AccountId, Balance> StakingLedger<AccountId, Balance> where
 		if !slash_from_target.is_zero() {
 			*target -= slash_from_target;

-			// don't leave a dust balance in the staking system.
+			// Don't leave a dust balance in the staking system.
 			if *target <= minimum_balance {
 				slash_from_target += *target;
 				*value += sp_std::mem::replace(target, Zero::zero());
 			}
@@ -560,10 +560,10 @@
 				slash_out_of(total, &mut chunk.value, &mut value);
 				chunk.value
 			})
-			.take_while(|value| value.is_zero()) // take all fully-consumed chunks out.
+			.take_while(|value| value.is_zero()) // Take all fully-consumed chunks out.
 			.count();

-		// kill all drained chunks.
+		// Kill all drained chunks.
 		let _ = self.unlocking.drain(..i);

 		pre_total.saturating_sub(*total)
@@ -719,6 +719,8 @@ pub enum Forcing {
 	/// Not forcing anything - just let whatever happen.
 	NotForcing,
 	/// Force a new era, then reset to `NotForcing` as soon as it is done.
+	/// Note that this will keep forcing an election until a new era is triggered: if the election
+	/// fails, the next session's end will trigger a new election again, until one succeeds.
 	ForceNew,
 	/// Avoid a new era indefinitely.
 	ForceNone,
@@ -831,6 +833,13 @@ pub mod pallet {
 			DataProvider = Pallet<Self>,
 		>;

+		/// Something that provides the election functionality at genesis.
+		type GenesisElectionProvider: frame_election_provider_support::ElectionProvider<
+			Self::AccountId,
+			Self::BlockNumber,
+			DataProvider = Pallet<Self>,
+		>;
+
 		/// Maximum number of nominations per nominator.
 		const MAX_NOMINATIONS: u32;
@@ -1245,6 +1254,8 @@
 		Withdrawn(T::AccountId, BalanceOf<T>),
 		/// A nominator has been kicked from a validator. \[nominator, stash\]
 		Kicked(T::AccountId, T::AccountId),
+		/// The election failed. No new era is planned.
+		StakingElectionFailed,
 	}

 	#[pallet::error]
@@ -1376,7 +1387,7 @@
 				Err(Error::<T>::AlreadyPaired)?
 			}

-			// reject a bond which is considered to be _dust_.
+			// Reject a bond which is considered to be _dust_.
 			if value < T::Currency::minimum_balance() {
 				Err(Error::<T>::InsufficientValue)?
 			}
@@ -1442,7 +1453,7 @@
 			let extra = extra.min(max_additional);
 			ledger.total += extra;
 			ledger.active += extra;
-			// last check: the new active amount of ledger must be more than ED.
+			// Last check: the new active amount of ledger must be more than ED.
 			ensure!(ledger.active >= T::Currency::minimum_balance(), Error::<T>::InsufficientValue);

 			Self::deposit_event(Event::<T>::Bonded(stash, extra));
@@ -1560,7 +1571,7 @@
 			// portion to fall below existential deposit + will have no more unlocking chunks
 			// left. We can now safely remove all staking-related information.
 			Self::kill_stash(&stash, num_slashing_spans)?;
-			// remove the lock.
+			// Remove the lock.
 			T::Currency::remove_lock(STAKING_ID, &stash);
 			// This is worst case scenario, so we use the full weight and return None
 			None
@@ -1653,7 +1664,7 @@
 			let nominations = Nominations {
 				targets,
-				// initial nominations are considered submitted at era 0. See `Nominations` doc
+				// Initial nominations are considered submitted at era 0. See `Nominations` doc
 				submitted_in: Self::current_era().unwrap_or(0),
 				suppressed: false,
 			};
@@ -1805,6 +1816,12 @@
 		///
 		/// The dispatch origin must be Root.
 		///
+		/// # Warning
+		///
+		/// The election process starts multiple blocks before the end of the era.
+		/// Thus the election process may be ongoing when this is called. In this case the
+		/// election will continue until the next era is triggered.
+		///
 		/// # <weight>
 		/// - No arguments.
 		/// - Weight: O(1)
@@ -1822,6 +1839,12 @@
 		///
 		/// The dispatch origin must be Root.
 		///
+		/// # Warning
+		///
+		/// The election process starts multiple blocks before the end of the era.
+		/// If this is called just before a new era is triggered, the election process may not
+		/// have enough blocks to get a result.
+		///
 		/// # <weight>
 		/// - No arguments.
 		/// - Weight: O(1)
@@ -1870,10 +1893,10 @@
 		) -> DispatchResult {
 			ensure_root(origin)?;

-			// remove all staking-related information.
+			// Remove all staking-related information.
 			Self::kill_stash(&stash, num_slashing_spans)?;

-			// remove the lock.
+			// Remove the lock.
 			T::Currency::remove_lock(STAKING_ID, &stash);
 			Ok(())
 		}
@@ -1882,6 +1905,12 @@
 		///
 		/// The dispatch origin must be Root.
 		///
+		/// # Warning
+		///
+		/// The election process starts multiple blocks before the end of the era.
+		/// If this is called just before a new era is triggered, the election process may not
+		/// have enough blocks to get a result.
+		///
 		/// # <weight>
 		/// - Weight: O(1)
 		/// - Write: ForceEra
@@ -1992,7 +2021,7 @@
 			ensure!(!ledger.unlocking.is_empty(), Error::<T>::NoUnlockChunk);

 			let ledger = ledger.rebond(value);
-			// last check: the new active amount of ledger must be more than ED.
+			// Last check: the new active amount of ledger must be more than ED.
 			ensure!(ledger.active >= T::Currency::minimum_balance(), Error::<T>::InsufficientValue);

 			Self::deposit_event(Event::<T>::Bonded(ledger.stash.clone(), value));
@@ -2299,10 +2328,9 @@ impl<T: Config> Pallet<T> {
 	}

 	/// Plan a new session, potentially triggering a new era.
-	fn new_session(session_index: SessionIndex) -> Option<Vec<T::AccountId>> {
+	fn new_session(session_index: SessionIndex, is_genesis: bool) -> Option<Vec<T::AccountId>> {
 		if let Some(current_era) = Self::current_era() {
 			// Initial era has been set.
-
 			let current_era_start_session_index = Self::eras_start_session_index(current_era)
 				.unwrap_or_else(|| {
 					frame_support::print("Error: start_session_index must be set for current_era");
 					0
 				});

 			let era_length = session_index.checked_sub(current_era_start_session_index)
 				.unwrap_or(0); // Must never happen.

 			match ForceEra::<T>::get() {
-				// Will set to default again, which is `NotForcing`.
-				Forcing::ForceNew => ForceEra::<T>::kill(),
-				// Short circuit to `new_era`.
+				// Will be set to `NotForcing` again if a new era has been triggered.
+				Forcing::ForceNew => (),
+				// Short circuit to `try_trigger_new_era`.
 				Forcing::ForceAlways => (),
-				// Only go to `new_era` if deadline reached.
+				// Only go to `try_trigger_new_era` if deadline reached.
 				Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (),
 				_ => {
-					// either `Forcing::ForceNone`,
+					// Either `Forcing::ForceNone`,
 					// or `Forcing::NotForcing if era_length >= T::SessionsPerEra::get()`.
 					return None
 				},
 			}

-			// new era.
-			Self::new_era(session_index)
+			// New era.
+			let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis);
+			if maybe_new_era_validators.is_some()
+				&& matches!(ForceEra::<T>::get(), Forcing::ForceNew)
+			{
+				ForceEra::<T>::put(Forcing::NotForcing);
+			}
+
+			maybe_new_era_validators
 		} else {
-			// Set initial era
+			// Set initial era.
 			log!(debug, "Starting the first era.");
-			Self::new_era(session_index)
+			Self::try_trigger_new_era(session_index, is_genesis)
 		}
 	}
@@ -2390,12 +2425,12 @@
 		if active_era > bonding_duration {
 			let first_kept = active_era - bonding_duration;

-			// prune out everything that's from before the first-kept index.
+			// Prune out everything that's from before the first-kept index.
 			let n_to_prune = bonded.iter()
 				.take_while(|&&(era_idx, _)| era_idx < first_kept)
 				.count();

-			// kill slashing metadata.
+			// Kill slashing metadata.
 			for (pruned_era, _) in bonded.drain(..n_to_prune) {
 				slashing::clear_era_metadata::<T>(pruned_era);
 			}
@@ -2428,77 +2463,105 @@
 		}
 	}

-	/// Plan a new era. Return the potential new staking set.
-	fn new_era(start_session_index: SessionIndex) -> Option<Vec<T::AccountId>> {
+	/// Plan a new era.
+	///
+	/// * Bump the current era storage (which holds the latest planned era).
+	/// * Store start session index for the new planned era.
+	/// * Clean old era information.
+	/// * Store staking information for the new planned era.
+	///
+	/// Returns the new validator set.
+	pub fn trigger_new_era(
+		start_session_index: SessionIndex,
+		exposures: Vec<(T::AccountId, Exposure<T::AccountId, BalanceOf<T>>)>,
+	) -> Vec<T::AccountId> {
 		// Increment or set current era.
-		let current_era = CurrentEra::<T>::mutate(|s| {
+		let new_planned_era = CurrentEra::<T>::mutate(|s| {
 			*s = Some(s.map(|s| s + 1).unwrap_or(0));
 			s.unwrap()
 		});
-		ErasStartSessionIndex::<T>::insert(&current_era, &start_session_index);
+		ErasStartSessionIndex::<T>::insert(&new_planned_era, &start_session_index);

 		// Clean old era information.
-		if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) {
+		if let Some(old_era) = new_planned_era.checked_sub(Self::history_depth() + 1) {
 			Self::clear_era_information(old_era);
 		}

-		// Set staking information for new era.
-		let maybe_new_validators = Self::enact_election(current_era);
-
-		maybe_new_validators
+		// Set staking information for the new era.
+		Self::store_stakers_info(exposures, new_planned_era)
 	}

-	/// Enact and process the election using the `ElectionProvider` type.
+	/// Potentially plan a new era.
+	///
+	/// Get the election result from `T::ElectionProvider`.
+	/// In case the election result has more than [`MinimumValidatorCount`] validators, a new era is triggered.
 	///
-	/// This will also process the election, as noted in [`process_election`].
-	fn enact_election(current_era: EraIndex) -> Option<Vec<T::AccountId>> {
-		T::ElectionProvider::elect()
-			.map_err(|e| {
-				log!(warn, "election provider failed due to {:?}", e)
+	/// In case a new era is planned, the new validator set is returned.
+	fn try_trigger_new_era(start_session_index: SessionIndex, is_genesis: bool) -> Option<Vec<T::AccountId>> {
+		let (election_result, weight) = if is_genesis {
+			T::GenesisElectionProvider::elect().map_err(|e| {
+				log!(warn, "genesis election provider failed due to {:?}", e);
+				Self::deposit_event(Event::StakingElectionFailed);
 			})
-			.and_then(|(res, weight)| {
-				<frame_system::Pallet<T>>::register_extra_weight_unchecked(
-					weight,
-					frame_support::weights::DispatchClass::Mandatory,
-				);
-				Self::process_election(res, current_era)
+		} else {
+			T::ElectionProvider::elect().map_err(|e| {
+				log!(warn, "election provider failed due to {:?}", e);
+				Self::deposit_event(Event::StakingElectionFailed);
 			})
-			.ok()
-	}
+		}
+		.ok()?;

-	/// Process the output of the election.
-	///
-	/// This ensures enough validators have been elected, converts all supports to exposures and
-	/// writes them to the associated storage.
-	///
-	/// Returns `Err(())` if less than [`MinimumValidatorCount`] validators have been elected, `Ok`
-	/// otherwise.
- pub fn process_election( - flat_supports: frame_election_provider_support::Supports, - current_era: EraIndex, - ) -> Result, ()> { - let exposures = Self::collect_exposures(flat_supports); - let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + >::register_extra_weight_unchecked( + weight, + frame_support::weights::DispatchClass::Mandatory, + ); + + let exposures = Self::collect_exposures(election_result); - if (elected_stashes.len() as u32) < Self::minimum_validator_count().max(1) { + if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { // Session will panic if we ever return an empty validator set, thus max(1) ^^. - if current_era > 0 { - log!( + match CurrentEra::::get() { + Some(current_era) if current_era > 0 => log!( warn, - "chain does not have enough staking candidates to operate for era {:?} ({} elected, minimum is {})", - current_era, - elected_stashes.len(), + "chain does not have enough staking candidates to operate for era {:?} ({} \ + elected, minimum is {})", + CurrentEra::::get().unwrap_or(0), + exposures.len(), Self::minimum_validator_count(), - ); + ), + None => { + // The initial era is allowed to have no exposures. + // In this case the SessionManager is expected to choose a sensible validator + // set. + // TODO: this should be simplified #8911 + CurrentEra::::put(0); + ErasStartSessionIndex::::insert(&0, &start_session_index); + }, + _ => () } - return Err(()); + + Self::deposit_event(Event::StakingElectionFailed); + return None } + Self::deposit_event(Event::StakingElection); + Some(Self::trigger_new_era(start_session_index, exposures)) + } + + /// Process the output of the election. + /// + /// Store staking information for the new planned era + pub fn store_stakers_info( + exposures: Vec<(T::AccountId, Exposure>)>, + new_planned_era: EraIndex, + ) -> Vec { + let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + // Populate stakers, exposures, and the snapshot of validator prefs. let mut total_stake: BalanceOf = Zero::zero(); exposures.into_iter().for_each(|(stash, exposure)| { total_stake = total_stake.saturating_add(exposure.total); - >::insert(current_era, &stash, &exposure); + >::insert(new_planned_era, &stash, &exposure); let mut exposure_clipped = exposure; let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; @@ -2506,31 +2569,28 @@ impl Pallet { exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); exposure_clipped.others.truncate(clipped_max_len); } - >::insert(¤t_era, &stash, exposure_clipped); + >::insert(&new_planned_era, &stash, exposure_clipped); }); // Insert current era staking information - >::insert(¤t_era, total_stake); + >::insert(&new_planned_era, total_stake); - // collect the pref of all winners + // Collect the pref of all winners. for stash in &elected_stashes { let pref = Self::validators(stash); - >::insert(¤t_era, stash, pref); + >::insert(&new_planned_era, stash, pref); } - // emit event - Self::deposit_event(Event::::StakingElection); - - if current_era > 0 { + if new_planned_era > 0 { log!( info, "new validator set of size {:?} has been processed for era {:?}", elected_stashes.len(), - current_era, + new_planned_era, ); } - Ok(elected_stashes) + elected_stashes } /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a @@ -2546,7 +2606,7 @@ impl Pallet { supports .into_iter() .map(|(validator, support)| { - // build `struct exposure` from `support` + // Build `struct exposure` from `support`. 
let mut others = Vec::with_capacity(support.voters.len()); let mut own: BalanceOf = Zero::zero(); let mut total: BalanceOf = Zero::zero(); @@ -2681,12 +2741,12 @@ impl Pallet { let mut all_voters = Vec::new(); for (validator, _) in >::iter() { - // append self vote + // Append self vote. let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); all_voters.push(self_vote); } - // collect all slashing spans into a BTreeMap for further queries. + // Collect all slashing spans into a BTreeMap for further queries. let slashing_spans = >::iter().collect::>(); for (nominator, nominations) in >::iter() { @@ -2765,18 +2825,23 @@ impl frame_election_provider_support::ElectionDataProvider::get() { + Forcing::ForceNone => Bounded::max_value(), + Forcing::ForceNew | Forcing::ForceAlways => Zero::zero(), + Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => Zero::zero(), + Forcing::NotForcing => T::SessionsPerEra::get() + .saturating_sub(era_length) + // One session is computed in this_session_end. + .saturating_sub(1) + .into(), + }; now.saturating_add( until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)), @@ -2841,16 +2906,21 @@ impl frame_election_provider_support::ElectionDataProvider pallet_session::SessionManager for Pallet { fn new_session(new_index: SessionIndex) -> Option> { - log!(trace, "planning new_session({})", new_index); + log!(trace, "planning new session {}", new_index); + CurrentPlannedSession::::put(new_index); + Self::new_session(new_index, false) + } + fn new_session_genesis(new_index: SessionIndex) -> Option> { + log!(trace, "planning new session {} at genesis", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index) + Self::new_session(new_index, true) } fn start_session(start_index: SessionIndex) { - log!(trace, "starting start_session({})", start_index); + log!(trace, "starting session {}", start_index); Self::start_session(start_index) } fn end_session(end_index: SessionIndex) { - log!(trace, "ending end_session({})", end_index); + log!(trace, "ending session {}", end_index); Self::end_session(end_index) } } @@ -2872,6 +2942,20 @@ impl historical::SessionManager Option>)>> { + >::new_session_genesis(new_index).map(|validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. + .unwrap_or(0); + + validators.into_iter().map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }).collect() + }) + } fn start_session(start_index: SessionIndex) { >::start_session(start_index) } @@ -2960,7 +3044,7 @@ where let active_era = Self::active_era(); add_db_reads_writes(1, 0); if active_era.is_none() { - // this offence need not be re-submitted. + // This offence need not be re-submitted. return consumed_weight } active_era.expect("value checked not to be `None`; qed").index @@ -2974,7 +3058,7 @@ where let window_start = active_era.saturating_sub(T::BondingDuration::get()); - // fast path for active-era report - most likely. + // Fast path for active-era report - most likely. // `slash_session` cannot be in a future active era. It must be in `active_era` or before. let slash_era = if slash_session >= active_era_start_session_index { active_era @@ -2982,10 +3066,10 @@ where let eras = BondedEras::::get(); add_db_reads_writes(1, 0); - // reverse because it's more likely to find reports from recent eras. + // Reverse because it's more likely to find reports from recent eras. 
 			match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() {
 				Some(&(ref slash_era, _)) => *slash_era,
-				// before bonding period. defensive - should be filtered out.
+				// Before bonding period. Defensive - should be filtered out.
 				None => return consumed_weight,
 			}
 		};
@@ -3031,7 +3115,7 @@
 			}
 			unapplied.reporters = details.reporters.clone();
 			if slash_defer_duration == 0 {
-				// apply right away.
+				// Apply right away.
 				slashing::apply_slash::<T>(unapplied);
 				{ let slash_cost = (6, 5); ... ); }
 			} else {
-				// defer to end of some `slash_defer_duration` from now.
+				// Defer to end of some `slash_defer_duration` from now.
 				<Self as Store>::UnappliedSlashes::mutate(
 					active_era,
 					move |for_later| for_later.push(unapplied),
 				...
@@ -3071,7 +3155,7 @@ O: Offence,
 {
 	fn report_offence(reporters: Vec<T::AccountId>, offence: O) -> Result<(), OffenceError> {
-		// disallow any slashing from before the current bonding period.
+		// Disallow any slashing from before the current bonding period.
 		let offence_session = offence.session_index();
 		let bonded_eras = BondedEras::<T>::get();
diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs
index 211cc025300e0..f58cdf0d2350f 100644
--- a/frame/staking/src/mock.rs
+++ b/frame/staking/src/mock.rs
@@ -260,6 +260,7 @@ impl Config for Test {
 	type NextNewSession = Session;
 	type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator;
 	type ElectionProvider = onchain::OnChainSequentialPhragmen<Self>;
+	type GenesisElectionProvider = Self::ElectionProvider;
 	type WeightInfo = ();
 }
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index 4473e89585002..ee8f78769e70a 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -440,13 +440,26 @@ fn no_candidate_emergency_condition() {
 		let res = Staking::chill(Origin::signed(10));
 		assert_ok!(res);

-		// trigger era
-		mock::start_active_era(1);
+		let current_era = CurrentEra::<Test>::get();
+
+		// Try to trigger a new era.
+		mock::run_to_block(20);
+		assert_eq!(
+			*staking_events().last().unwrap(),
+			Event::StakingElectionFailed,
+		);
+		// No new era is created.
+		assert_eq!(current_era, CurrentEra::<Test>::get());
+
+		// Go to a far later session to see if the validators have changed.
+		mock::run_to_block(100);

-		// Previous ones are elected. chill is invalidates. TODO: #2494
+		// Previous ones are elected; the chill is not effective in the active era (era unchanged).
 		assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]);
-		// Though the validator preferences has been removed.
-		assert!(Staking::validators(11) != prefs);
+		// The chill is still pending.
+		assert!(!<Staking as Store>::Validators::contains_key(11));
+		// No new era is created.
+		assert_eq!(current_era, CurrentEra::<Test>::get());
 	});
 }
@@ -3970,6 +3983,34 @@
 				*staking_events().last().unwrap(),
 				Event::StakingElection
 			);
+
+			Staking::force_no_eras(Origin::root()).unwrap();
+			assert_eq!(Staking::next_election_prediction(System::block_number()), u64::max_value());
+
+			Staking::force_new_era_always(Origin::root()).unwrap();
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5);
+
+			Staking::force_new_era(Origin::root()).unwrap();
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5);
+
+			// Make the election fail.
+			MinimumValidatorCount::<Test>::put(1000);
+			run_to_block(50);
+			// The election failed; the next session will trigger a new election.
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 50 + 5);
+			// The new era is still forced until a new era is planned.
+			assert_eq!(ForceEra::<Test>::get(), Forcing::ForceNew);
+
+			MinimumValidatorCount::<Test>::put(2);
+			run_to_block(55);
+			assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25);
+			assert_eq!(staking_events().len(), 6);
+			assert_eq!(
+				*staking_events().last().unwrap(),
+				Event::StakingElection
+			);
+			// The new era has been planned; forcing is changed from `ForceNew` to `NotForcing`.
+			assert_eq!(ForceEra::<Test>::get(), Forcing::NotForcing);
 		})
 	}
 }
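For downstream runtimes, the one mandatory integration change in this diff is the new `GenesisElectionProvider` associated type on `pallet_staking::Config`. A minimal sketch of the two wirings that appear above, with `Runtime` standing in for the concrete runtime type (production runtimes use the on-chain Phragmen backup, while the test mocks simply reuse the regular provider):

```rust
use frame_election_provider_support::onchain;

impl pallet_staking::Config for Runtime {
	// ... all previously existing associated types stay unchanged ...

	// Regular per-era elections keep going through the multi-phase pallet.
	type ElectionProvider = ElectionProviderMultiPhase;
	// At genesis the multi-phase machinery has not run yet, so fall back to a
	// plain on-chain sequential Phragmen election, as the node runtime does.
	type GenesisElectionProvider = onchain::OnChainSequentialPhragmen<
		pallet_election_provider_multi_phase::OnChainConfig<Self>,
	>;
	// The test mocks take the simpler route instead:
	// type GenesisElectionProvider = Self::ElectionProvider;
}
```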