diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index f37fc08f964a..f90f149f2556 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -16,7 +16,9 @@ - [Scheduler Module](runtime/scheduler.md) - [Inclusion Module](runtime/inclusion.md) - [InclusionInherent Module](runtime/inclusioninherent.md) - - [Router Module](runtime/router.md) + - [DMP Module](runtime/dmp.md) + - [UMP Module](runtime/ump.md) + - [HRMP Module](runtime/hrmp.md) - [Session Info Module](runtime/session_info.md) - [Runtime APIs](runtime-api/README.md) - [Validators](runtime-api/validators.md) diff --git a/roadmap/implementers-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md index 63294d1d77fd..2dbe2ab14abe 100644 --- a/roadmap/implementers-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -24,6 +24,7 @@ Here you can find definitions of a bunch of jargon, usually specific to the Polk - Parathread: A parachain which is scheduled on a pay-as-you-go basis. - Proof-of-Validity (PoV): A stateless-client proof that a parachain candidate is valid, with respect to some validation function. - Relay Parent: A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. +- Router: The router module is a meta module that consists of three runtime modules responsible for routing messages between paras and the relay chain. The three separate runtime modules are: Dmp, Ump, Hrmp, each responsible for the respective part of message routing. - Runtime: The relay-chain state machine. - Runtime Module: See Module. - Runtime API: A means for the node-side behavior to access structured information based on the state of a fork of the blockchain. diff --git a/roadmap/implementers-guide/src/runtime/dmp.md b/roadmap/implementers-guide/src/runtime/dmp.md new file mode 100644 index 000000000000..6f125ca46b5e --- /dev/null +++ b/roadmap/implementers-guide/src/runtime/dmp.md @@ -0,0 +1,59 @@ +# DMP Module + +A module responsible for Downward Message Processing (DMP). See [Messaging Overview](../messaging.md) for more details. + +## Storage + +General storage entries + +```rust +/// Paras that are to be cleaned up at the end of the session. +/// The entries are sorted ascending by the para id. +OutgoingParas: Vec; +``` + +Storage layout required for implementation of DMP. + +```rust +/// The downward messages addressed for a certain para. +DownwardMessageQueues: map ParaId => Vec; +/// A mapping that stores the downward message queue MQC head for each para. +/// +/// Each link in this chain has a form: +/// `(prev_head, B, H(M))`, where +/// - `prev_head`: is the previous head hash or zero if none. +/// - `B`: is the relay-chain block number in which a message was appended. +/// - `H(M)`: is the hash of the message being appended. +DownwardMessageQueueHeads: map ParaId => Hash; +``` + +## Initialization + +No initialization routine runs for this module. + +## Routines + +Candidate Acceptance Function: + +* `check_processed_downward_messages(P: ParaId, processed_downward_messages: u32)`: + 1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long. + 1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty. + +Candidate Enactment: + +* `prune_dmq(P: ParaId, processed_downward_messages: u32)`: + 1. 
Remove the first `processed_downward_messages` from the `DownwardMessageQueues` of `P`. + +Utility routines. + +`queue_downward_message(P: ParaId, M: DownwardMessage)`: + 1. Check if the size of `M` exceeds the `config.max_downward_message_size`. If so, return an error. + 1. Wrap `M` into `InboundDownwardMessage` using the current block number for `sent_at`. + 1. Obtain a new MQC link for the resulting `InboundDownwardMessage` and replace `DownwardMessageQueueHeads` for `P` with the resulting hash. + 1. Add the resulting `InboundDownwardMessage` into `DownwardMessageQueues` for `P`. + +## Session Change + +1. Drain `OutgoingParas`. For each `P` happened to be in the list: + 1. Remove all `DownwardMessageQueues` of `P`. + 1. Remove `DownwardMessageQueueHeads` for `P`. diff --git a/roadmap/implementers-guide/src/runtime/router.md b/roadmap/implementers-guide/src/runtime/hrmp.md similarity index 73% rename from roadmap/implementers-guide/src/runtime/router.md rename to roadmap/implementers-guide/src/runtime/hrmp.md index ef7ce8ceb7bd..145a2f284530 100644 --- a/roadmap/implementers-guide/src/runtime/router.md +++ b/roadmap/implementers-guide/src/runtime/hrmp.md @@ -1,6 +1,6 @@ -# Router Module +# HRMP Module -The Router module is responsible for all messaging mechanisms supported between paras and the relay chain, specifically: UMP, DMP, HRMP and later XCMP. +A module responsible for Horizontally Relay-routed Message Passing (HRMP). See [Messaging Overview](../messaging.md) for more details. ## Storage @@ -12,61 +12,6 @@ General storage entries OutgoingParas: Vec; ``` -### Upward Message Passing (UMP) - -```rust -/// The messages waiting to be handled by the relay-chain originating from a certain parachain. -/// -/// Note that some upward messages might have been already processed by the inclusion logic. E.g. -/// channel management messages. -/// -/// The messages are processed in FIFO order. -RelayDispatchQueues: map ParaId => Vec; -/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. -/// -/// First item in the tuple is the count of messages and second -/// is the total length (in bytes) of the message payloads. -/// -/// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of -/// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of -/// loading the whole message queue if only the total size and count are required. -/// -/// Invariant: -/// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`. -RelayDispatchQueueSize: map ParaId => (u32, u32); // (num_messages, total_bytes) -/// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. -/// -/// Invariant: -/// - The set of items from this vector should be exactly the set of the keys in -/// `RelayDispatchQueues` and `RelayDispatchQueueSize`. -NeedsDispatch: Vec; -/// This is the para that gets dispatched first during the next upward dispatchable queue -/// execution round. -/// -/// Invariant: -/// - If `Some(para)`, then `para` must be present in `NeedsDispatch`. -NextDispatchRoundStartWith: Option; -``` - -### Downward Message Passing (DMP) - -Storage layout required for implementation of DMP. - -```rust -/// The downward messages addressed for a certain para. -DownwardMessageQueues: map ParaId => Vec; -/// A mapping that stores the downward message queue MQC head for each para. 
-/// -/// Each link in this chain has a form: -/// `(prev_head, B, H(M))`, where -/// - `prev_head`: is the previous head hash or zero if none. -/// - `B`: is the relay-chain block number in which a message was appended. -/// - `H(M)`: is the hash of the message being appended. -DownwardMessageQueueHeads: map ParaId => Hash; -``` - -### HRMP - HRMP related structs: ```rust @@ -189,13 +134,6 @@ No initialization routine runs for this module. Candidate Acceptance Function: -* `check_upward_messages(P: ParaId, Vec`): - 1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages. - 1. Checks that no message exceeds `config.max_upward_message_size`. - 1. Verify that `RelayDispatchQueueSize` for `P` has enough capacity for the messages -* `check_processed_downward_messages(P: ParaId, processed_downward_messages)`: - 1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long. - 1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty. * `check_hrmp_watermark(P: ParaId, new_hrmp_watermark)`: 1. `new_hrmp_watermark` should be strictly greater than the value of `HrmpWatermarks` for `P` (if any). 1. `new_hrmp_watermark` must not be greater than the context's block number. @@ -232,42 +170,12 @@ Candidate Enactment: > parametrization this shouldn't be a big of a deal. > If that becomes a problem consider introducing an extra dictionary which says at what block the given sender > sent a message to the recipient. -* `prune_dmq(P: ParaId, processed_downward_messages)`: - 1. Remove the first `processed_downward_messages` from the `DownwardMessageQueues` of `P`. -* `enact_upward_messages(P: ParaId, Vec)`: - 1. Process each upward message `M` in order: - 1. Append the message to `RelayDispatchQueues` for `P` - 1. Increment the size and the count in `RelayDispatchQueueSize` for `P`. - 1. Ensure that `P` is present in `NeedsDispatch`. The following routine is intended to be called in the same time when `Paras::schedule_para_cleanup` is called. `schedule_para_cleanup(ParaId)`: 1. Add the para into the `OutgoingParas` vector maintaining the sorted order. -The following routine is meant to execute pending entries in upward message queues. This function doesn't fail, even if -dispatcing any of individual upward messages returns an error. - -`process_pending_upward_messages()`: - 1. Initialize a cumulative weight counter `T` to 0 - 1. Iterate over items in `NeedsDispatch` cyclically, starting with `NextDispatchRoundStartWith`. If the item specified is `None` start from the beginning. For each `P` encountered: - 1. Dequeue the first upward message `D` from `RelayDispatchQueues` for `P` - 1. Decrement the size of the message from `RelayDispatchQueueSize` for `P` - 1. Delegate processing of the message to the runtime. The weight consumed is added to `T`. - 1. If `T >= config.preferred_dispatchable_upward_messages_step_weight`, set `NextDispatchRoundStartWith` to `P` and finish processing. - 1. If `RelayDispatchQueues` for `P` became empty, remove `P` from `NeedsDispatch`. - 1. If `NeedsDispatch` became empty then finish processing and set `NextDispatchRoundStartWith` to `None`. - > NOTE that in practice we would need to approach the weight calculation more thoroughly, i.e. incorporate all operations - > that could take place on the course of handling these upward messages. - -Utility routines. - -`queue_downward_message(P: ParaId, M: DownwardMessage)`: - 1. 
Check if the size of `M` exceeds the `config.max_downward_message_size`. If so, return an error. - 1. Wrap `M` into `InboundDownwardMessage` using the current block number for `sent_at`. - 1. Obtain a new MQC link for the resulting `InboundDownwardMessage` and replace `DownwardMessageQueueHeads` for `P` with the resulting hash. - 1. Add the resulting `InboundDownwardMessage` into `DownwardMessageQueues` for `P`. - ## Entry-points The following entry-points are meant to be used for HRMP channel management. @@ -336,15 +244,8 @@ the parachain executed the message. 1. Drain `OutgoingParas`. For each `P` happened to be in the list: 1. Remove all inbound channels of `P`, i.e. `(_, P)`, 1. Remove all outbound channels of `P`, i.e. `(P, _)`, - 1. Remove all `DownwardMessageQueues` of `P`. - 1. Remove `DownwardMessageQueueHeads` for `P`. - 1. Remove `RelayDispatchQueueSize` of `P`. - 1. Remove `RelayDispatchQueues` of `P`. 1. Remove `HrmpOpenChannelRequestCount` for `P` 1. Remove `HrmpAcceptedChannelRequestCount` for `P`. - 1. Remove `P` if it exists in `NeedsDispatch`. - 1. If `P` is in `NextDispatchRoundStartWith`, then reset it to `None` - - Note that if we don't remove the open/close requests since they are going to die out naturally at the end of the session. 1. For each channel designator `D` in `HrmpOpenChannelRequestsList` we query the request `R` from `HrmpOpenChannelRequests`: 1. if `R.confirmed = false`: 1. increment `R.age` by 1. diff --git a/roadmap/implementers-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md index 46f3e5211674..f2d9f214225a 100644 --- a/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/roadmap/implementers-guide/src/runtime/inclusion.md @@ -67,20 +67,20 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. Ensure that any code upgrade scheduled by the candidate does not happen within `config.validation_upgrade_frequency` of `Paras::last_code_upgrade(para_id, true)`, if any, comparing against the value of `Paras::FutureCodeUpgrades` for the given para ID. 1. Check the collator's signature on the candidate data. 1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup. - 1. call `Router::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid. - 1. call `Router::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained. - 1. call `Router::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check rules of processing the HRMP watermark. - 1. using `Router::check_outbound_hrmp(sender, commitments.horizontal_messages)` ensure that the each candidate sent a valid set of horizontal messages + 1. call `Ump::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid. + 1. call `Dmp::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained. + 1. call `Hrmp::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check rules of processing the HRMP watermark. + 1. using `Hrmp::check_outbound_hrmp(sender, commitments.horizontal_messages)` ensure that the each candidate sent a valid set of horizontal messages 1. 
create an entry in the `PendingAvailability` map for each backed candidate with a blank `availability_votes` bitfield. 1. create a corresponding entry in the `PendingAvailabilityCommitments` with the commitments. 1. Return a `Vec` of all scheduled cores of the list of passed assignments that a candidate was successfully backed for, sorted ascending by CoreIndex. * `enact_candidate(relay_parent_number: BlockNumber, CommittedCandidateReceipt)`: 1. If the receipt contains a code upgrade, Call `Paras::schedule_code_upgrade(para_id, code, relay_parent_number + config.validationl_upgrade_delay)`. > TODO: Note that this is safe as long as we never enact candidates where the relay parent is across a session boundary. In that case, which we should be careful to avoid with contextual execution, the configuration might have changed and the para may de-sync from the host's understanding of it. - 1. call `Router::enact_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments). - 1. call `Router::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`. - 1. call `Router::prune_hrmp` with the para id of the candiate and the candidate's `hrmp_watermark`. - 1. call `Router::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, + 1. call `Ump::enact_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments). + 1. call `Dmp::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`. + 1. call `Hrmp::prune_hrmp` with the para id of the candiate and the candidate's `hrmp_watermark`. + 1. call `Hrmp::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, 1. Call `Paras::note_new_head` using the `HeadData` from the receipt and `relay_parent_number`. * `collect_pending`: diff --git a/roadmap/implementers-guide/src/runtime/inclusioninherent.md b/roadmap/implementers-guide/src/runtime/inclusioninherent.md index 9290025e2d05..54ebf3af7b52 100644 --- a/roadmap/implementers-guide/src/runtime/inclusioninherent.md +++ b/roadmap/implementers-guide/src/runtime/inclusioninherent.md @@ -22,5 +22,5 @@ Included: Option<()>, 1. Invoke `Scheduler::schedule(freed)` 1. Invoke the `Inclusion::process_candidates` routine with the parameters `(backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`. 1. Call `Scheduler::occupied` using the return value of the `Inclusion::process_candidates` call above, first sorting the list of assigned core indices. - 1. Call the `Router::process_pending_upward_messages` routine to execute all messages in upward dispatch queues. + 1. Call the `Ump::process_pending_upward_messages` routine to execute all messages in upward dispatch queues. 1. If all of the above succeeds, set `Included` to `Some(())`. diff --git a/roadmap/implementers-guide/src/runtime/initializer.md b/roadmap/implementers-guide/src/runtime/initializer.md index 5fd2bc3bd60f..fd7324b2198d 100644 --- a/roadmap/implementers-guide/src/runtime/initializer.md +++ b/roadmap/implementers-guide/src/runtime/initializer.md @@ -23,8 +23,10 @@ The other parachains modules are initialized in this order: 1. Paras 1. Scheduler 1. Inclusion -1. Validity. -1. Router. +1. 
Validity
+1. DMP
+1. UMP
+1. HRMP
 
 The [Configuration Module](configuration.md) is first, since all other modules need to operate under the same configuration as each other. It would lead to inconsistency if, for example, the scheduler ran first and then the configuration was updated before the Inclusion module.
diff --git a/roadmap/implementers-guide/src/runtime/ump.md b/roadmap/implementers-guide/src/runtime/ump.md
new file mode 100644
index 000000000000..ff2e9e09b997
--- /dev/null
+++ b/roadmap/implementers-guide/src/runtime/ump.md
@@ -0,0 +1,100 @@
+# UMP Module
+
+A module responsible for Upward Message Passing (UMP). See [Messaging Overview](../messaging.md) for more details.
+
+## Storage
+
+General storage entries
+
+```rust
+/// Paras that are to be cleaned up at the end of the session.
+/// The entries are sorted ascending by the para id.
+OutgoingParas: Vec<ParaId>;
+```
+
+Storage related to UMP
+
+```rust
+/// The messages waiting to be handled by the relay-chain originating from a certain parachain.
+///
+/// Note that some upward messages might have been already processed by the inclusion logic. E.g.
+/// channel management messages.
+///
+/// The messages are processed in FIFO order.
+RelayDispatchQueues: map ParaId => Vec<UpwardMessage>;
+/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueues`.
+///
+/// The first item in the tuple is the count of messages and the second
+/// is the total length (in bytes) of the message payloads.
+///
+/// Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of
+/// messages by looking only at `RelayDispatchQueues`. This mapping is separate to avoid the cost of
+/// loading the whole message queue if only the total size and count are required.
+///
+/// Invariant:
+/// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`.
+RelayDispatchQueueSize: map ParaId => (u32, u32); // (num_messages, total_bytes)
+/// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry.
+///
+/// Invariant:
+/// - The set of items from this vector should be exactly the set of the keys in
+///   `RelayDispatchQueues` and `RelayDispatchQueueSize`.
+NeedsDispatch: Vec<ParaId>;
+/// This is the para that gets dispatched first during the next upward dispatchable queue
+/// execution round.
+///
+/// Invariant:
+/// - If `Some(para)`, then `para` must be present in `NeedsDispatch`.
+NextDispatchRoundStartWith: Option<ParaId>;
+```
+
+## Initialization
+
+No initialization routine runs for this module.
+
+## Routines
+
+Candidate Acceptance Function:
+
+* `check_upward_messages(P: ParaId, Vec<UpwardMessage>)`:
+  1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages.
+  1. Checks that no message exceeds `config.max_upward_message_size`.
+  1. Verify that `RelayDispatchQueueSize` for `P` has enough capacity for the messages.
+
+Candidate Enactment:
+
+* `enact_upward_messages(P: ParaId, Vec<UpwardMessage>)`:
+  1. Process each upward message `M` in order:
+      1. Append the message to `RelayDispatchQueues` for `P`.
+      1. Increment the size and the count in `RelayDispatchQueueSize` for `P`.
+      1. Ensure that `P` is present in `NeedsDispatch`.
+
+The following routine is intended to be called at the same time as `Paras::schedule_para_cleanup` is called.
+
+`schedule_para_cleanup(ParaId)`:
+  1. Add the para into the `OutgoingParas` vector, maintaining the sorted order.
+
+The following routine is meant to execute pending entries in upward message queues.
This function doesn't fail, even if +dispatcing any of individual upward messages returns an error. + +`process_pending_upward_messages()`: + 1. Initialize a cumulative weight counter `T` to 0 + 1. Iterate over items in `NeedsDispatch` cyclically, starting with `NextDispatchRoundStartWith`. If the item specified is `None` start from the beginning. For each `P` encountered: + 1. Dequeue the first upward message `D` from `RelayDispatchQueues` for `P` + 1. Decrement the size of the message from `RelayDispatchQueueSize` for `P` + 1. Delegate processing of the message to the runtime. The weight consumed is added to `T`. + 1. If `T >= config.preferred_dispatchable_upward_messages_step_weight`, set `NextDispatchRoundStartWith` to `P` and finish processing. + 1. If `RelayDispatchQueues` for `P` became empty, remove `P` from `NeedsDispatch`. + 1. If `NeedsDispatch` became empty then finish processing and set `NextDispatchRoundStartWith` to `None`. + > NOTE that in practice we would need to approach the weight calculation more thoroughly, i.e. incorporate all operations + > that could take place on the course of handling these upward messages. + +## Session Change + +1. Drain `OutgoingParas`. For each `P` happened to be in the list:. + 1. Remove `RelayDispatchQueueSize` of `P`. + 1. Remove `RelayDispatchQueues` of `P`. + 1. Remove `P` if it exists in `NeedsDispatch`. + 1. If `P` is in `NextDispatchRoundStartWith`, then reset it to `None` + - Note that if we don't remove the open/close requests since they are going to die out naturally at the end of the session. diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index dab0bb02e250..6ecd99aee9fe 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -33,7 +33,7 @@ use runtime_parachains::{ self, ParaGenesisArgs, }, - router, + dmp, ump, hrmp, ensure_parachain, Origin, }; @@ -41,7 +41,7 @@ use runtime_parachains::{ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -pub trait Trait: paras::Trait + router::Trait { +pub trait Trait: paras::Trait + dmp::Trait + ump::Trait + hrmp::Trait { /// The aggregated origin type must support the `parachains` origin. We require that we can /// infallibly convert between this origin and the system origin, but in reality, they're the /// same type, we just can't express that to the Rust type system without writing a `where` @@ -125,7 +125,7 @@ decl_module! { parachain: false, }; - >::schedule_para_initialize(id, genesis); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -150,8 +150,7 @@ decl_module! 
{ let debtor = >::take(id); let _ = ::Currency::unreserve(&debtor, T::ParathreadDeposit::get()); - >::schedule_para_cleanup(id); - >::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); Ok(()) } @@ -231,7 +230,7 @@ impl Module { parachain: true, }; - >::schedule_para_initialize(id, genesis); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -242,8 +241,7 @@ impl Module { ensure!(is_parachain, Error::::InvalidChainId); - >::schedule_para_cleanup(id); - >::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); Ok(()) } @@ -267,7 +265,7 @@ mod tests { impl_outer_origin, impl_outer_dispatch, assert_ok, parameter_types, }; use keyring::Sr25519Keyring; - use runtime_parachains::{initializer, configuration, inclusion, router, scheduler}; + use runtime_parachains::{initializer, configuration, inclusion, scheduler, dmp, ump, hrmp}; use pallet_session::OneSessionHandler; impl_outer_origin! { @@ -425,8 +423,13 @@ mod tests { type WeightInfo = (); } - impl router::Trait for Test { + impl dmp::Trait for Test {} + + impl ump::Trait for Test { type UmpSink = (); + } + + impl hrmp::Trait for Test { type Origin = Origin; } diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs index 80f64bf1718b..19245ac873d1 100644 --- a/runtime/common/src/paras_sudo_wrapper.rs +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -23,13 +23,12 @@ use frame_support::{ }; use frame_system::ensure_root; use runtime_parachains::{ - router, - paras::{self, ParaGenesisArgs}, + dmp, ump, hrmp, paras::{self, ParaGenesisArgs}, }; use primitives::v1::Id as ParaId; /// The module's configuration trait. -pub trait Trait: paras::Trait + router::Trait { } +pub trait Trait: paras::Trait + dmp::Trait + ump::Trait + hrmp::Trait { } decl_error! { pub enum Error for Module { } @@ -48,7 +47,7 @@ decl_module! { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; - paras::Module::::schedule_para_initialize(id, genesis); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -56,8 +55,7 @@ decl_module! { #[weight = (1_000, DispatchClass::Operational)] pub fn sudo_schedule_para_cleanup(origin, id: ParaId) -> DispatchResult { ensure_root(origin)?; - paras::Module::::schedule_para_cleanup(id); - router::Module::::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); Ok(()) } } diff --git a/runtime/parachains/src/router/dmp.rs b/runtime/parachains/src/dmp.rs similarity index 64% rename from runtime/parachains/src/router/dmp.rs rename to runtime/parachains/src/dmp.rs index cc3163e5435c..5b49479c4bbf 100644 --- a/runtime/parachains/src/router/dmp.rs +++ b/runtime/parachains/src/dmp.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{Trait, Module, Store}; -use crate::configuration::HostConfiguration; -use frame_support::{StorageMap, weights::Weight, traits::Get}; +use crate::{ + configuration::{self, HostConfiguration}, + initializer, +}; +use frame_support::{decl_module, decl_storage, StorageMap, weights::Weight, traits::Get}; use sp_std::{fmt, prelude::*}; use sp_runtime::traits::{BlakeTwo256, Hash as HashT, SaturatedConversion}; use primitives::v1::{Id as ParaId, DownwardMessage, InboundDownwardMessage, Hash}; @@ -60,13 +62,72 @@ impl fmt::Debug for ProcessedDownwardMessagesAcceptanceErr { } } +pub trait Trait: frame_system::Trait + configuration::Trait {} + +decl_storage! 
{ + trait Store for Module as Dmp { + /// Paras that are to be cleaned up at the end of the session. + /// The entries are sorted ascending by the para id. + OutgoingParas: Vec; + + /// The downward messages addressed for a certain para. + DownwardMessageQueues: map hasher(twox_64_concat) ParaId => Vec>; + /// A mapping that stores the downward message queue MQC head for each para. + /// + /// Each link in this chain has a form: + /// `(prev_head, B, H(M))`, where + /// - `prev_head`: is the previous head hash or zero if none. + /// - `B`: is the relay-chain block number in which a message was appended. + /// - `H(M)`: is the hash of the message being appended. + DownwardMessageQueueHeads: map hasher(twox_64_concat) ParaId => Hash; + } +} + +decl_module! { + /// The DMP module. + pub struct Module for enum Call where origin: ::Origin { } +} + /// Routines and getters related to downward message passing. impl Module { - pub(crate) fn clean_dmp_after_outgoing(outgoing_para: ParaId) { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + _notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. + fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_dmp_after_outgoing(outgoing_para); + } + } + + fn clean_dmp_after_outgoing(outgoing_para: ParaId) { ::DownwardMessageQueues::remove(&outgoing_para); ::DownwardMessageQueueHeads::remove(&outgoing_para); } + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + /// Enqueue a downward message to a specific recipient para. /// /// When encoded, the message should not exceed the `config.max_downward_message_size`. 
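For illustration, a minimal sketch of the MQC link computation that `queue_downward_message` performs when it refreshes `DownwardMessageQueueHeads`, i.e. the `(prev_head, B, H(M))` structure documented in the storage comments above. The helper name is hypothetical, the hasher is the `BlakeTwo256` already imported by this module, and the exact encoding of the link is an assumption of the sketch rather than a specification:

```rust
use codec::Encode;
use primitives::v1::{DownwardMessage, Hash};
use sp_runtime::traits::{BlakeTwo256, Hash as HashT};

/// Hypothetical helper: compute the next MQC head from the previous head, the relay-chain
/// block number at which the message was enqueued, and the hash of the message itself.
fn next_mqc_head<BlockNumber: Encode>(
	prev_head: Hash,
	sent_at: BlockNumber,
	msg: &DownwardMessage,
) -> Hash {
	// Each link commits to `(prev_head, B, H(M))`.
	BlakeTwo256::hash_of(&(prev_head, sent_at, BlakeTwo256::hash(msg)))
}
```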
@@ -165,19 +226,45 @@ impl Module { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Configuration, Router, new_test_ext}; - use crate::router::{ - OutgoingParas, - tests::{default_genesis_config, run_to_block}, - }; + use primitives::v1::BlockNumber; use frame_support::StorageValue; + use frame_support::traits::{OnFinalize, OnInitialize}; use codec::Encode; + use crate::mock::{Configuration, new_test_ext, System, Dmp, GenesisConfig as MockGenesisConfig}; + + pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { + while System::block_number() < to { + let b = System::block_number(); + Dmp::initializer_finalize(); + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { + Dmp::initializer_on_new_session(&Default::default()); + } + Dmp::initializer_initialize(b + 1); + } + } + + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } fn queue_downward_message( para_id: ParaId, msg: DownwardMessage, ) -> Result<(), QueueDownwardMessageError> { - Router::queue_downward_message(&Configuration::config(), para_id, msg) + Dmp::queue_downward_message(&Configuration::config(), para_id, msg) } #[test] @@ -194,23 +281,23 @@ mod tests { queue_downward_message(b, vec![4, 5, 6]).unwrap(); queue_downward_message(c, vec![7, 8, 9]).unwrap(); - Router::schedule_para_cleanup(a); + Dmp::schedule_para_cleanup(a); // run to block without session change. run_to_block(2, None); - assert!(!::DownwardMessageQueues::get(&a).is_empty()); - assert!(!::DownwardMessageQueues::get(&b).is_empty()); - assert!(!::DownwardMessageQueues::get(&c).is_empty()); + assert!(!::DownwardMessageQueues::get(&a).is_empty()); + assert!(!::DownwardMessageQueues::get(&b).is_empty()); + assert!(!::DownwardMessageQueues::get(&c).is_empty()); - Router::schedule_para_cleanup(b); + Dmp::schedule_para_cleanup(b); // run to block changing the session. run_to_block(3, Some(vec![3])); - assert!(::DownwardMessageQueues::get(&a).is_empty()); - assert!(::DownwardMessageQueues::get(&b).is_empty()); - assert!(!::DownwardMessageQueues::get(&c).is_empty()); + assert!(::DownwardMessageQueues::get(&a).is_empty()); + assert!(::DownwardMessageQueues::get(&b).is_empty()); + assert!(!::DownwardMessageQueues::get(&c).is_empty()); // verify that the outgoing paras are emptied. assert!(OutgoingParas::get().is_empty()) @@ -223,15 +310,15 @@ mod tests { let b = ParaId::from(228); new_test_ext(default_genesis_config()).execute_with(|| { - assert_eq!(Router::dmq_length(a), 0); - assert_eq!(Router::dmq_length(b), 0); + assert_eq!(Dmp::dmq_length(a), 0); + assert_eq!(Dmp::dmq_length(b), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); - assert_eq!(Router::dmq_length(a), 1); - assert_eq!(Router::dmq_length(b), 0); - assert!(!Router::dmq_mqc_head(a).is_zero()); - assert!(Router::dmq_mqc_head(b).is_zero()); + assert_eq!(Dmp::dmq_length(a), 1); + assert_eq!(Dmp::dmq_length(b), 0); + assert!(!Dmp::dmq_mqc_head(a).is_zero()); + assert!(Dmp::dmq_mqc_head(b).is_zero()); }); } @@ -241,20 +328,20 @@ mod tests { new_test_ext(default_genesis_config()).execute_with(|| { // processed_downward_messages=0 is allowed when the DMQ is empty. 
- assert!(Router::check_processed_downward_messages(a, 0).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 0).is_ok()); queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(a, vec![4, 5, 6]).unwrap(); queue_downward_message(a, vec![7, 8, 9]).unwrap(); // 0 doesn't pass if the DMQ has msgs. - assert!(!Router::check_processed_downward_messages(a, 0).is_ok()); + assert!(!Dmp::check_processed_downward_messages(a, 0).is_ok()); // a candidate can consume up to 3 messages - assert!(Router::check_processed_downward_messages(a, 1).is_ok()); - assert!(Router::check_processed_downward_messages(a, 2).is_ok()); - assert!(Router::check_processed_downward_messages(a, 3).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 1).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 2).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 3).is_ok()); // there is no 4 messages in the queue - assert!(!Router::check_processed_downward_messages(a, 4).is_ok()); + assert!(!Dmp::check_processed_downward_messages(a, 4).is_ok()); }); } @@ -263,19 +350,19 @@ mod tests { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { - assert_eq!(Router::dmq_length(a), 0); + assert_eq!(Dmp::dmq_length(a), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(a, vec![4, 5, 6]).unwrap(); queue_downward_message(a, vec![7, 8, 9]).unwrap(); - assert_eq!(Router::dmq_length(a), 3); + assert_eq!(Dmp::dmq_length(a), 3); // pruning 0 elements shouldn't change anything. - Router::prune_dmq(a, 0); - assert_eq!(Router::dmq_length(a), 3); + Dmp::prune_dmq(a, 0); + assert_eq!(Dmp::dmq_length(a), 3); - Router::prune_dmq(a, 2); - assert_eq!(Router::dmq_length(a), 1); + Dmp::prune_dmq(a, 2); + assert_eq!(Dmp::dmq_length(a), 1); }); } diff --git a/runtime/parachains/src/router/hrmp.rs b/runtime/parachains/src/hrmp.rs similarity index 75% rename from runtime/parachains/src/router/hrmp.rs rename to runtime/parachains/src/hrmp.rs index 3bdd895cea8a..af8ae8eb1363 100644 --- a/runtime/parachains/src/router/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -14,19 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{dmp, Error as DispatchError, Module, Store, Trait}; use crate::{ + ensure_parachain, configuration::{self, HostConfiguration}, - paras, + initializer, paras, dmp, }; use codec::{Decode, Encode}; -use frame_support::{ensure, traits::Get, weights::Weight, StorageMap, StorageValue}; +use frame_support::{ + decl_storage, decl_module, decl_error, ensure, traits::Get, weights::Weight, StorageMap, + StorageValue, dispatch::DispatchResult, +}; use primitives::v1::{ Balance, Hash, HrmpChannelId, Id as ParaId, InboundHrmpMessage, OutboundHrmpMessage, SessionIndex, }; use sp_runtime::traits::{BlakeTwo256, Hash as HashT}; -use sp_std::{mem, fmt, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, prelude::*}; +use sp_std::{ + mem, fmt, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + prelude::*, +}; /// A description of a request to open an HRMP channel. 
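///
/// A request is created by the sender through the `hrmp_init_open_channel` entry point and is
/// marked as confirmed once the recipient calls `hrmp_accept_open_channel`. Confirmed requests
/// are materialized into actual channels by `process_hrmp_open_channel_requests` during the
/// next session change.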
#[derive(Encode, Decode)] @@ -135,8 +142,7 @@ where } => write!( fmt, "the HRMP watermark is not advanced relative to the last watermark ({:?} > {:?})", - new_watermark, - last_watermark, + new_watermark, last_watermark, ), AheadRelayParent { new_watermark, @@ -144,13 +150,12 @@ where } => write!( fmt, "the HRMP watermark is ahead the relay-parent ({:?} > {:?})", - new_watermark, - relay_chain_parent_number, + new_watermark, relay_chain_parent_number ), LandsOnBlockWithNoMessages { new_watermark } => write!( fmt, "the HRMP watermark ({:?}) doesn't land on a block with messages received", - new_watermark, + new_watermark ), } } @@ -163,8 +168,7 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { MoreMessagesThanPermitted { sent, permitted } => write!( fmt, "more HRMP messages than permitted by config ({} > {})", - sent, - permitted, + sent, permitted, ), NotSorted { idx } => write!( fmt, @@ -174,9 +178,7 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { NoSuchChannel { idx, channel_id } => write!( fmt, "the HRMP message at index {} is sent to a non existent channel {:?}->{:?}", - idx, - channel_id.sender, - channel_id.recipient, + idx, channel_id.sender, channel_id.recipient, ), MaxMessageSizeExceeded { idx, @@ -185,9 +187,7 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { } => write!( fmt, "the HRMP message at index {} exceeds the negotiated channel maximum message size ({} > {})", - idx, - msg_size, - max_size, + idx, msg_size, max_size, ), TotalSizeExceeded { idx, @@ -196,23 +196,205 @@ impl fmt::Debug for OutboundHrmpAcceptanceErr { } => write!( fmt, "sending the HRMP message at index {} would exceed the neogitiated channel total size ({} > {})", - idx, - total_size, - limit, + idx, total_size, limit, ), CapacityExceeded { idx, count, limit } => write!( fmt, "sending the HRMP message at index {} would exceed the neogitiated channel capacity ({} > {})", - idx, - count, - limit, + idx, count, limit, ), } } } +pub trait Trait: frame_system::Trait + configuration::Trait + paras::Trait + dmp::Trait { + type Origin: From + + From<::Origin> + + Into::Origin>>; +} + +decl_storage! { + trait Store for Module as Hrmp { + /// Paras that are to be cleaned up at the end of the session. + /// The entries are sorted ascending by the para id. + OutgoingParas: Vec; + + + /// The set of pending HRMP open channel requests. + /// + /// The set is accompanied by a list for iteration. + /// + /// Invariant: + /// - There are no channels that exists in list but not in the set and vice versa. + HrmpOpenChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option; + HrmpOpenChannelRequestsList: Vec; + + /// This mapping tracks how many open channel requests are inititated by a given sender para. + /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` + /// as the number of `HrmpOpenChannelRequestCount` for `X`. + HrmpOpenChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; + /// This mapping tracks how many open channel requests were accepted by a given recipient para. + /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with + /// `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`. + HrmpAcceptedChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; + + /// A set of pending HRMP close channel requests that are going to be closed during the session change. + /// Used for checking if a given channel is registered for closure. 
+ /// + /// The set is accompanied by a list for iteration. + /// + /// Invariant: + /// - There are no channels that exists in list but not in the set and vice versa. + HrmpCloseChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option<()>; + HrmpCloseChannelRequestsList: Vec; + + /// The HRMP watermark associated with each para. + /// Invariant: + /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session. + HrmpWatermarks: map hasher(twox_64_concat) ParaId => Option; + /// HRMP channel data associated with each para. + /// Invariant: + /// - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session. + HrmpChannels: map hasher(twox_64_concat) HrmpChannelId => Option; + /// Ingress/egress indexes allow to find all the senders and receivers given the opposite + /// side. I.e. + /// + /// (a) ingress index allows to find all the senders for a given recipient. + /// (b) egress index allows to find all the recipients for a given sender. + /// + /// Invariants: + /// - for each ingress index entry for `P` each item `I` in the index should present in `HrmpChannels` + /// as `(I, P)`. + /// - for each egress index entry for `P` each item `E` in the index should present in `HrmpChannels` + /// as `(P, E)`. + /// - there should be no other dangling channels in `HrmpChannels`. + /// - the vectors are sorted. + HrmpIngressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec; + HrmpEgressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec; + /// Storage for the messages for each channel. + /// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`. + HrmpChannelContents: map hasher(twox_64_concat) HrmpChannelId => Vec>; + /// Maintains a mapping that can be used to answer the question: + /// What paras sent a message at the given block number for a given reciever. + /// Invariants: + /// - The inner `Vec` is never empty. + /// - The inner `Vec` cannot store two same `ParaId`. + /// - The outer vector is sorted ascending by block number and cannot store two items with the same + /// block number. + HrmpChannelDigests: map hasher(twox_64_concat) ParaId => Vec<(T::BlockNumber, Vec)>; + } +} + +decl_error! { + pub enum Error for Module { + /// The sender tried to open a channel to themselves. + OpenHrmpChannelToSelf, + /// The recipient is not a valid para. + OpenHrmpChannelInvalidRecipient, + /// The requested capacity is zero. + OpenHrmpChannelZeroCapacity, + /// The requested capacity exceeds the global limit. + OpenHrmpChannelCapacityExceedsLimit, + /// The requested maximum message size is 0. + OpenHrmpChannelZeroMessageSize, + /// The open request requested the message size that exceeds the global limit. + OpenHrmpChannelMessageSizeExceedsLimit, + /// The channel already exists + OpenHrmpChannelAlreadyExists, + /// There is already a request to open the same channel. + OpenHrmpChannelAlreadyRequested, + /// The sender already has the maximum number of allowed outbound channels. + OpenHrmpChannelLimitExceeded, + /// The channel from the sender to the origin doesn't exist. + AcceptHrmpChannelDoesntExist, + /// The channel is already confirmed. + AcceptHrmpChannelAlreadyConfirmed, + /// The recipient already has the maximum number of allowed inbound channels. + AcceptHrmpChannelLimitExceeded, + /// The origin tries to close a channel where it is neither the sender nor the recipient. + CloseHrmpChannelUnauthorized, + /// The channel to be closed doesn't exist. 
+ CloseHrmpChannelDoesntExist, + /// The channel close request is already requested. + CloseHrmpChannelAlreadyUnderway, + } +} + +decl_module! { + /// The HRMP module. + pub struct Module for enum Call where origin: ::Origin { + type Error = Error; + + #[weight = 0] + fn hrmp_init_open_channel( + origin, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + ) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::init_open_channel( + origin, + recipient, + proposed_max_capacity, + proposed_max_message_size + )?; + Ok(()) + } + + #[weight = 0] + fn hrmp_accept_open_channel(origin, sender: ParaId) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::accept_open_channel(origin, sender)?; + Ok(()) + } + + #[weight = 0] + fn hrmp_close_channel(origin, channel_id: HrmpChannelId) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::close_channel(origin, channel_id)?; + Ok(()) + } + } +} + /// Routines and getters related to HRMP. impl Module { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + Self::process_hrmp_open_channel_requests(¬ification.prev_config); + Self::process_hrmp_close_channel_requests(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. + fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_hrmp_after_outgoing(outgoing_para); + } + } + + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + /// Remove all storage entries associated with the given para. 
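///
/// Mirrors the session-change steps described in `hrmp.md`: every inbound (`(_, P)`) and
/// outbound (`(P, _)`) channel of the departing para is dropped, along with its open/accepted
/// request counters and per-para data such as its HRMP watermark and channel digests.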
pub(super) fn clean_hrmp_after_outgoing(outgoing_para: ParaId) { ::HrmpOpenChannelRequestCount::remove(&outgoing_para); @@ -631,32 +813,29 @@ impl Module { recipient: ParaId, proposed_max_capacity: u32, proposed_max_message_size: u32, - ) -> Result<(), DispatchError> { - ensure!( - origin != recipient, - DispatchError::::OpenHrmpChannelToSelf - ); + ) -> Result<(), Error> { + ensure!(origin != recipient, Error::::OpenHrmpChannelToSelf); ensure!( >::is_valid_para(recipient), - DispatchError::::OpenHrmpChannelInvalidRecipient, + Error::::OpenHrmpChannelInvalidRecipient, ); let config = >::config(); ensure!( proposed_max_capacity > 0, - DispatchError::::OpenHrmpChannelZeroCapacity, + Error::::OpenHrmpChannelZeroCapacity, ); ensure!( proposed_max_capacity <= config.hrmp_channel_max_capacity, - DispatchError::::OpenHrmpChannelCapacityExceedsLimit, + Error::::OpenHrmpChannelCapacityExceedsLimit, ); ensure!( proposed_max_message_size > 0, - DispatchError::::OpenHrmpChannelZeroMessageSize, + Error::::OpenHrmpChannelZeroMessageSize, ); ensure!( proposed_max_message_size <= config.hrmp_channel_max_message_size, - DispatchError::::OpenHrmpChannelMessageSizeExceedsLimit, + Error::::OpenHrmpChannelMessageSizeExceedsLimit, ); let channel_id = HrmpChannelId { @@ -665,11 +844,11 @@ impl Module { }; ensure!( ::HrmpOpenChannelRequests::get(&channel_id).is_none(), - DispatchError::::OpenHrmpChannelAlreadyExists, + Error::::OpenHrmpChannelAlreadyExists, ); ensure!( ::HrmpChannels::get(&channel_id).is_none(), - DispatchError::::OpenHrmpChannelAlreadyRequested, + Error::::OpenHrmpChannelAlreadyRequested, ); let egress_cnt = @@ -682,7 +861,7 @@ impl Module { }; ensure!( egress_cnt + open_req_cnt < channel_num_limit, - DispatchError::::OpenHrmpChannelLimitExceeded, + Error::::OpenHrmpChannelLimitExceeded, ); // TODO: Deposit https://github.com/paritytech/polkadot/issues/1907 @@ -713,7 +892,7 @@ impl Module { .encode() }; if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = - Self::queue_downward_message(&config, recipient, notification_bytes) + >::queue_downward_message(&config, recipient, notification_bytes) { // this should never happen unless the max downward message size is configured to an // jokingly small number. 
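For orientation, the full open-channel handshake as it is exercised by the tests at the end of this file; the para IDs, capacity and maximum message size are illustrative values taken from those tests, and `run_to_block`/`channel_exists` are the test helpers defined below:

```rust
// Para A requests a channel to para B with capacity 2 and max message size 8.
Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap();
// Para B confirms the pending request.
Hrmp::accept_open_channel(para_b, para_a).unwrap();
// The channel only comes into existence at the next session change.
run_to_block(6, Some(vec![6]));
assert!(channel_exists(para_a, para_b));
```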
@@ -723,19 +902,16 @@ impl Module { Ok(()) } - pub(super) fn accept_open_channel( - origin: ParaId, - sender: ParaId, - ) -> Result<(), DispatchError> { + pub(super) fn accept_open_channel(origin: ParaId, sender: ParaId) -> Result<(), Error> { let channel_id = HrmpChannelId { sender, recipient: origin, }; let mut channel_req = ::HrmpOpenChannelRequests::get(&channel_id) - .ok_or(DispatchError::::AcceptHrmpChannelDoesntExist)?; + .ok_or(Error::::AcceptHrmpChannelDoesntExist)?; ensure!( !channel_req.confirmed, - DispatchError::::AcceptHrmpChannelAlreadyConfirmed, + Error::::AcceptHrmpChannelAlreadyConfirmed, ); // check if by accepting this open channel request, this parachain would exceed the @@ -751,7 +927,7 @@ impl Module { let accepted_cnt = ::HrmpAcceptedChannelRequestCount::get(&origin); ensure!( ingress_cnt + accepted_cnt < channel_num_limit, - DispatchError::::AcceptHrmpChannelLimitExceeded, + Error::::AcceptHrmpChannelLimitExceeded, ); // TODO: Deposit https://github.com/paritytech/polkadot/issues/1907 @@ -772,7 +948,7 @@ impl Module { .encode() }; if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = - Self::queue_downward_message(&config, sender, notification_bytes) + >::queue_downward_message(&config, sender, notification_bytes) { // this should never happen unless the max downward message size is configured to an // jokingly small number. @@ -782,26 +958,23 @@ impl Module { Ok(()) } - pub(super) fn close_channel( - origin: ParaId, - channel_id: HrmpChannelId, - ) -> Result<(), DispatchError> { + pub(super) fn close_channel(origin: ParaId, channel_id: HrmpChannelId) -> Result<(), Error> { // check if the origin is allowed to close the channel. ensure!( origin == channel_id.sender || origin == channel_id.recipient, - DispatchError::::CloseHrmpChannelUnauthorized, + Error::::CloseHrmpChannelUnauthorized, ); // check if the channel requested to close does exist. ensure!( ::HrmpChannels::get(&channel_id).is_some(), - DispatchError::::CloseHrmpChannelDoesntExist, + Error::::CloseHrmpChannelDoesntExist, ); // check that there is no outstanding close request for this channel ensure!( ::HrmpCloseChannelRequests::get(&channel_id).is_none(), - DispatchError::::CloseHrmpChannelAlreadyUnderway, + Error::::CloseHrmpChannelAlreadyUnderway, ); ::HrmpCloseChannelRequests::insert(&channel_id, ()); @@ -825,7 +998,7 @@ impl Module { channel_id.sender }; if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = - Self::queue_downward_message(&config, opposite_party, notification_bytes) + >::queue_downward_message(&config, opposite_party, notification_bytes) { // this should never happen unless the max downward message size is configured to an // jokingly small number. @@ -876,19 +1049,20 @@ impl Module { #[cfg(test)] mod tests { use super::*; - use crate::mock::{new_test_ext, Configuration, Paras, Router, System}; - use crate::router::tests::default_genesis_config; + use crate::mock::{ + new_test_ext, Configuration, Paras, Hrmp, System, GenesisConfig as MockGenesisConfig, + }; use primitives::v1::BlockNumber; use std::collections::{BTreeMap, HashSet}; - pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { + fn run_to_block(to: BlockNumber, new_session: Option>) { use frame_support::traits::{OnFinalize as _, OnInitialize as _}; while System::block_number() < to { let b = System::block_number(); // NOTE: this is in reverse initialization order. 
- Router::initializer_finalize(); + Hrmp::initializer_finalize(); Paras::initializer_finalize(); System::on_finalize(b); @@ -899,12 +1073,12 @@ mod tests { if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { // NOTE: this is in initialization order. Paras::initializer_on_new_session(&Default::default()); - Router::initializer_on_new_session(&Default::default()); + Hrmp::initializer_on_new_session(&Default::default()); } // NOTE: this is in initialization order. Paras::initializer_initialize(b + 1); - Router::initializer_initialize(b + 1); + Hrmp::initializer_initialize(b + 1); } } @@ -951,6 +1125,18 @@ mod tests { } } + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } + fn register_parachain(id: ParaId) { Paras::schedule_para_initialize( id, @@ -967,17 +1153,17 @@ mod tests { } fn channel_exists(sender: ParaId, recipient: ParaId) -> bool { - ::HrmpChannels::get(&HrmpChannelId { sender, recipient }).is_some() + ::HrmpChannels::get(&HrmpChannelId { sender, recipient }).is_some() } fn assert_storage_consistency_exhaustive() { use frame_support::IterableStorageMap; assert_eq!( - ::HrmpOpenChannelRequests::iter() + ::HrmpOpenChannelRequests::iter() .map(|(k, _)| k) .collect::>(), - ::HrmpOpenChannelRequestsList::get() + ::HrmpOpenChannelRequestsList::get() .into_iter() .collect::>(), ); @@ -987,17 +1173,17 @@ mod tests { // // having ensured that, we can go ahead and go over all counts and verify that they match. assert_eq!( - ::HrmpOpenChannelRequestCount::iter() + ::HrmpOpenChannelRequestCount::iter() .map(|(k, _)| k) .collect::>(), - ::HrmpOpenChannelRequests::iter() + ::HrmpOpenChannelRequests::iter() .map(|(k, _)| k.sender) .collect::>(), ); for (open_channel_initiator, expected_num) in - ::HrmpOpenChannelRequestCount::iter() + ::HrmpOpenChannelRequestCount::iter() { - let actual_num = ::HrmpOpenChannelRequests::iter() + let actual_num = ::HrmpOpenChannelRequests::iter() .filter(|(ch, _)| ch.sender == open_channel_initiator) .count() as u32; assert_eq!(expected_num, actual_num); @@ -1006,28 +1192,28 @@ mod tests { // The same as above, but for accepted channel request count. Note that we are interested // only in confirmed open requests. assert_eq!( - ::HrmpAcceptedChannelRequestCount::iter() + ::HrmpAcceptedChannelRequestCount::iter() .map(|(k, _)| k) .collect::>(), - ::HrmpOpenChannelRequests::iter() + ::HrmpOpenChannelRequests::iter() .filter(|(_, v)| v.confirmed) .map(|(k, _)| k.recipient) .collect::>(), ); for (channel_recipient, expected_num) in - ::HrmpAcceptedChannelRequestCount::iter() + ::HrmpAcceptedChannelRequestCount::iter() { - let actual_num = ::HrmpOpenChannelRequests::iter() + let actual_num = ::HrmpOpenChannelRequests::iter() .filter(|(ch, v)| ch.recipient == channel_recipient && v.confirmed) .count() as u32; assert_eq!(expected_num, actual_num); } assert_eq!( - ::HrmpCloseChannelRequests::iter() + ::HrmpCloseChannelRequests::iter() .map(|(k, _)| k) .collect::>(), - ::HrmpCloseChannelRequestsList::get() + ::HrmpCloseChannelRequestsList::get() .into_iter() .collect::>(), ); @@ -1035,14 +1221,14 @@ mod tests { // A HRMP watermark can be None for an onboarded parachain. However, an offboarded parachain // cannot have an HRMP watermark: it should've been cleanup. 
assert_contains_only_onboarded( - ::HrmpWatermarks::iter().map(|(k, _)| k), + ::HrmpWatermarks::iter().map(|(k, _)| k), "HRMP watermarks should contain only onboarded paras", ); // An entry in `HrmpChannels` indicates that the channel is open. Only open channels can // have contents. - for (non_empty_channel, contents) in ::HrmpChannelContents::iter() { - assert!(::HrmpChannels::contains_key( + for (non_empty_channel, contents) in ::HrmpChannelContents::iter() { + assert!(::HrmpChannels::contains_key( &non_empty_channel )); @@ -1054,7 +1240,7 @@ mod tests { // Senders and recipients must be onboarded. Otherwise, all channels associated with them // are removed. assert_contains_only_onboarded( - ::HrmpChannels::iter().flat_map(|(k, _)| vec![k.sender, k.recipient]), + ::HrmpChannels::iter().flat_map(|(k, _)| vec![k.sender, k.recipient]), "senders and recipients in all channels should be onboarded", ); @@ -1077,13 +1263,13 @@ mod tests { // (b, z) (b, z) // // and then that we compare that to the channel list in the `HrmpChannels`. - let channel_set_derived_from_ingress = ::HrmpIngressChannelsIndex::iter() + let channel_set_derived_from_ingress = ::HrmpIngressChannelsIndex::iter() .flat_map(|(p, v)| v.into_iter().map(|i| (i, p)).collect::>()) .collect::>(); - let channel_set_derived_from_egress = ::HrmpEgressChannelsIndex::iter() + let channel_set_derived_from_egress = ::HrmpEgressChannelsIndex::iter() .flat_map(|(p, v)| v.into_iter().map(|e| (p, e)).collect::>()) .collect::>(); - let channel_set_ground_truth = ::HrmpChannels::iter() + let channel_set_ground_truth = ::HrmpChannels::iter() .map(|(k, _)| (k.sender, k.recipient)) .collect::>(); assert_eq!( @@ -1092,18 +1278,18 @@ mod tests { ); assert_eq!(channel_set_derived_from_egress, channel_set_ground_truth); - ::HrmpIngressChannelsIndex::iter() + ::HrmpIngressChannelsIndex::iter() .map(|(_, v)| v) .for_each(|v| assert_is_sorted(&v, "HrmpIngressChannelsIndex")); - ::HrmpEgressChannelsIndex::iter() + ::HrmpEgressChannelsIndex::iter() .map(|(_, v)| v) .for_each(|v| assert_is_sorted(&v, "HrmpIngressChannelsIndex")); assert_contains_only_onboarded( - ::HrmpChannelDigests::iter().map(|(k, _)| k), + ::HrmpChannelDigests::iter().map(|(k, _)| k), "HRMP channel digests should contain only onboarded paras", ); - for (_digest_for_para, digest) in ::HrmpChannelDigests::iter() { + for (_digest_for_para, digest) in ::HrmpChannelDigests::iter() { // Assert that items are in **strictly** ascending order. The strictness also implies // there are no duplicates. assert!(digest.windows(2).all(|xs| xs[0].0 < xs[1].0)); @@ -1161,10 +1347,10 @@ mod tests { register_parachain(para_b); run_to_block(5, Some(vec![5])); - Router::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); assert_storage_consistency_exhaustive(); - Router::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); assert_storage_consistency_exhaustive(); // Advance to a block 6, but without session change. That means that the channel has @@ -1189,15 +1375,15 @@ mod tests { register_parachain(para_b); run_to_block(5, Some(vec![5])); - Router::init_open_channel(para_a, para_b, 2, 8).unwrap(); - Router::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); run_to_block(6, Some(vec![6])); assert!(channel_exists(para_a, para_b)); // Close the channel. 
The effect is not immediate, but rather deferred to the next // session change. - Router::close_channel( + Hrmp::close_channel( para_b, HrmpChannelId { sender: para_a, @@ -1228,8 +1414,8 @@ mod tests { register_parachain(para_b); run_to_block(5, Some(vec![5])); - Router::init_open_channel(para_a, para_b, 2, 20).unwrap(); - Router::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::init_open_channel(para_a, para_b, 2, 20).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); // On Block 6: // A sends a message to B @@ -1240,15 +1426,15 @@ mod tests { data: b"this is an emergency".to_vec(), }]; let config = Configuration::config(); - assert!(Router::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); - let _ = Router::queue_outbound_hrmp(para_a, msgs); + assert!(Hrmp::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); + let _ = Hrmp::queue_outbound_hrmp(para_a, msgs); assert_storage_consistency_exhaustive(); // On Block 7: // B receives the message sent by A. B sets the watermark to 6. run_to_block(7, None); - assert!(Router::check_hrmp_watermark(para_b, 7, 6).is_ok()); - let _ = Router::prune_hrmp(para_b, 6); + assert!(Hrmp::check_hrmp_watermark(para_b, 7, 6).is_ok()); + let _ = Hrmp::prune_hrmp(para_b, 6); assert_storage_consistency_exhaustive(); }); } @@ -1263,8 +1449,8 @@ mod tests { register_parachain(para_b); run_to_block(5, Some(vec![5])); - Router::init_open_channel(para_a, para_b, 2, 8).unwrap(); - Router::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); deregister_parachain(para_a); // On Block 6: session change. The channel should not be created. @@ -1290,10 +1476,10 @@ mod tests { // Open two channels to the same receiver, b: // a -> b, c -> b - Router::init_open_channel(para_a, para_b, 2, 8).unwrap(); - Router::accept_open_channel(para_b, para_a).unwrap(); - Router::init_open_channel(para_c, para_b, 2, 8).unwrap(); - Router::accept_open_channel(para_b, para_c).unwrap(); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::init_open_channel(para_c, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_c).unwrap(); // On Block 6: session change. run_to_block(6, Some(vec![6])); @@ -1304,12 +1490,12 @@ mod tests { data: b"knock".to_vec(), }]; let config = Configuration::config(); - assert!(Router::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); - let _ = Router::queue_outbound_hrmp(para_a, msgs.clone()); + assert!(Hrmp::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); + let _ = Hrmp::queue_outbound_hrmp(para_a, msgs.clone()); // Verify that the sent messages are there and that also the empty channels are present. 
- let mqc_heads = Router::hrmp_mqc_heads(para_b); - let contents = Router::inbound_hrmp_channels_contents(para_b); + let mqc_heads = Hrmp::hrmp_mqc_heads(para_b); + let contents = Hrmp::inbound_hrmp_channels_contents(para_b); assert_eq!( contents, vec![ diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index 572a426e3a8c..1509b884080d 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -36,7 +36,7 @@ use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use sp_staking::SessionIndex; use sp_runtime::{DispatchError, traits::{One, Saturating}}; -use crate::{configuration, paras, router, scheduler::CoreAssignment}; +use crate::{configuration, paras, dmp, ump, hrmp, scheduler::CoreAssignment}; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding /// for any backed candidates referred to by a `1` bit available. @@ -86,7 +86,12 @@ impl CandidatePendingAvailability { } pub trait Trait: - frame_system::Trait + paras::Trait + router::Trait + configuration::Trait + frame_system::Trait + + paras::Trait + + dmp::Trait + + ump::Trait + + hrmp::Trait + + configuration::Trait { type Event: From> + Into<::Event>; } @@ -600,19 +605,19 @@ impl Module { } // enact the messaging facet of the candidate. - weight += >::prune_dmq( + weight += >::prune_dmq( receipt.descriptor.para_id, commitments.processed_downward_messages, ); - weight += >::enact_upward_messages( + weight += >::enact_upward_messages( receipt.descriptor.para_id, commitments.upward_messages, ); - weight += >::prune_hrmp( + weight += >::prune_hrmp( receipt.descriptor.para_id, T::BlockNumber::from(commitments.hrmp_watermark), ); - weight += >::queue_outbound_hrmp( + weight += >::queue_outbound_hrmp( receipt.descriptor.para_id, commitments.horizontal_messages, ); @@ -719,10 +724,10 @@ enum AcceptanceCheckErr { HeadDataTooLarge, PrematureCodeUpgrade, NewCodeTooLarge, - ProcessedDownwardMessages(router::ProcessedDownwardMessagesAcceptanceErr), - UpwardMessages(router::UpwardMessagesAcceptanceCheckErr), - HrmpWatermark(router::HrmpWatermarkAcceptanceErr), - OutboundHrmp(router::OutboundHrmpAcceptanceErr), + ProcessedDownwardMessages(dmp::ProcessedDownwardMessagesAcceptanceErr), + UpwardMessages(ump::AcceptanceCheckErr), + HrmpWatermark(hrmp::HrmpWatermarkAcceptanceErr), + OutboundHrmp(hrmp::OutboundHrmpAcceptanceErr), } impl AcceptanceCheckErr { @@ -795,17 +800,17 @@ impl CandidateCheckContext { } // check if the candidate passes the messaging acceptance criteria - >::check_processed_downward_messages( + >::check_processed_downward_messages( para_id, processed_downward_messages, )?; - >::check_upward_messages(&self.config, para_id, upward_messages)?; - >::check_hrmp_watermark( + >::check_upward_messages(&self.config, para_id, upward_messages)?; + >::check_hrmp_watermark( para_id, self.relay_parent_number, hrmp_watermark, )?; - >::check_outbound_hrmp(&self.config, para_id, horizontal_messages)?; + >::check_outbound_hrmp(&self.config, para_id, horizontal_messages)?; Ok(()) } diff --git a/runtime/parachains/src/inclusion_inherent.rs b/runtime/parachains/src/inclusion_inherent.rs index 14f63c9dbbaf..b6cbf94133d9 100644 --- a/runtime/parachains/src/inclusion_inherent.rs +++ b/runtime/parachains/src/inclusion_inherent.rs @@ -35,7 +35,7 @@ use frame_system::ensure_none; use crate::{ inclusion, scheduler::{self, FreedReason}, - router, + ump, }; use inherents::{InherentIdentifier, InherentData, MakeFatalError, ProvideInherent}; @@ 
-117,7 +117,7 @@ decl_module! { >::occupied(&occupied); // Give some time slice to dispatch pending upward messages. - >::process_pending_upward_messages(); + >::process_pending_upward_messages(); // And track that we've finished processing the inherent for this block. Included::set(Some(())); diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 8e2e88ff59eb..d32b8dd0eb8c 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -29,7 +29,7 @@ use sp_runtime::traits::One; use codec::{Encode, Decode}; use crate::{ configuration::{self, HostConfiguration}, - paras, router, scheduler, inclusion, + paras, scheduler, inclusion, dmp, ump, hrmp, }; /// Information about a session change that has just occurred. @@ -63,7 +63,9 @@ pub trait Trait: + paras::Trait + scheduler::Trait + inclusion::Trait - + router::Trait + + dmp::Trait + + ump::Trait + + hrmp::Trait { /// A randomness beacon. type Randomness: Randomness; @@ -122,12 +124,16 @@ decl_module! { // - Scheduler // - Inclusion // - Validity - // - Router + // - DMP + // - UMP + // - HRMP let total_weight = configuration::Module::::initializer_initialize(now) + paras::Module::::initializer_initialize(now) + scheduler::Module::::initializer_initialize(now) + inclusion::Module::::initializer_initialize(now) + - router::Module::::initializer_initialize(now); + dmp::Module::::initializer_initialize(now) + + ump::Module::::initializer_initialize(now) + + hrmp::Module::::initializer_initialize(now); HasInitialized::set(Some(())); @@ -137,7 +143,9 @@ decl_module! { fn on_finalize() { // reverse initialization order. - router::Module::::initializer_finalize(); + hrmp::Module::::initializer_finalize(); + ump::Module::::initializer_finalize(); + dmp::Module::::initializer_finalize(); inclusion::Module::::initializer_finalize(); scheduler::Module::::initializer_finalize(); paras::Module::::initializer_finalize(); @@ -181,7 +189,9 @@ impl Module { paras::Module::::initializer_on_new_session(¬ification); scheduler::Module::::initializer_on_new_session(¬ification); inclusion::Module::::initializer_on_new_session(¬ification); - router::Module::::initializer_on_new_session(¬ification); + dmp::Module::::initializer_on_new_session(¬ification); + ump::Module::::initializer_on_new_session(¬ification); + hrmp::Module::::initializer_on_new_session(¬ification); } /// Should be called when a new session occurs. Buffers the session notification to be applied diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 833ff6ae4793..3691b41c365c 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -27,10 +27,12 @@ pub mod inclusion; pub mod inclusion_inherent; pub mod initializer; pub mod paras; -pub mod router; pub mod scheduler; pub mod validity; pub mod origin; +pub mod dmp; +pub mod ump; +pub mod hrmp; pub mod runtime_api_impl; @@ -40,3 +42,25 @@ mod util; mod mock; pub use origin::{Origin, ensure_parachain}; + +/// Schedule a para to be initialized at the start of the next session with the given genesis data. +pub fn schedule_para_initialize( + id: primitives::v1::Id, + genesis: paras::ParaGenesisArgs, +) { + >::schedule_para_initialize(id, genesis); +} + +/// Schedule a para to be cleaned up at the start of the next session. 
+pub fn schedule_para_cleanup(id: primitives::v1::Id) +where + T: paras::Trait + + dmp::Trait + + ump::Trait + + hrmp::Trait, +{ + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); +} diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index 3da3a6448128..edb84e2a1245 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -108,9 +108,14 @@ impl crate::paras::Trait for Test { type Origin = Origin; } -impl crate::router::Trait for Test { +impl crate::dmp::Trait for Test { } + +impl crate::ump::Trait for Test { + type UmpSink = crate::ump::mock_sink::MockUmpSink; +} + +impl crate::hrmp::Trait for Test { type Origin = Origin; - type UmpSink = crate::router::MockUmpSink; } impl crate::scheduler::Trait for Test { } @@ -130,8 +135,14 @@ pub type Configuration = crate::configuration::Module; /// Mocked paras. pub type Paras = crate::paras::Module; -/// Mocked router. -pub type Router = crate::router::Module; +/// Mocked DMP +pub type Dmp = crate::dmp::Module; + +/// Mocked UMP +pub type Ump = crate::ump::Module; + +/// Mocked HRMP +pub type Hrmp = crate::hrmp::Module; /// Mocked scheduler. pub type Scheduler = crate::scheduler::Module; diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index 84bdf6cf73ad..ab811f0f7d48 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -396,7 +396,7 @@ impl Module { } /// Schedule a para to be initialized at the start of the next session. - pub fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { + pub(crate) fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { let dup = UpcomingParas::mutate(|v| { match v.binary_search(&id) { Ok(_) => true, @@ -418,7 +418,7 @@ impl Module { } /// Schedule a para to be cleaned up at the start of the next session. - pub fn schedule_para_cleanup(id: ParaId) -> Weight { + pub(crate) fn schedule_para_cleanup(id: ParaId) -> Weight { let upcoming_weight = UpcomingParas::mutate(|v| { match v.binary_search(&id) { Ok(i) => { diff --git a/runtime/parachains/src/router.rs b/runtime/parachains/src/router.rs deleted file mode 100644 index eefc6900b83f..000000000000 --- a/runtime/parachains/src/router.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The router module is responsible for handling messaging. -//! -//! The core of the messaging is checking and processing messages sent out by the candidates, -//! routing the messages at their destinations and informing the parachains about the incoming -//! messages. 
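A note on the cleanup plumbing above: the new free function `schedule_para_cleanup` in `runtime/parachains/src/lib.rs` fans out to `paras`, `dmp`, `ump` and `hrmp`, and each of those modules now keeps its own `OutgoingParas` list that it drains on session change. Below is a minimal, self-contained sketch of that bookkeeping pattern; the `u32` stand-in for `ParaId` and the plain `Vec` in place of runtime storage are assumptions made purely for illustration.

```rust
// A minimal model of the per-module `OutgoingParas` bookkeeping. `ParaId` is a u32
// stand-in for `primitives::v1::Id`, and the Vec stands in for runtime storage.
type ParaId = u32;

struct OutgoingParas(Vec<ParaId>);

impl OutgoingParas {
    fn new() -> Self {
        OutgoingParas(Vec::new())
    }

    /// Mirrors `schedule_para_cleanup`: insert while keeping the vector sorted
    /// ascending by para id and free of duplicates.
    fn schedule(&mut self, id: ParaId) {
        if let Err(i) = self.0.binary_search(&id) {
            self.0.insert(i, id);
        }
    }

    /// Mirrors the session-change step: take the whole list, leaving it empty, and
    /// hand every outgoing para to a module-specific cleanup routine.
    fn drain(&mut self, mut clean: impl FnMut(ParaId)) {
        for para in std::mem::take(&mut self.0) {
            clean(para);
        }
    }
}

fn main() {
    let mut outgoing = OutgoingParas::new();
    outgoing.schedule(7);
    outgoing.schedule(3);
    outgoing.schedule(7); // duplicate, ignored
    outgoing.drain(|para| println!("cleaning up para {para}"));
}
```

The sorted, duplicate-free insert is what lets each module's session-change hook walk its outgoing paras in ascending order, matching the invariant stated on the storage item.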
- -use crate::{configuration, paras, initializer, ensure_parachain}; -use sp_std::prelude::*; -use frame_support::{decl_error, decl_module, decl_storage, dispatch::DispatchResult, weights::Weight}; -use sp_std::collections::vec_deque::VecDeque; -use primitives::v1::{ - Id as ParaId, InboundDownwardMessage, Hash, UpwardMessage, HrmpChannelId, InboundHrmpMessage, -}; - -mod dmp; -mod hrmp; -mod ump; - -use hrmp::{HrmpOpenChannelRequest, HrmpChannel}; -pub use dmp::{QueueDownwardMessageError, ProcessedDownwardMessagesAcceptanceErr}; -pub use ump::{UmpSink, AcceptanceCheckErr as UpwardMessagesAcceptanceCheckErr}; -pub use hrmp::{HrmpWatermarkAcceptanceErr, OutboundHrmpAcceptanceErr}; - -#[cfg(test)] -pub use ump::mock_sink::MockUmpSink; - -pub trait Trait: frame_system::Trait + configuration::Trait + paras::Trait { - type Origin: From - + From<::Origin> - + Into::Origin>>; - - /// A place where all received upward messages are funneled. - type UmpSink: UmpSink; -} - -decl_storage! { - trait Store for Module as Router { - /// Paras that are to be cleaned up at the end of the session. - /// The entries are sorted ascending by the para id. - OutgoingParas: Vec; - - /* - * Downward Message Passing (DMP) - * - * Storage layout required for implementation of DMP. - */ - - /// The downward messages addressed for a certain para. - DownwardMessageQueues: map hasher(twox_64_concat) ParaId => Vec>; - /// A mapping that stores the downward message queue MQC head for each para. - /// - /// Each link in this chain has a form: - /// `(prev_head, B, H(M))`, where - /// - `prev_head`: is the previous head hash or zero if none. - /// - `B`: is the relay-chain block number in which a message was appended. - /// - `H(M)`: is the hash of the message being appended. - DownwardMessageQueueHeads: map hasher(twox_64_concat) ParaId => Hash; - - /* - * Upward Message Passing (UMP) - * - * Storage layout required for UMP, specifically dispatchable upward messages. - */ - - /// The messages waiting to be handled by the relay-chain originating from a certain parachain. - /// - /// Note that some upward messages might have been already processed by the inclusion logic. E.g. - /// channel management messages. - /// - /// The messages are processed in FIFO order. - RelayDispatchQueues: map hasher(twox_64_concat) ParaId => VecDeque; - /// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. - /// - /// First item in the tuple is the count of messages and second - /// is the total length (in bytes) of the message payloads. - /// - /// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of - /// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of - /// loading the whole message queue if only the total size and count are required. - /// - /// Invariant: - /// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`. - RelayDispatchQueueSize: map hasher(twox_64_concat) ParaId => (u32, u32); - /// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. - /// - /// Invariant: - /// - The set of items from this vector should be exactly the set of the keys in - /// `RelayDispatchQueues` and `RelayDispatchQueueSize`. - NeedsDispatch: Vec; - /// This is the para that gets will get dispatched first during the next upward dispatchable queue - /// execution round. - /// - /// Invariant: - /// - If `Some(para)`, then `para` must be present in `NeedsDispatch`. 
- NextDispatchRoundStartWith: Option; - - /* - * Horizontally Relay-routed Message Passing (HRMP) - * - * HRMP related storage layout - */ - - /// The set of pending HRMP open channel requests. - /// - /// The set is accompanied by a list for iteration. - /// - /// Invariant: - /// - There are no channels that exists in list but not in the set and vice versa. - HrmpOpenChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option; - HrmpOpenChannelRequestsList: Vec; - - /// This mapping tracks how many open channel requests are inititated by a given sender para. - /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` - /// as the number of `HrmpOpenChannelRequestCount` for `X`. - HrmpOpenChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; - /// This mapping tracks how many open channel requests were accepted by a given recipient para. - /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with - /// `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`. - HrmpAcceptedChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; - - /// A set of pending HRMP close channel requests that are going to be closed during the session change. - /// Used for checking if a given channel is registered for closure. - /// - /// The set is accompanied by a list for iteration. - /// - /// Invariant: - /// - There are no channels that exists in list but not in the set and vice versa. - HrmpCloseChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option<()>; - HrmpCloseChannelRequestsList: Vec; - - /// The HRMP watermark associated with each para. - /// Invariant: - /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session. - HrmpWatermarks: map hasher(twox_64_concat) ParaId => Option; - /// HRMP channel data associated with each para. - /// Invariant: - /// - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session. - HrmpChannels: map hasher(twox_64_concat) HrmpChannelId => Option; - /// Ingress/egress indexes allow to find all the senders and receivers given the opposite - /// side. I.e. - /// - /// (a) ingress index allows to find all the senders for a given recipient. - /// (b) egress index allows to find all the recipients for a given sender. - /// - /// Invariants: - /// - for each ingress index entry for `P` each item `I` in the index should present in `HrmpChannels` - /// as `(I, P)`. - /// - for each egress index entry for `P` each item `E` in the index should present in `HrmpChannels` - /// as `(P, E)`. - /// - there should be no other dangling channels in `HrmpChannels`. - /// - the vectors are sorted. - HrmpIngressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec; - HrmpEgressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec; - /// Storage for the messages for each channel. - /// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`. - HrmpChannelContents: map hasher(twox_64_concat) HrmpChannelId => Vec>; - /// Maintains a mapping that can be used to answer the question: - /// What paras sent a message at the given block number for a given reciever. - /// Invariants: - /// - The inner `Vec` is never empty. - /// - The inner `Vec` cannot store two same `ParaId`. - /// - The outer vector is sorted ascending by block number and cannot store two items with the same - /// block number. 
- HrmpChannelDigests: map hasher(twox_64_concat) ParaId => Vec<(T::BlockNumber, Vec)>; - } -} - -decl_error! { - pub enum Error for Module { - /// The sender tried to open a channel to themselves. - OpenHrmpChannelToSelf, - /// The recipient is not a valid para. - OpenHrmpChannelInvalidRecipient, - /// The requested capacity is zero. - OpenHrmpChannelZeroCapacity, - /// The requested capacity exceeds the global limit. - OpenHrmpChannelCapacityExceedsLimit, - /// The requested maximum message size is 0. - OpenHrmpChannelZeroMessageSize, - /// The open request requested the message size that exceeds the global limit. - OpenHrmpChannelMessageSizeExceedsLimit, - /// The channel already exists - OpenHrmpChannelAlreadyExists, - /// There is already a request to open the same channel. - OpenHrmpChannelAlreadyRequested, - /// The sender already has the maximum number of allowed outbound channels. - OpenHrmpChannelLimitExceeded, - /// The channel from the sender to the origin doesn't exist. - AcceptHrmpChannelDoesntExist, - /// The channel is already confirmed. - AcceptHrmpChannelAlreadyConfirmed, - /// The recipient already has the maximum number of allowed inbound channels. - AcceptHrmpChannelLimitExceeded, - /// The origin tries to close a channel where it is neither the sender nor the recipient. - CloseHrmpChannelUnauthorized, - /// The channel to be closed doesn't exist. - CloseHrmpChannelDoesntExist, - /// The channel close request is already requested. - CloseHrmpChannelAlreadyUnderway, - } -} - -decl_module! { - /// The router module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - #[weight = 0] - fn hrmp_init_open_channel( - origin, - recipient: ParaId, - proposed_max_capacity: u32, - proposed_max_message_size: u32, - ) -> DispatchResult { - let origin = ensure_parachain(::Origin::from(origin))?; - Self::init_open_channel( - origin, - recipient, - proposed_max_capacity, - proposed_max_message_size - )?; - Ok(()) - } - - #[weight = 0] - fn hrmp_accept_open_channel(origin, sender: ParaId) -> DispatchResult { - let origin = ensure_parachain(::Origin::from(origin))?; - Self::accept_open_channel(origin, sender)?; - Ok(()) - } - - #[weight = 0] - fn hrmp_close_channel(origin, channel_id: HrmpChannelId) -> DispatchResult { - let origin = ensure_parachain(::Origin::from(origin))?; - Self::close_channel(origin, channel_id)?; - Ok(()) - } - } -} - -impl Module { - /// Block initialization logic, called by initializer. - pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { - 0 - } - - /// Block finalization logic, called by initializer. - pub(crate) fn initializer_finalize() {} - - /// Called by the initializer to note that a new session has started. - pub(crate) fn initializer_on_new_session( - notification: &initializer::SessionChangeNotification, - ) { - Self::perform_outgoing_para_cleanup(); - Self::process_hrmp_open_channel_requests(¬ification.prev_config); - Self::process_hrmp_close_channel_requests(); - } - - /// Iterate over all paras that were registered for offboarding and remove all the data - /// associated with them. - fn perform_outgoing_para_cleanup() { - let outgoing = OutgoingParas::take(); - for outgoing_para in outgoing { - Self::clean_dmp_after_outgoing(outgoing_para); - Self::clean_ump_after_outgoing(outgoing_para); - Self::clean_hrmp_after_outgoing(outgoing_para); - } - } - - /// Schedule a para to be cleaned up at the start of the next session. 
- pub fn schedule_para_cleanup(id: ParaId) { - OutgoingParas::mutate(|v| { - if let Err(i) = v.binary_search(&id) { - v.insert(i, id); - } - }); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use primitives::v1::BlockNumber; - use frame_support::traits::{OnFinalize, OnInitialize}; - - use crate::mock::{System, Router, GenesisConfig as MockGenesisConfig}; - - pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { - while System::block_number() < to { - let b = System::block_number(); - Router::initializer_finalize(); - System::on_finalize(b); - - System::on_initialize(b + 1); - System::set_block_number(b + 1); - - if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { - Router::initializer_on_new_session(&Default::default()); - } - Router::initializer_initialize(b + 1); - } - } - - pub(crate) fn default_genesis_config() -> MockGenesisConfig { - MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: crate::configuration::HostConfiguration { - max_downward_message_size: 1024, - ..Default::default() - }, - }, - ..Default::default() - } - } -} diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs index 48e21bf2bfa0..2f49f4af8c7e 100644 --- a/runtime/parachains/src/runtime_api_impl/v1.rs +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -28,7 +28,7 @@ use primitives::v1::{ }; use sp_runtime::traits::Zero; use frame_support::debug; -use crate::{initializer, inclusion, scheduler, configuration, paras, router}; +use crate::{initializer, inclusion, scheduler, configuration, paras, dmp, hrmp}; /// Implementation for the `validators` function of the runtime API. pub fn validators() -> Vec { @@ -310,15 +310,15 @@ where } /// Implementation for the `dmq_contents` function of the runtime API. -pub fn dmq_contents( +pub fn dmq_contents( recipient: ParaId, ) -> Vec> { - >::dmq_contents(recipient) + >::dmq_contents(recipient) } /// Implementation for the `inbound_hrmp_channels_contents` function of the runtime API. -pub fn inbound_hrmp_channels_contents( +pub fn inbound_hrmp_channels_contents( recipient: ParaId, ) -> BTreeMap>> { - >::inbound_hrmp_channels_contents(recipient) + >::inbound_hrmp_channels_contents(recipient) } diff --git a/runtime/parachains/src/router/ump.rs b/runtime/parachains/src/ump.rs similarity index 81% rename from runtime/parachains/src/router/ump.rs rename to runtime/parachains/src/ump.rs index 2bfdafbb6c34..03d52ebb2cd6 100644 --- a/runtime/parachains/src/router/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -14,11 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{Trait, Module, Store}; -use crate::configuration::{self, HostConfiguration}; +use crate::{ + configuration::{self, HostConfiguration}, + initializer, +}; use sp_std::{fmt, prelude::*}; use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; -use frame_support::{StorageMap, StorageValue, weights::Weight, traits::Get}; +use frame_support::{decl_module, decl_storage, StorageMap, StorageValue, weights::Weight, traits::Get}; use primitives::v1::{Id as ParaId, UpwardMessage}; /// All upward messages coming from parachains will be funneled into an implementation of this trait. 
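Since the `UmpSink` doc comment above only hints at the contract, here is a toy model of what a sink implementation could look like. The method name and signature are assumptions for illustration only: the diff shows that `ump::Trait` carries an associated `UmpSink` type and that the runtimes currently plug in `()` or a mock, but not the trait's exact shape.

```rust
// A toy upward-message sink. Assumption: a sink receives the originating para id plus
// the opaque message bytes and reports the weight it consumed; the real trait in
// `ump.rs` may differ in detail.
type ParaId = u32;
type Weight = u64;

trait UmpSink {
    fn process_upward_message(origin: ParaId, msg: Vec<u8>) -> Weight;
}

/// A sink that logs and discards messages, in the spirit of the `()` sink the runtimes
/// use until XCM handling takes over (see the `TODO: #1873` further down).
struct LoggingSink;

impl UmpSink for LoggingSink {
    fn process_upward_message(origin: ParaId, msg: Vec<u8>) -> Weight {
        println!("para {origin} sent {} bytes upward", msg.len());
        0
    }
}

fn main() {
    let weight = <LoggingSink as UmpSink>::process_upward_message(1000, b"ping".to_vec());
    assert_eq!(weight, 0);
}
```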
@@ -78,8 +80,7 @@ impl fmt::Debug for AcceptanceCheckErr { AcceptanceCheckErr::MoreMessagesThanPermitted { sent, permitted } => write!( fmt, "more upward messages than permitted by config ({} > {})", - sent, - permitted, + sent, permitted, ), AcceptanceCheckErr::MessageSize { idx, @@ -88,29 +89,109 @@ impl fmt::Debug for AcceptanceCheckErr { } => write!( fmt, "upward message idx {} larger than permitted by config ({} > {})", - idx, - msg_size, - max_size, + idx, msg_size, max_size, ), AcceptanceCheckErr::CapacityExceeded { count, limit } => write!( fmt, "the ump queue would have more items than permitted by config ({} > {})", - count, - limit, + count, limit, ), AcceptanceCheckErr::TotalSizeExceeded { total_size, limit } => write!( fmt, "the ump queue would have grown past the max size permitted by config ({} > {})", - total_size, - limit, + total_size, limit, ), } } } +pub trait Trait: frame_system::Trait + configuration::Trait { + /// A place where all received upward messages are funneled. + type UmpSink: UmpSink; +} + +decl_storage! { + trait Store for Module as Ump { + /// Paras that are to be cleaned up at the end of the session. + /// The entries are sorted ascending by the para id. + OutgoingParas: Vec; + + /// The messages waiting to be handled by the relay-chain originating from a certain parachain. + /// + /// Note that some upward messages might have been already processed by the inclusion logic. E.g. + /// channel management messages. + /// + /// The messages are processed in FIFO order. + RelayDispatchQueues: map hasher(twox_64_concat) ParaId => VecDeque; + /// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. + /// + /// First item in the tuple is the count of messages and second + /// is the total length (in bytes) of the message payloads. + /// + /// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of + /// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of + /// loading the whole message queue if only the total size and count are required. + /// + /// Invariant: + /// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`. + RelayDispatchQueueSize: map hasher(twox_64_concat) ParaId => (u32, u32); + /// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. + /// + /// Invariant: + /// - The set of items from this vector should be exactly the set of the keys in + /// `RelayDispatchQueues` and `RelayDispatchQueueSize`. + NeedsDispatch: Vec; + /// This is the para that gets will get dispatched first during the next upward dispatchable queue + /// execution round. + /// + /// Invariant: + /// - If `Some(para)`, then `para` must be present in `NeedsDispatch`. + NextDispatchRoundStartWith: Option; + } +} + +decl_module! { + /// The UMP module. + pub struct Module for enum Call where origin: ::Origin { + } +} + /// Routines related to the upward message passing. impl Module { - pub(super) fn clean_ump_after_outgoing(outgoing_para: ParaId) { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. 
+ pub(crate) fn initializer_on_new_session( + _notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. + fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_ump_after_outgoing(outgoing_para); + } + } + + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + + fn clean_ump_after_outgoing(outgoing_para: ParaId) { ::RelayDispatchQueueSize::remove(&outgoing_para); ::RelayDispatchQueues::remove(&outgoing_para); @@ -193,13 +274,10 @@ impl Module { v.extend(upward_messages.into_iter()) }); - ::RelayDispatchQueueSize::mutate( - ¶, - |(ref mut cnt, ref mut size)| { - *cnt += extra_cnt; - *size += extra_size; - }, - ); + ::RelayDispatchQueueSize::mutate(¶, |(ref mut cnt, ref mut size)| { + *cnt += extra_cnt; + *size += extra_size; + }); ::NeedsDispatch::mutate(|v| { if let Err(i) = v.binary_search(¶) { @@ -545,8 +623,7 @@ pub(crate) mod mock_sink { mod tests { use super::*; use super::mock_sink::Probe; - use crate::router::tests::default_genesis_config; - use crate::mock::{Configuration, Router, new_test_ext}; + use crate::mock::{Configuration, Ump, new_test_ext, GenesisConfig as MockGenesisConfig}; use frame_support::IterableStorageMap; use std::collections::HashSet; @@ -585,22 +662,33 @@ mod tests { } } + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } + fn queue_upward_msg(para: ParaId, msg: UpwardMessage) { let msgs = vec![msg]; - assert!(Router::check_upward_messages(&Configuration::config(), para, &msgs).is_ok()); - let _ = Router::enact_upward_messages(para, msgs); + assert!(Ump::check_upward_messages(&Configuration::config(), para, &msgs).is_ok()); + let _ = Ump::enact_upward_messages(para, msgs); } fn assert_storage_consistency_exhaustive() { // check that empty queues don't clutter the storage. - for (_para, queue) in ::RelayDispatchQueues::iter() { + for (_para, queue) in ::RelayDispatchQueues::iter() { assert!(!queue.is_empty()); } // actually count the counts and sizes in queues and compare them to the bookkeeped version. - for (para, queue) in ::RelayDispatchQueues::iter() { - let (expected_count, expected_size) = - ::RelayDispatchQueueSize::get(para); + for (para, queue) in ::RelayDispatchQueues::iter() { + let (expected_count, expected_size) = ::RelayDispatchQueueSize::get(para); let (actual_count, actual_size) = queue.into_iter().fold((0, 0), |(acc_count, acc_size), x| { (acc_count + 1, acc_size + x.len() as u32) @@ -612,27 +700,29 @@ mod tests { // since we wipe the empty queues the sets of paras in queue contents, queue sizes and // need dispatch set should all be equal. 
- let queue_contents_set = ::RelayDispatchQueues::iter() + let queue_contents_set = ::RelayDispatchQueues::iter() .map(|(k, _)| k) .collect::>(); - let queue_sizes_set = ::RelayDispatchQueueSize::iter() + let queue_sizes_set = ::RelayDispatchQueueSize::iter() .map(|(k, _)| k) .collect::>(); - let needs_dispatch_set = ::NeedsDispatch::get() + let needs_dispatch_set = ::NeedsDispatch::get() .into_iter() .collect::>(); assert_eq!(queue_contents_set, queue_sizes_set); assert_eq!(queue_contents_set, needs_dispatch_set); // `NextDispatchRoundStartWith` should point into a para that is tracked. - if let Some(para) = ::NextDispatchRoundStartWith::get() { + if let Some(para) = ::NextDispatchRoundStartWith::get() { assert!(queue_contents_set.contains(¶)); } // `NeedsDispatch` is always sorted. - assert!(::NeedsDispatch::get() - .windows(2) - .all(|xs| xs[0] <= xs[1])); + assert!( + ::NeedsDispatch::get() + .windows(2) + .all(|xs| xs[0] <= xs[1]) + ); } #[test] @@ -641,7 +731,7 @@ mod tests { assert_storage_consistency_exhaustive(); // make sure that the case with empty queues is handled properly - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); }); @@ -658,7 +748,7 @@ mod tests { probe.assert_msg(a, msg.clone(), 0); queue_upward_msg(a, msg); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); }); @@ -697,7 +787,7 @@ mod tests { probe.assert_msg(a, a_msg_1.clone(), 300); probe.assert_msg(c, c_msg_1.clone(), 300); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); drop(probe); @@ -711,7 +801,7 @@ mod tests { let mut probe = Probe::new(); probe.assert_msg(q, q_msg.clone(), 500); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); drop(probe); @@ -723,7 +813,7 @@ mod tests { probe.assert_msg(a, a_msg_2.clone(), 100); probe.assert_msg(c, c_msg_2.clone(), 100); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); drop(probe); @@ -733,7 +823,7 @@ mod tests { { let probe = Probe::new(); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); assert_storage_consistency_exhaustive(); drop(probe); @@ -775,7 +865,7 @@ mod tests { probe.assert_msg(b, b_msg_1.clone(), 300); probe.assert_msg(a, a_msg_2.clone(), 300); - Router::process_pending_upward_messages(); + Ump::process_pending_upward_messages(); drop(probe); } diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs index 34946de3e3d2..c827a86d6580 100644 --- a/runtime/parachains/src/util.rs +++ b/runtime/parachains/src/util.rs @@ -20,12 +20,12 @@ use sp_runtime::traits::{One, Saturating}; use primitives::v1::{Id as ParaId, PersistedValidationData, TransientValidationData}; -use crate::{configuration, paras, router}; +use crate::{configuration, paras, dmp, hrmp}; /// Make the persisted validation data for a particular parachain. /// /// This ties together the storage of several modules. 
-pub fn make_persisted_validation_data( +pub fn make_persisted_validation_data( para_id: ParaId, ) -> Option> { let relay_parent_number = >::block_number() - One::one(); @@ -33,15 +33,15 @@ pub fn make_persisted_validation_data( Some(PersistedValidationData { parent_head: >::para_head(¶_id)?, block_number: relay_parent_number, - hrmp_mqc_heads: >::hrmp_mqc_heads(para_id), - dmq_mqc_head: >::dmq_mqc_head(para_id), + hrmp_mqc_heads: >::hrmp_mqc_heads(para_id), + dmq_mqc_head: >::dmq_mqc_head(para_id), }) } /// Make the transient validation data for a particular parachain. /// /// This ties together the storage of several modules. -pub fn make_transient_validation_data( +pub fn make_transient_validation_data( para_id: ParaId, ) -> Option> { let config = >::config(); @@ -67,6 +67,6 @@ pub fn make_transient_validation_data( max_head_data_size: config.max_head_data_size, balance: 0, code_upgrade_allowed, - dmq_length: >::dmq_length(para_id), + dmq_length: >::dmq_length(para_id), }) } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 7b5fcfabff5f..aa6fb87e98b4 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -73,7 +73,9 @@ use runtime_parachains::inclusion as parachains_inclusion; use runtime_parachains::inclusion_inherent as parachains_inclusion_inherent; use runtime_parachains::initializer as parachains_initializer; use runtime_parachains::paras as parachains_paras; -use runtime_parachains::router as parachains_router; +use runtime_parachains::dmp as parachains_dmp; +use runtime_parachains::ump as parachains_ump; +use runtime_parachains::hrmp as parachains_hrmp; use runtime_parachains::scheduler as parachains_scheduler; pub use pallet_balances::Call as BalancesCall; @@ -184,7 +186,9 @@ construct_runtime! { Scheduler: parachains_scheduler::{Module, Call, Storage}, Paras: parachains_paras::{Module, Call, Storage}, Initializer: parachains_initializer::{Module, Call, Storage}, - Router: parachains_router::{Module, Call, Storage}, + Dmp: parachains_dmp::{Module, Call, Storage}, + Ump: parachains_ump::{Module, Call, Storage}, + Hrmp: parachains_hrmp::{Module, Call, Storage}, Registrar: paras_registrar::{Module, Call, Storage}, ParasSudoWrapper: paras_sudo_wrapper::{Module, Call}, @@ -532,11 +536,16 @@ impl parachains_paras::Trait for Runtime { type Origin = Origin; } -impl parachains_router::Trait for Runtime { - type Origin = Origin; +impl parachains_ump::Trait for Runtime { type UmpSink = (); // TODO: #1873 To be handled by the XCM receiver. 
} +impl parachains_dmp::Trait for Runtime {} + +impl parachains_hrmp::Trait for Runtime { + type Origin = Origin; +} + impl parachains_inclusion_inherent::Trait for Runtime {} impl parachains_scheduler::Trait for Runtime {} diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index e54f4118fa29..43a5d186b352 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -30,7 +30,9 @@ use polkadot_runtime_parachains::inclusion as parachains_inclusion; use polkadot_runtime_parachains::inclusion_inherent as parachains_inclusion_inherent; use polkadot_runtime_parachains::initializer as parachains_initializer; use polkadot_runtime_parachains::paras as parachains_paras; -use polkadot_runtime_parachains::router as parachains_router; +use polkadot_runtime_parachains::dmp as parachains_dmp; +use polkadot_runtime_parachains::ump as parachains_ump; +use polkadot_runtime_parachains::hrmp as parachains_hrmp; use polkadot_runtime_parachains::scheduler as parachains_scheduler; use polkadot_runtime_parachains::runtime_api_impl::v1 as runtime_impl; @@ -459,11 +461,16 @@ impl parachains_paras::Trait for Runtime { type Origin = Origin; } -impl parachains_router::Trait for Runtime { - type Origin = Origin; +impl parachains_dmp::Trait for Runtime {} + +impl parachains_ump::Trait for Runtime { type UmpSink = (); } +impl parachains_hrmp::Trait for Runtime { + type Origin = Origin; +} + impl parachains_scheduler::Trait for Runtime {} impl paras_sudo_wrapper::Trait for Runtime {}
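One closing sketch: the persisted validation data assembled in `util.rs` hands a para its `dmq_mqc_head` and `hrmp_mqc_heads`, i.e. the heads of message queue chains (MQCs). The model below shows how such a head advances when a message is appended, following the `(prev_head, B, H(M))` link shape described in the storage comments; `DefaultHasher` and the tuple encoding are stand-ins for the relay chain's real hasher and SCALE encoding, used here only so the example is self-contained.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type BlockNumber = u32;
type HeadHash = u64; // stand-in for the 32-byte relay-chain `Hash`

// Hash anything hashable with the std hasher; purely a placeholder for the real hasher.
fn hash_of(data: &impl Hash) -> HeadHash {
    let mut hasher = DefaultHasher::new();
    data.hash(&mut hasher);
    hasher.finish()
}

/// Each link commits to the previous head (zero if none), the relay-chain block number
/// the message was sent at, and the hash of the message itself, so a single head lets a
/// para authenticate its whole inbound queue.
fn append_to_mqc(prev_head: HeadHash, sent_at: BlockNumber, message: &[u8]) -> HeadHash {
    hash_of(&(prev_head, sent_at, hash_of(&message)))
}

fn main() {
    let mut head: HeadHash = 0; // empty queue
    head = append_to_mqc(head, 5, b"first");
    head = append_to_mqc(head, 6, b"second");
    println!("mqc head after two messages: {head:#x}");
}
```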