Merge branch 'master' into ao-collator-prototol-a-fix
* master:
  remove duplicate Deposit from OnUnbalanced implementation (#4180)
  differentiate log messages (#4183)
  increase ump_service_total_weight's default value (#4127)
  companion PR to removal of light client (#4105)
  Introduce `OriginPrivilegeCmp` (#4166)
ordian committed Oct 30, 2021
2 parents 5aea24f + fb730e4 commit a21b8da
Showing 11 changed files with 229 additions and 384 deletions.
329 changes: 165 additions & 164 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions bridges/.config/lingua.dic
@@ -18,6 +18,7 @@ BFT/M
bitfield/MS
blake2/MS
blockchain/MS
+boolean
borked
BridgeStorage
BlockNumber
@@ -30,6 +31,7 @@ ChainTime
chain_getBlock
choosen
config/MS
+cooldown
crypto/MS
customizable/B
Debian/M
6 changes: 3 additions & 3 deletions node/core/dispute-coordinator/src/real/mod.rs
@@ -419,7 +419,7 @@ where
tracing::warn!(
target: LOG_TARGET,
session,
-"Missing info for session which has an active dispute",
+"Recovering lacks info for session which has an active dispute",
);
continue
},
@@ -861,7 +861,7 @@ async fn handle_import_statements(
tracing::warn!(
target: LOG_TARGET,
session,
-"Missing info for session which has an active dispute",
+"Importing statement lacks info for session which has an active dispute",
);

return Ok(ImportStatementsResult::InvalidImport)
@@ -894,7 +894,7 @@ async fn handle_import_statements(
tracing::warn!(
target: LOG_TARGET,
session,
-"Missing info for session which has an active dispute",
+"Not seen backing vote for candidate which has an active dispute",
);
return Ok(ImportStatementsResult::InvalidImport)
},
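
The three warnings above previously carried the identical message, so log lines from the recovery, import, and backing-vote paths could not be told apart. A minimal sketch of the pattern, assuming the `tracing` and `tracing-subscriber` crates; the target value and helper name here are illustrative, not taken from this file:

use tracing::warn;

const LOG_TARGET: &str = "parachain::dispute-coordinator"; // illustrative target

// `session` is captured as a structured field; the message string is what now
// distinguishes the three call sites when scanning logs.
fn warn_missing_session_info(session: u32) {
    warn!(
        target: LOG_TARGET,
        session,
        "Importing statement lacks info for session which has an active dispute",
    );
}

fn main() {
    tracing_subscriber::fmt::init();
    warn_missing_session_info(42);
}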
@@ -103,7 +103,7 @@ struct RunningTask {

/// Index of validator group to fetch the chunk from.
///
-/// Needef for reporting bad validators.
+/// Needed for reporting bad validators.
group_index: GroupIndex,

/// Validators to request the chunk from.
2 changes: 0 additions & 2 deletions node/service/Cargo.toml
@@ -152,8 +152,6 @@ full-node = [
"kvdb-rocksdb"
]

-light-node = []
-
# Configure the native runtimes to use. Polkadot is enabled by default.
#
# Validators require the native runtime currently
7 changes: 1 addition & 6 deletions node/service/src/chain_spec.rs
@@ -182,12 +182,7 @@ fn default_parachains_host_configuration(
max_upward_queue_count: 8,
max_upward_queue_size: 1024 * 1024,
max_downward_message_size: 1024 * 1024,
-// this is approximatelly 4ms.
-//
-// Same as `4 * frame_support::weights::WEIGHT_PER_MILLIS`. We don't bother with
-// an import since that's a made up number and should be replaced with a constant
-// obtained by benchmarking anyway.
-ump_service_total_weight: 4 * 1_000_000_000,
+ump_service_total_weight: 100_000_000_000,
max_upward_message_size: 1024 * 1024,
max_upward_message_num_per_candidate: 5,
hrmp_sender_deposit: 0,
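
The deleted comment pegged the old default at roughly 4 ms of block weight; by the same conversion the new default corresponds to 100 ms. A quick standalone check, assuming Substrate's usual convention that WEIGHT_PER_SECOND is 10^12 (and hence WEIGHT_PER_MILLIS is 10^9):

// Assumed constants, mirroring frame_support::weights at the time.
const WEIGHT_PER_SECOND: u64 = 1_000_000_000_000;
const WEIGHT_PER_MILLIS: u64 = WEIGHT_PER_SECOND / 1_000;

fn main() {
    let old_default = 4 * 1_000_000_000u64; // previous ump_service_total_weight
    let new_default = 100_000_000_000u64; // value introduced by this commit
    assert_eq!(old_default / WEIGHT_PER_MILLIS, 4); // ~4 ms of UMP work per block
    assert_eq!(new_default / WEIGHT_PER_MILLIS, 100); // ~100 ms per block
}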
199 changes: 1 addition & 198 deletions node/service/src/lib.rs
@@ -101,8 +101,7 @@ pub use sc_executor::NativeExecutionDispatch;
pub use service::{
config::{DatabaseSource, PrometheusConfig},
ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis,
-TFullBackend, TFullCallExecutor, TFullClient, TLightBackend, TLightCallExecutor, TLightClient,
-TaskManager, TransactionPoolOptions,
+TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions,
};
pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi, StateBackend};
pub use sp_runtime::{
@@ -313,13 +312,6 @@ type FullGrandpaBlockImport<RuntimeApi, ExecutorDispatch, ChainSelection = FullS
ChainSelection,
>;

-#[cfg(feature = "light-node")]
-type LightBackend = service::TLightBackendWithHash<Block, sp_runtime::traits::BlakeTwo256>;
-
-#[cfg(feature = "light-node")]
-type LightClient<RuntimeApi, ExecutorDispatch> =
-service::TLightClientWithBackend<Block, RuntimeApi, ExecutorDispatch, LightBackend>;
-
#[cfg(feature = "full-node")]
struct Basics<RuntimeApi, ExecutorDispatch>
where
@@ -1127,168 +1119,6 @@ where
Ok(NewFull { task_manager, client, overseer_handle, network, rpc_handlers, backend })
}

-/// Builds a new service for a light client.
-#[cfg(feature = "light-node")]
-fn new_light<Runtime, Dispatch>(
-mut config: Configuration,
-) -> Result<(TaskManager, RpcHandlers), Error>
-where
-Runtime: 'static + Send + Sync + ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>,
-<Runtime as ConstructRuntimeApi<Block, LightClient<Runtime, Dispatch>>>::RuntimeApi:
-RuntimeApiCollection<StateBackend = sc_client_api::StateBackendFor<LightBackend, Block>>,
-Dispatch: NativeExecutionDispatch + 'static,
-{
-set_prometheus_registry(&mut config)?;
-use sc_client_api::backend::RemoteBackend;
-
-let telemetry = config
-.telemetry_endpoints
-.clone()
-.filter(|x| !x.is_empty())
-.map(|endpoints| -> Result<_, telemetry::Error> {
-let worker = TelemetryWorker::new(16)?;
-let telemetry = worker.handle().new_telemetry(endpoints);
-Ok((worker, telemetry))
-})
-.transpose()?;
-
-let (client, backend, keystore_container, mut task_manager, on_demand) =
-service::new_light_parts::<Block, Runtime, Dispatch>(
-&config,
-telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-)?;
-
-let mut telemetry = telemetry.map(|(worker, telemetry)| {
-task_manager.spawn_handle().spawn("telemetry", worker.run());
-telemetry
-});
-
-config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
-
-let select_chain = sc_consensus::LongestChain::new(backend.clone());
-
-let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
-config.transaction_pool.clone(),
-config.prometheus_registry(),
-task_manager.spawn_essential_handle(),
-client.clone(),
-on_demand.clone(),
-));
-
-let (grandpa_block_import, grandpa_link) = grandpa::block_import(
-client.clone(),
-&(client.clone() as Arc<_>),
-select_chain.clone(),
-telemetry.as_ref().map(|x| x.handle()),
-)?;
-let justification_import = grandpa_block_import.clone();
-
-let (babe_block_import, babe_link) = babe::block_import(
-babe::Config::get_or_compute(&*client)?,
-grandpa_block_import,
-client.clone(),
-)?;
-
-// FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`.
-let slot_duration = babe_link.config().slot_duration();
-let import_queue = babe::import_queue(
-babe_link,
-babe_block_import,
-Some(Box::new(justification_import)),
-client.clone(),
-select_chain.clone(),
-move |_, ()| async move {
-let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-let slot =
-sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
-*timestamp,
-slot_duration,
-);
-
-Ok((timestamp, slot))
-},
-&task_manager.spawn_essential_handle(),
-config.prometheus_registry(),
-consensus_common::NeverCanAuthor,
-telemetry.as_ref().map(|x| x.handle()),
-)?;
-
-let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
-backend.clone(),
-grandpa_link.shared_authority_set().clone(),
-));
-
-let (network, system_rpc_tx, network_starter) =
-service::build_network(service::BuildNetworkParams {
-config: &config,
-client: client.clone(),
-transaction_pool: transaction_pool.clone(),
-spawn_handle: task_manager.spawn_handle(),
-import_queue,
-on_demand: Some(on_demand.clone()),
-block_announce_validator_builder: None,
-warp_sync: Some(warp_sync),
-})?;
-
-let enable_grandpa = !config.disable_grandpa;
-if enable_grandpa {
-let name = config.network.node_name.clone();
-
-let config = grandpa::Config {
-gossip_duration: Duration::from_millis(1000),
-justification_period: 512,
-name: Some(name),
-observer_enabled: false,
-keystore: None,
-local_role: config.role.clone(),
-telemetry: telemetry.as_ref().map(|x| x.handle()),
-};
-
-task_manager.spawn_handle().spawn_blocking(
-"grandpa-observer",
-grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
-);
-}
-
-if config.offchain_worker.enabled {
-let _ = service::build_offchain_workers(
-&config,
-task_manager.spawn_handle(),
-client.clone(),
-network.clone(),
-);
-}
-
-let light_deps = polkadot_rpc::LightDeps {
-remote_blockchain: backend.remote_blockchain(),
-fetcher: on_demand.clone(),
-client: client.clone(),
-pool: transaction_pool.clone(),
-};
-
-let rpc_extensions = polkadot_rpc::create_light(light_deps);
-
-let rpc_handlers = service::spawn_tasks(service::SpawnTasksParams {
-on_demand: Some(on_demand),
-remote_blockchain: Some(backend.remote_blockchain()),
-rpc_extensions_builder: Box::new(service::NoopRpcExtensionBuilder(rpc_extensions)),
-task_manager: &mut task_manager,
-config,
-keystore: keystore_container.sync_keystore(),
-backend,
-transaction_pool,
-client,
-network,
-system_rpc_tx,
-telemetry: telemetry.as_mut(),
-})?;
-
-network_starter.start_network();
-
-Ok((task_manager, rpc_handlers))
-}
-
#[cfg(feature = "full-node")]
macro_rules! chain_ops {
($config:expr, $jaeger_agent:expr, $telemetry_worker_handle:expr; $scope:ident, $executor:ident, $variant:ident) => {{
@@ -1356,33 +1186,6 @@ pub fn new_chain_ops(
Err(Error::NoRuntime)
}

-/// Build a new light node.
-#[cfg(feature = "light-node")]
-pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), Error> {
-#[cfg(feature = "rococo-native")]
-if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() {
-return new_light::<rococo_runtime::RuntimeApi, RococoExecutorDispatch>(config)
-}
-
-#[cfg(feature = "kusama-native")]
-if config.chain_spec.is_kusama() {
-return new_light::<kusama_runtime::RuntimeApi, KusamaExecutorDispatch>(config)
-}
-
-#[cfg(feature = "westend-native")]
-if config.chain_spec.is_westend() {
-return new_light::<westend_runtime::RuntimeApi, WestendExecutorDispatch>(config)
-}
-
-#[cfg(feature = "polkadot-native")]
-{
-return new_light::<polkadot_runtime::RuntimeApi, PolkadotExecutorDispatch>(config)
-}
-
-#[cfg(not(feature = "polkadot-native"))]
-Err(Error::NoRuntime)
-}
-
#[cfg(feature = "full-node")]
pub fn build_full(
config: Configuration,
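
Everything removed from this file sat behind the `light-node` cargo feature, which the Cargo.toml hunk above deletes in the same commit. A toy illustration of the mechanism, not Polkadot code: items behind a `#[cfg(feature = ...)]` attribute simply cease to exist for the compiler once the feature is gone, which is why the flag and all gated items must go together.

// Hypothetical crate: built without `--features light-node`, only the full
// path compiles; the gated function is absent from the build entirely.
#[cfg(feature = "light-node")]
fn new_light() -> &'static str {
    "light"
}

fn new_full() -> &'static str {
    "full"
}

fn main() {
    println!("{}", new_full());
    #[cfg(feature = "light-node")]
    println!("{}", new_light());
}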
6 changes: 0 additions & 6 deletions runtime/common/src/impls.rs
@@ -29,16 +29,10 @@ where
<R as frame_system::Config>::Event: From<pallet_balances::Event<R>>,
{
fn on_nonzero_unbalanced(amount: NegativeImbalance<R>) {
-let numeric_amount = amount.peek();
-let author = <pallet_authorship::Pallet<R>>::author();
<pallet_balances::Pallet<R>>::resolve_creating(
&<pallet_authorship::Pallet<R>>::author(),
amount,
);
-<frame_system::Pallet<R>>::deposit_event(pallet_balances::Event::Deposit(
-author,
-numeric_amount,
-));
}
}
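
Read together with the commit message ("remove duplicate Deposit from OnUnbalanced implementation"), the rationale appears to be that the balances pallet already records this credit on the `resolve_creating` path, so the hand-written `Deposit` event reported the same amount twice; that reading is inferred from the PR title, not stated in the diff. The surviving method, reconstructed from the unchanged lines above:

fn on_nonzero_unbalanced(amount: NegativeImbalance<R>) {
    // Credit the block author. The explicit `deposit_event` that used to
    // follow this call was removed as a duplicate.
    <pallet_balances::Pallet<R>>::resolve_creating(
        &<pallet_authorship::Pallet<R>>::author(),
        amount,
    );
}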
31 changes: 29 additions & 2 deletions runtime/kusama/src/lib.rs
@@ -34,7 +34,7 @@ use runtime_common::{
OffchainSolutionWeightLimit, RocksDbWeight, SlowAdjustingFeeUpdate, ToAuthor,
};
use sp_core::u32_trait::{_1, _2, _3, _5};
-use sp_std::{collections::btree_map::BTreeMap, prelude::*};
+use sp_std::{cmp::Ordering, collections::btree_map::BTreeMap, prelude::*};

use runtime_parachains::{
configuration as parachains_configuration, dmp as parachains_dmp, hrmp as parachains_hrmp,
@@ -49,7 +49,10 @@ use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId;
use beefy_primitives::crypto::AuthorityId as BeefyId;
use frame_support::{
construct_runtime, match_type, parameter_types,
-traits::{Contains, Everything, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Nothing},
+traits::{
+Contains, Everything, InstanceFilter, KeyOwnerProofSystem, LockIdentifier, Nothing,
+PrivilegeCmp,
+},
weights::Weight,
PalletId, RuntimeDebug,
};
@@ -195,6 +198,29 @@ type ScheduleOrigin = EnsureOneOf<
pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>,
>;

+/// Used to compare the privilege of an origin inside the scheduler.
+pub struct OriginPrivilegeCmp;
+
+impl PrivilegeCmp<OriginCaller> for OriginPrivilegeCmp {
+fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option<Ordering> {
+if left == right {
+return Some(Ordering::Equal)
+}
+
+match (left, right) {
+// Root is greater than anything.
+(OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater),
+// Check which one has more yes votes.
+(
+OriginCaller::Council(pallet_collective::RawOrigin::Members(l_yes_votes, l_count)),
+OriginCaller::Council(pallet_collective::RawOrigin::Members(r_yes_votes, r_count)),
+) => Some((l_yes_votes * r_count).cmp(&(r_yes_votes * l_count))),
+// For every other origin we don't care, as they are not used for `ScheduleOrigin`.
+_ => None,
+}
+}
+}
+
impl pallet_scheduler::Config for Runtime {
type Event = Event;
type Origin = Origin;
@@ -204,6 +230,7 @@ impl pallet_scheduler::Config for Runtime {
type ScheduleOrigin = ScheduleOrigin;
type MaxScheduledPerBlock = MaxScheduledPerBlock;
type WeightInfo = weights::pallet_scheduler::WeightInfo<Runtime>;
+type OriginPrivilegeCmp = OriginPrivilegeCmp;
}

parameter_types! {
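
The `cmp_privilege` implementation added above ranks two council origins by their yes-vote fraction without integer division: l_yes/l_count exceeds r_yes/r_count exactly when l_yes * r_count exceeds r_yes * l_count. A standalone check of that identity in plain Rust; the function and variable names are illustrative:

use std::cmp::Ordering;

fn cmp_fractions(l_yes: u32, l_count: u32, r_yes: u32, r_count: u32) -> Ordering {
    // Cross-multiplication compares the fractions exactly, with no rounding.
    (l_yes * r_count).cmp(&(r_yes * l_count))
}

fn main() {
    // 7 of 10 yes votes vs 2 of 3: 7 * 3 = 21 > 2 * 10 = 20, so 7/10 ranks higher.
    assert_eq!(cmp_fractions(7, 10, 2, 3), Ordering::Greater);
    // Equal fractions compare equal: 1/2 vs 2/4 gives 4 = 4.
    assert_eq!(cmp_fractions(1, 2, 2, 4), Ordering::Equal);
}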