Feat/polkadot v0.9.43 uplift #978

Merged: 33 commits, Jul 26, 2023

Commits
62d0fb0
Init dep bump
Dinonard Jul 11, 2023
71e2dd6
Progress
Dinonard Jul 12, 2023
17c28dd
Progress 2
Dinonard Jul 13, 2023
9a455d8
Progress 3
Dinonard Jul 13, 2023
e58599f
Progress 4
Dinonard Jul 14, 2023
8eb1c2d
Progress 5
Dinonard Jul 17, 2023
e9d5c0c
Checkpoint
Dinonard Jul 17, 2023
694d11a
Progress 6
Dinonard Jul 17, 2023
026ccbd
Tracing
Dinonard Jul 17, 2023
5f92a43
Compile & tests should work
Dinonard Jul 17, 2023
446db86
Integration tests
Dinonard Jul 18, 2023
c31604a
contract migrations
Dinonard Jul 18, 2023
bd96f04
precompiles
Dinonard Jul 18, 2023
9cc1eac
Fixed precompiles
Dinonard Jul 18, 2023
aff2e53
toml format
Dinonard Jul 18, 2023
d71bb4e
XCM origin derivation change
Dinonard Jul 19, 2023
8bac618
Xcm tools adapt
Dinonard Jul 19, 2023
8088e2e
Some fixes
Dinonard Jul 19, 2023
f750615
Merge remote-tracking branch 'origin/master' into feat/polkadot-v0.9.…
Dinonard Jul 19, 2023
01b3f6e
Adapt for weightv2
Dinonard Jul 19, 2023
a701f16
Small updates
Dinonard Jul 20, 2023
3f214c7
Update contracts
Dinonard Jul 20, 2023
5e89bab
Update contracts
Dinonard Jul 20, 2023
afb033c
Deps fix
Dinonard Jul 20, 2023
c393846
Address PR comments
Dinonard Jul 24, 2023
c1ff0e6
Update weights
Dinonard Jul 24, 2023
21c0491
license
Dinonard Jul 24, 2023
69d1b46
Fix
Dinonard Jul 24, 2023
385d44c
Adapt xcm tools
Dinonard Jul 24, 2023
e8e6340
Bump versions
Dinonard Jul 24, 2023
8d4ca14
Small changes
Dinonard Jul 25, 2023
be76184
Merge remote-tracking branch 'origin/master' into feat/polkadot-v0.9.…
Dinonard Jul 25, 2023
0433307
PR comments
Dinonard Jul 25, 2023
2,062 changes: 1,216 additions & 846 deletions Cargo.lock

Large diffs are not rendered by default.

296 changes: 148 additions & 148 deletions Cargo.toml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion bin/collator/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "astar-collator"
version = "5.13.0"
version = "5.15.0"
description = "Astar collator implementation in Rust."
build = "build.rs"
default-run = "astar-collator"
9 changes: 3 additions & 6 deletions bin/collator/src/cli.rs
@@ -130,14 +130,11 @@ impl RelayChainCli {
) -> Self {
let extension = crate::parachain::chain_spec::Extensions::try_get(&*para_config.chain_spec);
let chain_id = extension.map(|e| e.relay_chain.clone());
let base_path = para_config
.base_path
.as_ref()
.map(|x| x.path().join("polkadot"));
let base_path = para_config.base_path.path().join("polkadot");
Self {
base_path,
chain_id,
base: polkadot_cli::RunCmd::parse_from(relay_chain_args),
chain_id,
base_path: Some(base_path),
}
}
}
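Note: the `RelayChainCli::new` hunk above tracks the Substrate change in this release line where `Configuration::base_path` appears to no longer be an `Option`, while the CLI struct's own `base_path` field stays optional. A minimal before/after sketch of just that assignment, as read from the flattened hunk (field layout assumed):

```rust
// Before: base_path on the parachain config was optional.
let base_path = para_config
    .base_path
    .as_ref()
    .map(|x| x.path().join("polkadot"));

// After: the path is always present, so it is joined directly and only
// wrapped in `Some` because `RelayChainCli::base_path` remains an Option.
let base_path = Some(para_config.base_path.path().join("polkadot"));
```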
22 changes: 5 additions & 17 deletions bin/collator/src/command.rs
@@ -962,14 +962,10 @@ impl DefaultConfigurationValues for RelayChainCli {
30334
}

fn rpc_ws_listen_port() -> u16 {
fn rpc_listen_port() -> u16 {
9945
}

fn rpc_http_listen_port() -> u16 {
9934
}

fn prometheus_listen_port() -> u16 {
9616
}
@@ -999,16 +995,8 @@ impl CliConfiguration<Self> for RelayChainCli {
.or_else(|| self.base_path.clone().map(Into::into)))
}

fn rpc_http(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_http(default_listen_port)
}

fn rpc_ipc(&self) -> Result<Option<String>> {
self.base.base.rpc_ipc()
}

fn rpc_ws(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_ws(default_listen_port)
fn rpc_addr(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
self.base.base.rpc_addr(default_listen_port)
}

fn prometheus_config(
@@ -1060,8 +1048,8 @@ impl CliConfiguration<Self> for RelayChainCli {
self.base.base.rpc_methods()
}

fn rpc_ws_max_connections(&self) -> Result<Option<usize>> {
self.base.base.rpc_ws_max_connections()
fn rpc_max_connections(&self) -> Result<u32> {
self.base.base.rpc_max_connections()
}

fn rpc_cors(&self, is_dev: bool) -> Result<Option<Vec<String>>> {
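Note: the `command.rs` hunks follow the RPC server consolidation in this Substrate release, where the separate HTTP/WS/IPC endpoints collapse into a single socket address. A condensed before/after sketch of the delegating methods (only the renamed methods are shown; the surrounding `impl` blocks and imports are elided, so this is illustrative rather than compilable on its own):

```rust
// Before: one default port and one accessor per transport.
fn rpc_ws_listen_port() -> u16 { 9945 }
fn rpc_http_listen_port() -> u16 { 9934 }
fn rpc_ws(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
    self.base.base.rpc_ws(default_listen_port)
}
fn rpc_ws_max_connections(&self) -> Result<Option<usize>> {
    self.base.base.rpc_ws_max_connections()
}

// After: a single RPC endpoint and a non-optional connection limit.
fn rpc_listen_port() -> u16 { 9945 }
fn rpc_addr(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
    self.base.base.rpc_addr(default_listen_port)
}
fn rpc_max_connections(&self) -> Result<u32> {
    self.base.base.rpc_max_connections()
}
```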
43 changes: 17 additions & 26 deletions bin/collator/src/local/service.rs
@@ -79,17 +79,11 @@ pub fn new_partial(
>,
sc_consensus_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
Option<Telemetry>,
Arc<fc_db::Backend<Block>>,
Arc<fc_db::kv::Backend<Block>>,
),
>,
ServiceError,
> {
if config.keystore_remote.is_some() {
return Err(ServiceError::Other(
"Remote Keystores are not supported.".to_string(),
));
}

let telemetry = config
.telemetry_endpoints
.clone()
@@ -100,12 +94,8 @@ pub fn new_partial(
Ok((worker, telemetry))
})
.transpose()?;
let executor = sc_executor::NativeElseWasmExecutor::<Executor>::new(
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
config.runtime_cache_size,
);

let executor = sc_service::new_native_or_wasm_executor(&config);

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
@@ -135,11 +125,8 @@ pub fn new_partial(
telemetry.as_ref().map(|x| x.handle()),
)?;
let frontier_backend = crate::rpc::open_frontier_backend(client.clone(), config)?;
let frontier_block_import = FrontierBlockImport::new(
grandpa_block_import.clone(),
client.clone(),
frontier_backend.clone(),
);
let frontier_block_import =
FrontierBlockImport::new(grandpa_block_import.clone(), client.clone());
let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
ImportQueueParams {
@@ -208,10 +195,12 @@ pub fn start_node(
.expect("Genesis block exists; qed"),
&config.chain_spec,
);
let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);

let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
@@ -268,7 +257,7 @@ pub fn start_node(
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
Some("frontier"),
fc_mapping_sync::MappingSyncWorker::new(
fc_mapping_sync::kv::MappingSyncWorker::new(
client.import_notification_stream(),
Duration::new(6, 0),
client.clone(),
@@ -368,7 +357,7 @@ pub fn start_node(
let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
keystore: keystore_container.sync_keystore(),
keystore: keystore_container.keystore(),
task_manager: &mut task_manager,
transaction_pool: transaction_pool.clone(),
rpc_builder: rpc_extensions_builder,
@@ -411,7 +400,7 @@ pub fn start_node(
},
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.sync_keystore(),
keystore: keystore_container.keystore(),
sync_oracle: sync_service.clone(),
justification_sync_link: sync_service.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
@@ -431,7 +420,7 @@ pub fn start_node(
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if role.is_authority() {
Some(keystore_container.sync_keystore())
Some(keystore_container.keystore())
} else {
None
};
@@ -501,10 +490,12 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
.expect("Genesis block exists; qed"),
&config.chain_spec,
);
let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);

let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
sc_service::build_network(sc_service::BuildNetworkParams {
config: &config,
net_config,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
@@ -540,7 +531,7 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
Some("frontier"),
fc_mapping_sync::MappingSyncWorker::new(
fc_mapping_sync::kv::MappingSyncWorker::new(
client.import_notification_stream(),
Duration::new(6, 0),
client.clone(),
@@ -630,7 +621,7 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
network: network.clone(),
client: client.clone(),
keystore: keystore_container.sync_keystore(),
keystore: keystore_container.keystore(),
task_manager: &mut task_manager,
transaction_pool: transaction_pool.clone(),
rpc_builder: rpc_extensions_builder,
@@ -673,7 +664,7 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
},
force_authoring,
backoff_authoring_blocks,
keystore: keystore_container.sync_keystore(),
keystore: keystore_container.keystore(),
sync_oracle: sync_service.clone(),
justification_sync_link: sync_service.clone(),
block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
@@ -693,7 +684,7 @@ pub fn start_node(config: Configuration) -> Result<TaskManager, ServiceError> {
// if the node isn't actively participating in consensus then it doesn't
// need a keystore, regardless of which protocol we use below.
let keystore = if role.is_authority() {
Some(keystore_container.sync_keystore())
Some(keystore_container.keystore())
} else {
None
};
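Note: the `local/service.rs` changes are mostly mechanical renames from the uplift. A condensed sketch of the new call sites, copied from the hunks above (the surrounding service-builder code is elided, so this is illustrative rather than a standalone program):

```rust
// Executor: the four-argument NativeElseWasmExecutor constructor is replaced
// by a helper that reads wasm_method, heap pages, instance and cache limits
// straight from the config.
let executor = sc_service::new_native_or_wasm_executor(&config);

// Networking: build_network now takes an explicit FullNetworkConfiguration.
let net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);

// Keystore: `keystore()` replaces `sync_keystore()` everywhere it was used.
let keystore = keystore_container.keystore();

// Frontier: the KV backend and mapping-sync worker now live under `kv`, and
// FrontierBlockImport no longer takes the backend as a third argument.
let frontier_block_import =
    FrontierBlockImport::new(grandpa_block_import.clone(), client.clone());
```

The removed remote-keystore guard at the top of `new_partial` is consistent with remote keystores being dropped upstream in this release.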
42 changes: 22 additions & 20 deletions bin/collator/src/parachain/service.rs
@@ -44,7 +44,7 @@ use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, Ta
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
use sp_api::ConstructRuntimeApi;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_keystore::SyncCryptoStorePtr;
use sp_keystore::KeystorePtr;
use sp_runtime::traits::BlakeTwo256;
use sp_runtime::Percent;
use std::{collections::BTreeMap, sync::Arc, time::Duration};
@@ -155,7 +155,7 @@ pub fn new_partial<RuntimeApi, Executor, BIQ>(
>,
Option<Telemetry>,
Option<TelemetryWorkerHandle>,
Arc<fc_db::Backend<Block>>,
Arc<fc_db::kv::Backend<Block>>,
),
>,
sc_service::Error,
@@ -209,12 +209,7 @@ where
})
.transpose()?;

let executor = sc_executor::NativeElseWasmExecutor::<Executor>::new(
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
config.runtime_cache_size,
);
let executor = sc_service::new_native_or_wasm_executor(&config);

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
@@ -242,8 +237,7 @@ where
);

let frontier_backend = crate::rpc::open_frontier_backend(client.clone(), config)?;
let frontier_block_import =
FrontierBlockImport::new(client.clone(), client.clone(), frontier_backend.clone());
let frontier_block_import = FrontierBlockImport::new(client.clone(), client.clone());

let parachain_block_import: ParachainBlockImport<_, _, _> =
ParachainBlockImport::new(frontier_block_import, backend.clone());
@@ -382,7 +376,7 @@ where
>,
>,
Arc<SyncingService<Block>>,
SyncCryptoStorePtr,
KeystorePtr,
bool,
) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
{
@@ -391,6 +385,7 @@ where
let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
let (parachain_block_import, mut telemetry, telemetry_worker_handle, frontier_backend) =
params.other;
let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);

let client = params.client.clone();
let backend = params.backend.clone();
@@ -414,6 +409,7 @@ where
let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
cumulus_client_service::build_network(BuildNetworkParams {
parachain_config: &parachain_config,
net_config,
para_id: id,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
@@ -441,7 +437,7 @@ where
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
Some("frontier"),
fc_mapping_sync::MappingSyncWorker::new(
fc_mapping_sync::kv::MappingSyncWorker::new(
client.import_notification_stream(),
Duration::new(6, 0),
client.clone(),
@@ -527,7 +523,7 @@ where
transaction_pool: transaction_pool.clone(),
task_manager: &mut task_manager,
config: parachain_config,
keystore: params.keystore_container.sync_keystore(),
keystore: params.keystore_container.keystore(),
backend: backend.clone(),
network: network.clone(),
system_rpc_tx,
@@ -556,8 +552,8 @@ where
&task_manager,
relay_chain_interface.clone(),
transaction_pool,
sync_service,
params.keystore_container.sync_keystore(),
sync_service.clone(),
params.keystore_container.keystore(),
force_authoring,
)?;

@@ -576,6 +572,7 @@ where
collator_key: collator_key.expect("Command line arguments do not allow this. qed"),
relay_chain_slot_duration,
recovery_handle: Box::new(overseer_handle),
sync_service,
};

start_collator(params).await?;
@@ -589,6 +586,7 @@ where
relay_chain_slot_duration,
import_queue: import_queue_service,
recovery_handle: Box::new(overseer_handle),
sync_service,
};

start_full_node(params)?;
@@ -698,7 +696,7 @@ where
>,
>,
Arc<SyncingService<Block>>,
SyncCryptoStorePtr,
KeystorePtr,
bool,
) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
{
@@ -707,6 +705,7 @@ where
let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
let (parachain_block_import, mut telemetry, telemetry_worker_handle, frontier_backend) =
params.other;
let net_config = sc_network::config::FullNetworkConfiguration::new(&parachain_config.network);

let client = params.client.clone();
let backend = params.backend.clone();
@@ -730,6 +729,7 @@ where
let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
cumulus_client_service::build_network(BuildNetworkParams {
parachain_config: &parachain_config,
net_config,
para_id: id,
client: client.clone(),
transaction_pool: transaction_pool.clone(),
@@ -778,7 +778,7 @@ where
task_manager.spawn_essential_handle().spawn(
"frontier-mapping-sync-worker",
Some("frontier"),
fc_mapping_sync::MappingSyncWorker::new(
fc_mapping_sync::kv::MappingSyncWorker::new(
client.import_notification_stream(),
Duration::new(6, 0),
client.clone(),
@@ -874,7 +874,7 @@ where
transaction_pool: transaction_pool.clone(),
task_manager: &mut task_manager,
config: parachain_config,
keystore: params.keystore_container.sync_keystore(),
keystore: params.keystore_container.keystore(),
backend: backend.clone(),
network: network.clone(),
system_rpc_tx,
@@ -903,8 +903,8 @@ where
&task_manager,
relay_chain_interface.clone(),
transaction_pool,
sync_service,
params.keystore_container.sync_keystore(),
sync_service.clone(),
params.keystore_container.keystore(),
force_authoring,
)?;

@@ -923,6 +923,7 @@ where
collator_key: collator_key.expect("Command line arguments do not allow this. qed"),
relay_chain_slot_duration,
recovery_handle: Box::new(overseer_handle),
sync_service,
};

start_collator(params).await?;
@@ -936,6 +937,7 @@ where
relay_chain_slot_duration,
import_queue: import_queue_service,
recovery_handle: Box::new(overseer_handle),
sync_service,
};

start_full_node(params)?;
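Note: `parachain/service.rs` repeats the renames already seen above (the executor helper, `kv`-scoped Frontier types, `keystore()`, the explicit `net_config`) and adds two deltas specific to the Cumulus path: the keystore pointer type in the consensus-builder bound changes, and the sync service handle is now threaded into the collator / full-node start parameters. A minimal sketch of the keystore change, with types taken from the diff and everything else elided:

```rust
// The async-only keystore pointer replaces the synchronous one in the
// build-consensus closure bound and at every accessor call site.
use sp_keystore::KeystorePtr; // was: sp_keystore::SyncCryptoStorePtr

let keystore: KeystorePtr = params.keystore_container.keystore(); // was: sync_keystore()
```

For the second delta, `sync_service` is cloned where the consensus builder is invoked and the original handle is moved into the params consumed by `start_collator(params).await?` and `start_full_node(params)?`, both of which gained a `sync_service` field in this release.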