Fix/ignore clippy warnings (paritytech#1157)
* fix/ignore clippy

* --locked check && test

* RUSTC_WRAPPER="" for clippy-nightly, check-nightly, test-nightly
svyatonik authored and serban300 committed Apr 9, 2024
1 parent ac41b47 commit e039d0a
Showing 13 changed files with 45 additions and 57 deletions.
2 changes: 0 additions & 2 deletions bridges/bin/rialto-parachain/node/src/cli.rs
@@ -15,8 +15,6 @@
// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.

use crate::chain_spec;
use cumulus_client_cli;
use sc_cli;
use std::path::PathBuf;
use structopt::StructOpt;

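
The two removed imports are what clippy's `single_component_path_imports` lint targets: in the 2018 edition a bare `use some_crate;` adds nothing, because the crate is already reachable by name. A minimal sketch of the pattern, using the `regex` crate purely as an illustration (it is assumed to be listed as a dependency):

    // use regex;            // a bare single-segment import like this is what
    //                       // clippy::single_component_path_imports flags
    use regex::Regex;        // importing an item from the crate is still meaningful

    fn main() {
        let re = Regex::new(r"^\d+$").expect("valid regex");
        // The crate could also be used without any `use` at all, e.g. regex::Regex::new(..).
        assert!(re.is_match("12345"));
    }
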
10 changes: 5 additions & 5 deletions bridges/bin/rialto-parachain/node/src/command.rs
@@ -112,15 +112,15 @@ impl SubstrateCli for RelayChainCli {
}

fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
polkadot_cli::Cli::from_iter([RelayChainCli::executable_name().to_string()].iter()).load_spec(id)
polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id)
}

fn native_runtime_version(chain_spec: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
polkadot_cli::Cli::native_runtime_version(chain_spec)
}
}

fn extract_genesis_wasm(chain_spec: &Box<dyn sc_service::ChainSpec>) -> Result<Vec<u8>> {
fn extract_genesis_wasm(chain_spec: &dyn sc_service::ChainSpec) -> Result<Vec<u8>> {
let mut storage = chain_spec.build_storage()?;

storage
@@ -181,7 +181,7 @@ pub fn run() -> Result<()> {
runner.sync_run(|config| {
let polkadot_cli = RelayChainCli::new(
&config,
[RelayChainCli::executable_name().to_string()]
[RelayChainCli::executable_name()]
.iter()
.chain(cli.relaychain_args.iter()),
);
@@ -225,7 +225,7 @@ pub fn run() -> Result<()> {
builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
let _ = builder.init();

let raw_wasm_blob = extract_genesis_wasm(&cli.load_spec(&params.chain.clone().unwrap_or_default())?)?;
let raw_wasm_blob = extract_genesis_wasm(&*cli.load_spec(&params.chain.clone().unwrap_or_default())?)?;
let output_buf = if params.raw {
raw_wasm_blob
} else {
@@ -259,7 +259,7 @@ pub fn run() -> Result<()> {

let polkadot_cli = RelayChainCli::new(
&config,
[RelayChainCli::executable_name().to_string()]
[RelayChainCli::executable_name()]
.iter()
.chain(cli.relaychain_args.iter()),
);
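
Two patterns are cleaned up in this file: `clippy::borrowed_box` (a `&Box<dyn Trait>` parameter forces callers to own a `Box` when `&dyn Trait` would do), and a `.to_string()` dropped because `executable_name()` appears to already return an owned `String`. A standalone sketch of the `borrowed_box` fix, with the `&*` re-borrow used at the call site above (the trait and types are illustrative, not the real `sc_service` API):

    trait ChainSpec {
        fn name(&self) -> &str;
    }

    struct Rialto;
    impl ChainSpec for Rialto {
        fn name(&self) -> &str { "rialto" }
    }

    // Before: fn describe(spec: &Box<dyn ChainSpec>) -> String   (clippy::borrowed_box)
    // After: take &dyn ChainSpec, so callers are not forced to hold a Box.
    fn describe(spec: &dyn ChainSpec) -> String {
        format!("chain spec: {}", spec.name())
    }

    fn main() {
        let boxed: Box<dyn ChainSpec> = Box::new(Rialto);
        // `&*boxed` re-borrows the Box's contents as &dyn ChainSpec, mirroring the
        // `extract_genesis_wasm(&*cli.load_spec(..)?)` call in the diff above.
        println!("{}", describe(&*boxed));
    }
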
10 changes: 6 additions & 4 deletions bridges/bin/rialto-parachain/node/src/service.rs
@@ -68,6 +68,7 @@ impl NativeExecutionDispatch for ExecutorDispatch {
///
/// Use this macro if you don't actually need the full service, but just the builder in order to
/// be able to perform chain operations.
#[allow(clippy::type_complexity)]
pub fn new_partial<RuntimeApi, Executor, BIQ>(
config: &Configuration,
build_import_queue: BIQ,
@@ -123,7 +124,7 @@ where
);

let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, _>(
&config,
config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
@@ -332,6 +333,7 @@ where
}

/// Build the import queue for the parachain runtime.
#[allow(clippy::type_complexity)]
pub fn parachain_build_import_queue(
client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ParachainRuntimeExecutor>>>,
config: &Configuration,
@@ -360,7 +362,7 @@ pub fn parachain_build_import_queue(

Ok((time, slot))
},
registry: config.prometheus_registry().clone(),
registry: config.prometheus_registry(),
can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
spawner: &task_manager.spawn_essential_handle(),
telemetry,
@@ -399,7 +401,7 @@ pub async fn start_node(
task_manager.spawn_handle(),
client.clone(),
transaction_pool,
prometheus_registry.clone(),
prometheus_registry,
telemetry.clone(),
);

@@ -444,7 +446,7 @@ pub async fn start_node(
block_import: client.clone(),
relay_chain_client: relay_chain_node.client.clone(),
relay_chain_backend: relay_chain_node.backend.clone(),
para_client: client.clone(),
para_client: client,
backoff_authoring_blocks: Option::<()>::None,
sync_oracle,
keystore,
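
Most of the edits in this file silence `clippy::needless_borrow` (passing `&config` where `config` is already a reference) and drop `.clone()` calls on a value's last use; the two `#[allow(clippy::type_complexity)]` attributes keep the existing return types rather than introducing aliases. A small self-contained sketch of both patterns, with illustrative types:

    #[derive(Clone, Debug)]
    struct Registry {
        name: String,
    }

    fn register(registry: &Registry) {
        println!("registered with {}", registry.name);
    }

    fn build(config: &Registry) {
        // Before: register(&config) - accepted via auto-deref, but flagged by
        // clippy::needless_borrow because `config` is already a `&Registry`.
        register(config);
    }

    fn consume(registry: Registry) {
        println!("consumed {}", registry.name);
    }

    fn main() {
        let registry = Registry { name: "prometheus".into() };
        build(&registry);
        // Before: consume(registry.clone()) - a clone right before the last use;
        // the value can simply be moved instead (clippy::redundant_clone).
        consume(registry);
    }
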
10 changes: 5 additions & 5 deletions bridges/bin/rialto-parachain/runtime/src/lib.rs
@@ -263,10 +263,10 @@ impl pallet_timestamp::Config for Runtime {
}

parameter_types! {
pub const ExistentialDeposit: u128 = 1 * MILLIUNIT;
pub const TransferFee: u128 = 1 * MILLIUNIT;
pub const CreationFee: u128 = 1 * MILLIUNIT;
pub const TransactionByteFee: u128 = 1 * MICROUNIT;
pub const ExistentialDeposit: u128 = MILLIUNIT;
pub const TransferFee: u128 = MILLIUNIT;
pub const CreationFee: u128 = MILLIUNIT;
pub const TransactionByteFee: u128 = MICROUNIT;
pub const MaxLocks: u32 = 50;
pub const MaxReserves: u32 = 50;
}
@@ -671,7 +671,7 @@ impl cumulus_pallet_parachain_system::CheckInherents<Block> for CheckInherents {
.create_inherent_data()
.expect("Could not create the timestamp inherent data");

inherent_data.check_extrinsics(&block)
inherent_data.check_extrinsics(block)
}
}

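
The constant changes silence `clippy::identity_op`: multiplying by one is a no-op, so `1 * MILLIUNIT` is just `MILLIUNIT` (the `check_extrinsics(block)` change is the same needless-borrow fix as above). A quick sketch with illustrative unit values, not the runtime's actual constants:

    // Illustrative balance units.
    const UNIT: u128 = 1_000_000_000_000;
    const MILLIUNIT: u128 = UNIT / 1_000;
    const MICROUNIT: u128 = UNIT / 1_000_000;

    // Before: const EXISTENTIAL_DEPOSIT: u128 = 1 * MILLIUNIT;   (clippy::identity_op)
    const EXISTENTIAL_DEPOSIT: u128 = MILLIUNIT;
    // Multiples other than one are left alone; this does not trigger the lint.
    const BYTE_FEE: u128 = 10 * MICROUNIT;

    fn main() {
        assert_eq!(EXISTENTIAL_DEPOSIT, 1_000_000_000);
        assert_eq!(BYTE_FEE, 10_000_000);
    }
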
22 changes: 11 additions & 11 deletions bridges/bin/rialto/node/src/overseer.rs
@@ -110,7 +110,7 @@
///
/// A convenience for usage with malus, to avoid
/// repetitive code across multiple behavior strain implementations.
pub fn create_default_subsystems<'a, Spawner, RuntimeClient>(
pub fn create_default_subsystems<Spawner, RuntimeClient>(
OverseerGenArgs {
keystore,
runtime_client,
@@ -130,7 +130,7 @@ pub fn create_default_subsystems<'a, Spawner, RuntimeClient>(
chain_selection_config,
dispute_coordinator_config,
..
}: OverseerGenArgs<'a, Spawner, RuntimeClient>,
}: OverseerGenArgs<'_, Spawner, RuntimeClient>,
) -> Result<
AllSubsystems<
CandidateValidationSubsystem,
@@ -212,7 +212,7 @@ where
Metrics::register(registry)?,
),
provisioner: ProvisionerSubsystem::new(spawner.clone(), (), Metrics::register(registry)?),
runtime_api: RuntimeApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?, spawner.clone()),
runtime_api: RuntimeApiSubsystem::new(runtime_client, Metrics::register(registry)?, spawner),
statement_distribution: StatementDistributionSubsystem::new(
keystore.clone(),
statement_req_receiver,
@@ -223,7 +223,7 @@
approval_voting_config,
parachains_db.clone(),
keystore.clone(),
Box::new(network_service.clone()),
Box::new(network_service),
Metrics::register(registry)?,
),
gossip_support: GossipSupportSubsystem::new(keystore.clone()),
@@ -234,9 +234,9 @@
),
dispute_participation: DisputeParticipationSubsystem::new(),
dispute_distribution: DisputeDistributionSubsystem::new(
keystore.clone(),
keystore,
dispute_req_receiver,
authority_discovery_service.clone(),
authority_discovery_service,
Metrics::register(registry)?,
),
chain_selection: ChainSelectionSubsystem::new(chain_selection_config, parachains_db),
@@ -250,9 +250,9 @@
/// would do.
pub trait OverseerGen {
/// Overwrite the full generation of the overseer, including the subsystems.
fn generate<'a, Spawner, RuntimeClient>(
fn generate<Spawner, RuntimeClient>(
&self,
args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandle), Error>
where
RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
@@ -271,9 +271,9 @@ pub trait OverseerGen {
pub struct RealOverseerGen;

impl OverseerGen for RealOverseerGen {
fn generate<'a, Spawner, RuntimeClient>(
fn generate<Spawner, RuntimeClient>(
&self,
args: OverseerGenArgs<'a, Spawner, RuntimeClient>,
args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandle), Error>
where
RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
@@ -283,7 +283,7 @@ impl OverseerGen for RealOverseerGen {
let spawner = args.spawner.clone();
let leaves = args.leaves.clone();
let runtime_client = args.runtime_client.clone();
let registry = args.registry.clone();
let registry = args.registry;

let all_subsystems = create_default_subsystems::<Spawner, RuntimeClient>(args)?;

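
The signature changes in this file address clippy's needless-lifetime warning: when a named lifetime appears in only one argument and is never tied to the return type or another bound, the anonymous `'_` is enough. A reduced sketch of the same transformation, with an illustrative struct and function rather than the real overseer types:

    struct OverseerGenArgs<'a, S> {
        spawner: &'a S,
    }

    // Before:
    //   fn generate<'a, S>(args: OverseerGenArgs<'a, S>) -> usize { ... }
    // The named lifetime is never referenced anywhere else, so clippy suggests
    // the elided form below (clippy::needless_lifetimes).
    fn generate<S>(args: OverseerGenArgs<'_, S>) -> usize
    where
        S: std::fmt::Debug,
    {
        format!("{:?}", args.spawner).len()
    }

    fn main() {
        let spawner = "task-spawner";
        let args = OverseerGenArgs { spawner: &spawner };
        println!("{}", generate(args));
    }

The remaining edits here are the same last-use clone removals as in the service changes above.
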
2 changes: 1 addition & 1 deletion bridges/bin/rialto/node/src/service.rs
@@ -605,7 +605,7 @@ where
pov_req_receiver,
statement_req_receiver,
})?;
let handle = Handle::Connected(overseer_handle.clone());
let handle = Handle::Connected(overseer_handle);
let handle_clone = handle.clone();

task_manager.spawn_essential_handle().spawn_blocking(
12 changes: 1 addition & 11 deletions bridges/modules/ethereum/src/lib.rs
@@ -556,6 +556,7 @@
StorageMap<_, Identity, H256, AuraScheduledChange>;

#[pallet::genesis_config]
#[cfg_attr(feature = "std", derive(Default))]
pub struct GenesisConfig {
/// PoA header to start with.
pub initial_header: AuraHeader,
@@ -565,17 +566,6 @@
pub initial_validators: Vec<Address>,
}

#[cfg(feature = "std")]
impl Default for GenesisConfig {
fn default() -> Self {
Self {
initial_header: Default::default(),
initial_difficulty: Default::default(),
initial_validators: Default::default(),
}
}
}

#[pallet::genesis_build]
impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig {
fn build(&self) {
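
Here the hand-written `Default` impl, whose every field was just `Default::default()`, is replaced by deriving it; `cfg_attr` keeps the derive behind the `std` feature, matching the old `#[cfg(feature = "std")]` impl. This is exactly what `clippy::derivable_impls` asks for. A minimal sketch with illustrative fields:

    // Before: a manual impl that clippy::derivable_impls flags, because every
    // field is initialised with Default::default():
    //
    // impl Default for GenesisConfig {
    //     fn default() -> Self {
    //         Self {
    //             initial_number: Default::default(),
    //             initial_validators: Default::default(),
    //         }
    //     }
    // }

    #[derive(Default, Debug)]
    struct GenesisConfig {
        initial_number: u64,
        initial_validators: Vec<[u8; 20]>,
    }

    fn main() {
        let config = GenesisConfig::default();
        println!("{:?}", config);
    }
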
1 change: 1 addition & 0 deletions bridges/modules/token-swap/src/lib.rs
@@ -192,6 +192,7 @@
/// rules will lead to losing message fees for this and other transactions + losing fees for message
/// transfer.
#[pallet::weight(0)]
#[allow(clippy::too_many_arguments)]
pub fn create_swap(
origin: OriginFor<T>,
swap: TokenSwapOf<T, I>,
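
`clippy::too_many_arguments` fires once a function takes more than seven parameters (the default threshold). Dispatchable pallet calls often legitimately exceed that, so the warning is allowed on the one function rather than reshaping its API. An illustrative, self-contained sketch of where the lint would trigger and how the allow is scoped:

    // Eight parameters crosses clippy's default too_many_arguments threshold of 7,
    // so the lint is allowed for this function only.
    #[allow(clippy::too_many_arguments)]
    fn create_swap(
        source: u64,
        target: u64,
        amount: u128,
        fee: u128,
        nonce: u32,
        deadline: u32,
        signature: Vec<u8>,
        memo: Vec<u8>,
    ) -> u128 {
        // Dummy body: combine the inputs so every parameter is used.
        amount + fee + u128::from(nonce) + u128::from(deadline)
            + signature.len() as u128 + memo.len() as u128
            + source as u128 + target as u128
    }

    fn main() {
        let total = create_swap(1, 2, 1_000, 10, 0, 100, vec![0u8; 64], Vec::new());
        println!("{}", total);
    }
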
2 changes: 1 addition & 1 deletion bridges/primitives/chain-kusama/src/lib.rs
@@ -77,7 +77,7 @@ pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 / 30_000;
///
/// Note that since this is a target sessions may change before/after this time depending on network
/// conditions.
pub const SESSION_LENGTH: BlockNumber = 1 * time_units::HOURS;
pub const SESSION_LENGTH: BlockNumber = time_units::HOURS;

/// Name of the With-Polkadot messages pallet instance in the Kusama runtime.
pub const WITH_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages";
2 changes: 2 additions & 0 deletions bridges/primitives/ethereum-poa/src/lib.rs
@@ -458,6 +458,8 @@ impl PartialEq<Bloom> for Bloom {
}
}

// there's no default for [_; 256], but clippy still complains
#[allow(clippy::derivable_impls)]
impl Default for Bloom {
fn default() -> Self {
Bloom([0; 256])
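
This one is the opposite case to the ethereum genesis config above: clippy suggests deriving `Default` for `Bloom`, but `[u8; 256]` has no `Default` impl (the standard library only provides array `Default` impls up to length 32), so the derive would not compile and the manual impl stays, with the lint allowed and the reason recorded in a comment. A sketch of the same situation:

    struct Bloom([u8; 256]);

    // [u8; 256] has no Default impl, so the derive clippy suggests would not
    // compile; keep the manual impl and allow the lint on it.
    #[allow(clippy::derivable_impls)]
    impl Default for Bloom {
        fn default() -> Self {
            Bloom([0; 256])
        }
    }

    fn main() {
        let bloom = Bloom::default();
        assert!(bloom.0.iter().all(|byte| *byte == 0));
    }
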
4 changes: 2 additions & 2 deletions bridges/primitives/runtime/src/lib.rs
@@ -216,9 +216,9 @@ pub fn storage_map_final_key_identity(pallet_prefix: &str, map_name: &str, key_h
/// Copypaste from `frame_support::parameter_types` macro
pub fn storage_parameter_key(parameter_name: &str) -> StorageKey {
let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1 + 1);
buffer.push(':' as u8);
buffer.push(b':');
buffer.extend_from_slice(parameter_name.as_bytes());
buffer.push(':' as u8);
buffer.push(b':');
buffer.push(0);
StorageKey(sp_io::hashing::twox_128(&buffer).to_vec())
}
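
`':' as u8` works, but clippy's `char_lit_as_u8` lint prefers the byte literal `b':'`, which says the same thing without a cast. A tiny sketch of the key-building pattern above; the `twox_128` hashing step from the original is omitted here to keep the example dependency-free:

    fn storage_parameter_key(parameter_name: &str) -> Vec<u8> {
        let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1 + 1);
        buffer.push(b':'); // before: buffer.push(':' as u8) - clippy::char_lit_as_u8
        buffer.extend_from_slice(parameter_name.as_bytes());
        buffer.push(b':');
        buffer.push(0);
        buffer
    }

    fn main() {
        assert_eq!(storage_parameter_key("Param"), b":Param:\0".to_vec());
    }
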
16 changes: 10 additions & 6 deletions bridges/relays/bin-substrate/src/cli/swap_tokens.rs
@@ -16,6 +16,10 @@

//! Tokens swap using token-swap bridge pallet.

// TokenSwapBalances fields are never directly accessed, but the whole struct is printed
// to show token swap progress
#![allow(dead_code)]

use codec::Encode;
use num_traits::One;
use rand::random;
@@ -436,7 +440,7 @@ impl SwapTokens {

// prepare token swap intention
Ok(bp_token_swap::TokenSwap {
swap_type: self.prepare_token_swap_type(&source_client).await?,
swap_type: self.prepare_token_swap_type(source_client).await?,
source_balance_at_this_chain,
source_account_at_this_chain: source_account_at_this_chain.clone(),
target_balance_at_bridged_chain,
@@ -498,26 +502,26 @@ async fn read_account_balances<Source: ChainWithBalances, Target: ChainWithBalan
) -> anyhow::Result<TokenSwapBalances<BalanceOf<Source>, BalanceOf<Target>>> {
Ok(TokenSwapBalances {
source_account_at_this_chain_balance: read_account_balance(
&source_client,
source_client,
&accounts.source_account_at_this_chain,
)
.await?,
source_account_at_bridged_chain_balance: read_account_balance(
&target_client,
target_client,
&accounts.source_account_at_bridged_chain,
)
.await?,
target_account_at_bridged_chain_balance: read_account_balance(
&target_client,
target_client,
&accounts.target_account_at_bridged_chain,
)
.await?,
target_account_at_this_chain_balance: read_account_balance(
&source_client,
source_client,
&accounts.target_account_at_this_chain,
)
.await?,
swap_account_balance: read_account_balance(&source_client, &accounts.swap_account).await?,
swap_account_balance: read_account_balance(source_client, &accounts.swap_account).await?,
})
}

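
The module-level `#![allow(dead_code)]` is justified by the new comment: the `TokenSwapBalances` fields are only ever read through the struct's `Debug` output, and (depending on compiler version) fields read only via a derived impl can still be reported as never read. The remaining edits are the same needless-borrow fixes seen earlier. A minimal reproduction of the dead-code situation, scoped to the struct here rather than the whole module, with illustrative fields:

    // Without the allow, rustc may warn that these fields are never read:
    // formatting the struct via its derived Debug impl does not count as a use.
    #[allow(dead_code)]
    #[derive(Debug)]
    struct TokenSwapBalances {
        source_balance: u128,
        target_balance: u128,
    }

    fn main() {
        let balances = TokenSwapBalances { source_balance: 10, target_balance: 7 };
        // The whole struct is only printed to show swap progress.
        println!("swap progress: {:?}", balances);
    }
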
9 changes: 0 additions & 9 deletions bridges/relays/headers/src/headers.rs
@@ -82,15 +82,6 @@ pub struct QueuedHeaders<P: HeadersSyncPipeline> {
prune_border: P::Number,
}

/// Header completion data.
#[derive(Debug)]
struct HeaderCompletion<Completion> {
/// Last time when we tried to upload completion data to target node, if ever.
pub last_upload_time: Option<Instant>,
/// Completion data.
pub completion: Completion,
}

impl<P: HeadersSyncPipeline> Default for QueuedHeaders<P> {
fn default() -> Self {
QueuedHeaders {
