From 4dfcb154ca650f7d1b53cff30f01f6ed7bca3bac Mon Sep 17 00:00:00 2001
From: Alexander Meißner
Date: Fri, 18 Jun 2021 15:34:46 +0200
Subject: [PATCH 1/4] chore: cargo +nightly clippy --fix -Z unstable-options

(cherry picked from commit 6514096a675ba6962a7fe105353155e22421cfe3)

# Conflicts:
#	core/src/banking_stage.rs
#	core/src/cost_model.rs
#	core/src/cost_tracker.rs
#	core/src/execute_cost_table.rs
#	core/src/replay_stage.rs
#	core/src/tvu.rs
#	ledger-tool/src/main.rs
#	programs/bpf_loader/build.rs
#	rbpf-cli/src/main.rs
#	sdk/cargo-build-bpf/src/main.rs
#	sdk/cargo-test-bpf/src/main.rs
#	sdk/src/secp256k1_instruction.rs
---
 account-decoder/src/lib.rs | 12 +-
 account-decoder/src/parse_config.rs | 6 +-
 accounts-cluster-bench/src/main.rs | 8 +-
 banking-bench/src/main.rs | 2 +-
 banks-client/src/lib.rs | 4 +-
 banks-server/src/banks_server.rs | 2 +-
 banks-server/src/send_transaction_service.rs | 4 +-
 bench-exchange/src/bench.rs | 18 +-
 bench-streamer/src/main.rs | 2 +-
 bench-tps/src/bench.rs | 10 +-
 bench-tps/src/main.rs | 6 +-
 clap-utils/src/keypair.rs | 8 +-
 cli-config/src/config.rs | 10 +-
 cli-output/src/cli_output.rs | 6 +-
 cli/src/cli.rs | 62 +--
 cli/src/cluster_query.rs | 2 +-
 cli/src/inflation.rs | 2 +-
 cli/src/main.rs | 8 +-
 cli/src/nonce.rs | 16 +-
 cli/src/program.rs | 30 +-
 cli/src/spend_utils.rs | 2 +-
 cli/src/stake.rs | 44 +-
 cli/src/validator_info.rs | 10 +-
 cli/src/vote.rs | 14 +-
 cli/tests/program.rs | 42 +-
 client/src/rpc_cache.rs | 2 +-
 client/src/rpc_client.rs | 12 +-
 client/src/thin_client.rs | 4 +-
 client/src/tpu_client.rs | 2 +-
 core/benches/banking_stage.rs | 2 +-
 core/benches/consensus.rs | 4 +-
 core/src/accounts_hash_verifier.rs | 2 +-
 core/src/banking_stage.rs | 64 ++-
 core/src/broadcast_stage.rs | 4 +-
 .../broadcast_duplicates_run.rs | 4 +-
 .../broadcast_stage/standard_broadcast_run.rs | 2 +-
 core/src/cluster_info_vote_listener.rs | 10 +-
 core/src/cluster_slot_state_verifier.rs | 2 +-
 core/src/commitment_service.rs | 6 +-
 core/src/consensus.rs | 28 +-
 core/src/cost_model.rs | 513 ++++++++++++++++++
 core/src/cost_tracker.rs | 356 ++++++++++++
 core/src/execute_cost_table.rs | 277 ++++++++++
 core/src/fetch_stage.rs | 10 +-
 core/src/heaviest_subtree_fork_choice.rs | 30 +-
 core/src/ledger_cleanup_service.rs | 2 +-
 core/src/optimistic_confirmation_verifier.rs | 2 +-
 core/src/progress_map.rs | 2 +-
 core/src/repair_service.rs | 8 +-
 core/src/repair_weight.rs | 16 +-
 core/src/repair_weighted_traversal.rs | 2 +-
 core/src/replay_stage.rs | 107 ++--
 core/src/serve_repair.rs | 10 +-
 core/src/serve_repair_service.rs | 2 +-
 core/src/shred_fetch_stage.rs | 8 +-
 core/src/tpu.rs | 12 +-
 core/src/tvu.rs | 14 +-
 .../unfrozen_gossip_verified_vote_hashes.rs | 2 +-
 core/src/validator.rs | 22 +-
 core/src/window_service.rs | 6 +-
 core/tests/fork-selection.rs | 4 +-
 core/tests/snapshots.rs | 26 +-
 dos/src/main.rs | 4 +-
 faucet/src/faucet.rs | 4 +-
 faucet/tests/local-faucet.rs | 2 +-
 frozen-abi/macro/src/lib.rs | 8 +-
 frozen-abi/src/abi_example.rs | 2 +-
 genesis-utils/src/lib.rs | 6 +-
 genesis/src/genesis_accounts.rs | 10 +-
 genesis/src/main.rs | 12 +-
 gossip/src/cluster_info.rs | 10 +-
 gossip/src/contact_info.rs | 12 +-
 gossip/src/crds_gossip.rs | 2 +-
 gossip/src/crds_gossip_push.rs | 2 +-
 gossip/src/crds_value.rs | 6 +-
 gossip/src/gossip_service.rs | 4 +-
 gossip/src/main.rs | 4 +-
 gossip/tests/crds_gossip.rs | 8 +-
 install/src/command.rs | 12 +-
 install/src/lib.rs | 20 +-
 keygen/src/keygen.rs | 16 +-
 ledger-tool/src/bigtable.rs | 2 +-
 ledger-tool/src/main.rs | 95 +++-
 ledger-tool/tests/basic.rs | 4 +-
 ledger/src/bank_forks_utils.rs | 12 +-
 ledger/src/blockstore.rs | 30 +-
 ledger/src/blockstore_processor.rs | 20 +-
 ledger/src/entry.rs | 14 +-
 ledger/src/leader_schedule_utils.rs | 4 +-
 ledger/src/poh.rs | 22 +-
 ledger/src/shred.rs | 4 +-
 ledger/src/sigverify_shreds.rs | 4 +-
 local-cluster/src/cluster_tests.rs | 16 +-
 local-cluster/src/local_cluster.rs | 10 +-
 local-cluster/tests/local_cluster.rs | 50 +-
 measure/src/measure.rs | 2 +-
 metrics/src/counter.rs | 2 +-
 perf/src/packet.rs | 2 +-
 perf/src/sigverify.rs | 8 +-
 poh/benches/poh_verify.rs | 4 +-
 poh/src/poh_recorder.rs | 4 +-
 program-test/src/lib.rs | 10 +-
 programs/bpf_loader/benches/serialization.rs | 8 +-
 programs/bpf_loader/build.rs | 34 ++
 programs/bpf_loader/src/lib.rs | 8 +-
 programs/bpf_loader/src/serialization.rs | 16 +-
 programs/bpf_loader/src/syscalls.rs | 8 +-
 programs/config/src/config_processor.rs | 20 +-
 programs/config/src/date_instruction.rs | 2 +-
 programs/exchange/src/exchange_processor.rs | 28 +-
 programs/ownable/src/ownable_instruction.rs | 2 +-
 programs/ownable/src/ownable_processor.rs | 4 +-
 programs/stake/src/config.rs | 2 +-
 programs/stake/src/stake_instruction.rs | 6 +-
 programs/stake/src/stake_state.rs | 20 +-
 programs/vote/src/vote_state/mod.rs | 8 +-
 programs/vote/src/vote_transaction.rs | 2 +-
 rbpf-cli/src/main.rs | 253 +++++++++
 remote-wallet/src/remote_wallet.rs | 4 +-
 rpc/src/parsed_token_accounts.rs | 6 +-
 rpc/src/rpc.rs | 118 ++--
 rpc/src/rpc_health.rs | 2 +-
 rpc/src/rpc_pubsub.rs | 16 +-
 rpc/src/rpc_service.rs | 2 +-
 rpc/src/rpc_subscriptions.rs | 24 +-
 rpc/src/send_transaction_service.rs | 4 +-
 runtime/benches/accounts.rs | 2 +-
 runtime/benches/bank.rs | 10 +-
 runtime/src/accounts.rs | 10 +-
 runtime/src/accounts_background_service.rs | 2 +-
 runtime/src/accounts_db.rs | 94 ++--
 runtime/src/accounts_hash.rs | 2 +-
 runtime/src/accounts_index.rs | 36 +-
 runtime/src/ancestors.rs | 2 +-
 runtime/src/append_vec.rs | 8 +-
 runtime/src/bank.rs | 84 +--
 runtime/src/bank_forks.rs | 2 +-
 runtime/src/epoch_stakes.rs | 2 +-
 runtime/src/genesis_utils.rs | 6 +-
 runtime/src/hardened_unpack.rs | 4 +-
 runtime/src/loader_utils.rs | 18 +-
 runtime/src/message_processor.rs | 14 +-
 runtime/src/native_loader.rs | 2 +-
 runtime/src/non_circulating_supply.rs | 2 +-
 runtime/src/secondary_index.rs | 8 +-
 runtime/src/serde_snapshot/future.rs | 2 +-
 runtime/src/serde_snapshot/tests.rs | 4 +-
 runtime/src/snapshot_utils.rs | 12 +-
 runtime/src/stakes.rs | 10 +-
 runtime/src/status_cache.rs | 2 +-
 runtime/src/system_instruction_processor.rs | 30 +-
 runtime/tests/accounts.rs | 6 +-
 runtime/tests/stake.rs | 2 +-
 sdk/cargo-build-bpf/src/main.rs | 16 +-
 sdk/cargo-test-bpf/src/main.rs | 10 +-
 sdk/program/src/message.rs | 20 +-
 sdk/program/src/slot_hashes.rs | 4 +-
 sdk/program/src/stake/state.rs | 4 +-
 sdk/src/account.rs | 2 +-
 sdk/src/derivation_path.rs | 4 +-
 sdk/src/genesis_config.rs | 8 +-
 sdk/src/nonce_keyed_account.rs | 32 +-
 sdk/src/secp256k1_instruction.rs | 10 +-
 sdk/src/signature.rs | 4 +-
 sdk/src/transaction.rs | 4 +-
 stake-accounts/src/args.rs | 6 +-
 stake-accounts/src/main.rs | 6 +-
 stake-accounts/src/stake_accounts.rs | 22 +-
 storage-bigtable/src/bigtable.rs | 4 +-
 storage-bigtable/src/lib.rs | 2 +-
 tokens/src/commands.rs | 16 +-
 transaction-status/src/token_balances.rs | 12 +-
 upload-perf/src/upload-perf.rs | 6 +-
 validator/src/bin/solana-test-validator.rs | 2 +-
 validator/src/dashboard.rs | 2 +-
 validator/src/main.rs | 30 +-
 watchtower/src/main.rs | 6 +-
 177 files changed, 2578 insertions(+),
999 deletions(-) create mode 100644 core/src/cost_model.rs create mode 100644 core/src/cost_tracker.rs create mode 100644 core/src/execute_cost_table.rs create mode 100644 programs/bpf_loader/build.rs create mode 100644 rbpf-cli/src/main.rs diff --git a/account-decoder/src/lib.rs b/account-decoder/src/lib.rs index 7f1e7c40c70c20..904ae3fbd26dcd 100644 --- a/account-decoder/src/lib.rs +++ b/account-decoder/src/lib.rs @@ -69,32 +69,32 @@ impl UiAccount { ) -> Self { let data = match encoding { UiAccountEncoding::Binary => UiAccountData::LegacyBinary( - bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), + bs58::encode(slice_data(account.data(), data_slice_config)).into_string(), ), UiAccountEncoding::Base58 => UiAccountData::Binary( - bs58::encode(slice_data(&account.data(), data_slice_config)).into_string(), + bs58::encode(slice_data(account.data(), data_slice_config)).into_string(), encoding, ), UiAccountEncoding::Base64 => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), encoding, ), UiAccountEncoding::Base64Zstd => { let mut encoder = zstd::stream::write::Encoder::new(Vec::new(), 0).unwrap(); match encoder - .write_all(slice_data(&account.data(), data_slice_config)) + .write_all(slice_data(account.data(), data_slice_config)) .and_then(|()| encoder.finish()) { Ok(zstd_data) => UiAccountData::Binary(base64::encode(zstd_data), encoding), Err(_) => UiAccountData::Binary( - base64::encode(slice_data(&account.data(), data_slice_config)), + base64::encode(slice_data(account.data(), data_slice_config)), UiAccountEncoding::Base64, ), } } UiAccountEncoding::JsonParsed => { if let Ok(parsed_data) = - parse_account_data(pubkey, &account.owner(), &account.data(), additional_data) + parse_account_data(pubkey, account.owner(), account.data(), additional_data) { UiAccountData::Json(parsed_data) } else { diff --git a/account-decoder/src/parse_config.rs b/account-decoder/src/parse_config.rs index c545c8c0d85324..e4cdf2457ef0d4 100644 --- a/account-decoder/src/parse_config.rs +++ b/account-decoder/src/parse_config.rs @@ -37,7 +37,7 @@ fn parse_config_data(data: &[u8], keys: Vec<(Pubkey, bool)>) -> Option { let mut tries = 0; loop { @@ -431,7 +431,7 @@ fn run_accounts_bench( if !airdrop_lamports( &client, &faucet_addr, - &payer_keypairs[i], + payer_keypairs[i], lamports * 100_000, ) { warn!("failed airdrop, exiting"); @@ -487,14 +487,14 @@ fn run_accounts_bench( .into_par_iter() .map(|_| { let message = make_close_message( - &payer_keypairs[0], + payer_keypairs[0], &base_keypair, seed_tracker.max_closed.clone(), 1, min_balance, mint.is_some(), ); - let signers: Vec<&Keypair> = vec![&payer_keypairs[0], &base_keypair]; + let signers: Vec<&Keypair> = vec![payer_keypairs[0], &base_keypair]; Transaction::new(&signers, message, recent_blockhash.0) }) .collect(); diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 9226150503461b..de7aeca4699500 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -195,7 +195,7 @@ fn main() { if !skip_sanity { //sanity check, make sure all the transactions can execute sequentially transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions error: {:?}", res); }); bank.clear_signatures(); diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 5a4d36580e77b4..12b148675e72e3 100644 --- 
a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -377,8 +377,8 @@ mod tests { let mint_pubkey = &genesis.mint_keypair.pubkey(); let bob_pubkey = solana_sdk::pubkey::new_rand(); - let instruction = system_instruction::transfer(&mint_pubkey, &bob_pubkey, 1); - let message = Message::new(&[instruction], Some(&mint_pubkey)); + let instruction = system_instruction::transfer(mint_pubkey, &bob_pubkey, 1); + let message = Message::new(&[instruction], Some(mint_pubkey)); Runtime::new()?.block_on(async { let client_transport = start_local_server(bank_forks, block_commitment_cache).await; diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 1f1a303ac2da81..cf909266a94b91 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -150,7 +150,7 @@ impl Banks for BanksServer { .read() .unwrap() .root_bank() - .get_blockhash_last_valid_slot(&blockhash) + .get_blockhash_last_valid_slot(blockhash) .unwrap(); let signature = transaction.signatures.get(0).cloned().unwrap_or_default(); let info = diff --git a/banks-server/src/send_transaction_service.rs b/banks-server/src/send_transaction_service.rs index 54eb6b3f4d7e9a..a1a930e126617c 100644 --- a/banks-server/src/send_transaction_service.rs +++ b/banks-server/src/send_transaction_service.rs @@ -138,8 +138,8 @@ impl SendTransactionService { result.retried += 1; inc_new_counter_info!("send_transaction_service-retry", 1); Self::send_transaction( - &send_socket, - &tpu_address, + send_socket, + tpu_address, &transaction_info.wire_transaction, ); true diff --git a/bench-exchange/src/bench.rs b/bench-exchange/src/bench.rs index f975d1602f1345..cffb7605be56ff 100644 --- a/bench-exchange/src/bench.rs +++ b/bench-exchange/src/bench.rs @@ -451,13 +451,13 @@ fn swapper( let to_swap_txs: Vec<_> = to_swap .par_iter() .map(|(signer, swap, profit)| { - let s: &Keypair = &signer; + let s: &Keypair = signer; let owner = &signer.pubkey(); let instruction = exchange_instruction::swap_request( owner, &swap.0.pubkey, &swap.1.pubkey, - &profit, + profit, ); let message = Message::new(&[instruction], Some(&s.pubkey())); Transaction::new(&[s], message, blockhash) @@ -600,7 +600,7 @@ fn trader( src, ), ]; - let message = Message::new(&instructions, Some(&owner_pubkey)); + let message = Message::new(&instructions, Some(owner_pubkey)); Transaction::new(&[owner.as_ref(), trade], message, blockhash) }) .collect(); @@ -739,7 +739,7 @@ pub fn fund_keys(client: &T, source: &Keypair, dests: &[Arc] let mut to_fund_txs: Vec<_> = chunk .par_iter() .map(|(k, m)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &m); + let instructions = system_instruction::transfer_many(&k.pubkey(), m); let message = Message::new(&instructions, Some(&k.pubkey())); (k.clone(), Transaction::new_unsigned(message)) }) @@ -777,7 +777,7 @@ pub fn fund_keys(client: &T, source: &Keypair, dests: &[Arc] let mut waits = 0; loop { sleep(Duration::from_millis(200)); - to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, &tx, amount)); + to_fund_txs.retain(|(_, tx)| !verify_funding_transfer(client, tx, amount)); if to_fund_txs.is_empty() { break; } @@ -836,7 +836,7 @@ pub fn create_token_accounts( ); let request_ix = exchange_instruction::account_request(owner_pubkey, &new_keypair.pubkey()); - let message = Message::new(&[create_ix, request_ix], Some(&owner_pubkey)); + let message = Message::new(&[create_ix, request_ix], Some(owner_pubkey)); ( (from_keypair, new_keypair), Transaction::new_unsigned(message), @@ -872,7 +872,7 
@@ pub fn create_token_accounts( let mut waits = 0; while !to_create_txs.is_empty() { sleep(Duration::from_millis(200)); - to_create_txs.retain(|(_, tx)| !verify_transaction(client, &tx)); + to_create_txs.retain(|(_, tx)| !verify_transaction(client, tx)); if to_create_txs.is_empty() { break; } @@ -958,7 +958,7 @@ fn compute_and_report_stats(maxes: &Arc>>, tot fn generate_keypairs(num: u64) -> Vec { let mut seed = [0_u8; 32]; - seed.copy_from_slice(&Keypair::new().pubkey().as_ref()); + seed.copy_from_slice(Keypair::new().pubkey().as_ref()); let mut rnd = GenKeys::new(seed); rnd.gen_n_keypairs(num) } @@ -989,7 +989,7 @@ pub fn airdrop_lamports( let (blockhash, _fee_calculator, _last_valid_slot) = client .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .expect("Failed to get blockhash"); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), amount_to_drop, blockhash) { Ok(transaction) => { let signature = client.async_send_transaction(transaction).unwrap(); diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index e3975991877068..4e1f070eb0fa65 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -18,7 +18,7 @@ fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { msgs.packets.resize(10, Packet::default()); for w in msgs.packets.iter_mut() { w.meta.size = PACKET_DATA_SIZE; - w.meta.set_addr(&addr); + w.meta.set_addr(addr); } let msgs = Arc::new(msgs); spawn(move || loop { diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index 084b81ddec2f00..a2c21ce7efead1 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -544,12 +544,12 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { // re-sign retained to_fund_txes with updated blockhash self.sign(blockhash); - self.send(&client); + self.send(client); // Sleep a few slots to allow transactions to process sleep(Duration::from_secs(1)); - self.verify(&client, to_lamports); + self.verify(client, to_lamports); // retry anything that seems to have dropped through cracks // again since these txs are all or nothing, they're fine to @@ -564,7 +564,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { let to_fund_txs: Vec<(&Keypair, Transaction)> = to_fund .par_iter() .map(|(k, t)| { - let instructions = system_instruction::transfer_many(&k.pubkey(), &t); + let instructions = system_instruction::transfer_many(&k.pubkey(), t); let message = Message::new(&instructions, Some(&k.pubkey())); (*k, Transaction::new_unsigned(message)) }) @@ -617,7 +617,7 @@ impl<'a> FundingTransactions<'a> for Vec<(&'a Keypair, Transaction)> { return None; } - let verified = if verify_funding_transfer(&client, &tx, to_lamports) { + let verified = if verify_funding_transfer(&client, tx, to_lamports) { verified_txs.fetch_add(1, Ordering::Relaxed); Some(k.pubkey()) } else { @@ -733,7 +733,7 @@ pub fn airdrop_lamports( ); let (blockhash, _fee_calculator) = get_recent_blockhash(client); - match request_airdrop_transaction(&faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { + match request_airdrop_transaction(faucet_addr, &id.pubkey(), airdrop_amount, blockhash) { Ok(transaction) => { let mut tries = 0; loop { diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs index abb9b3a7eb4491..ad2ad5b8f9ba93 100644 --- a/bench-tps/src/main.rs +++ b/bench-tps/src/main.rs @@ -39,7 +39,7 @@ fn main() { let keypair_count = *tx_count * keypair_multiplier; 
if *write_to_client_file { info!("Generating {} keypairs", keypair_count); - let (keypairs, _) = generate_keypairs(&id, keypair_count as u64); + let (keypairs, _) = generate_keypairs(id, keypair_count as u64); let num_accounts = keypairs.len() as u64; let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0).max_lamports_per_signature; @@ -68,7 +68,7 @@ fn main() { } info!("Connecting to the cluster"); - let nodes = discover_cluster(&entrypoint_addr, *num_nodes).unwrap_or_else(|err| { + let nodes = discover_cluster(entrypoint_addr, *num_nodes).unwrap_or_else(|err| { eprintln!("Failed to discover {} nodes: {:?}", num_nodes, err); exit(1); }); @@ -135,7 +135,7 @@ fn main() { generate_and_fund_keypairs( client.clone(), Some(*faucet_addr), - &id, + id, keypair_count, *num_lamports_per_account, ) diff --git a/clap-utils/src/keypair.rs b/clap-utils/src/keypair.rs index 38dc7b0a2ca98c..f63227043dce45 100644 --- a/clap-utils/src/keypair.rs +++ b/clap-utils/src/keypair.rs @@ -506,7 +506,7 @@ pub const SKIP_SEED_PHRASE_VALIDATION_ARG: ArgConstant<'static> = ArgConstant { /// Prompts user for a passphrase and then asks for confirmirmation to check for mistakes pub fn prompt_passphrase(prompt: &str) -> Result> { - let passphrase = prompt_password_stderr(&prompt)?; + let passphrase = prompt_password_stderr(prompt)?; if !passphrase.is_empty() { let confirmed = rpassword::prompt_password_stderr("Enter same passphrase again: ")?; if confirmed != passphrase { @@ -586,9 +586,9 @@ pub fn keypair_from_seed_phrase( let keypair = if skip_validation { let passphrase = prompt_passphrase(&passphrase_prompt)?; if legacy { - keypair_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase)? + keypair_from_seed_phrase_and_passphrase(seed_phrase, &passphrase)? } else { - let seed = generate_seed_from_seed_phrase_and_passphrase(&seed_phrase, &passphrase); + let seed = generate_seed_from_seed_phrase_and_passphrase(seed_phrase, &passphrase); keypair_from_seed_and_derivation_path(&seed, derivation_path)? } } else { @@ -616,7 +616,7 @@ pub fn keypair_from_seed_phrase( if legacy { keypair_from_seed(seed.as_bytes())? } else { - keypair_from_seed_and_derivation_path(&seed.as_bytes(), derivation_path)? + keypair_from_seed_and_derivation_path(seed.as_bytes(), derivation_path)? 
} }; diff --git a/cli-config/src/config.rs b/cli-config/src/config.rs index f98af53f4a6ce7..d9706ef9295655 100644 --- a/cli-config/src/config.rs +++ b/cli-config/src/config.rs @@ -107,24 +107,24 @@ mod test { #[test] fn compute_websocket_url() { assert_eq!( - Config::compute_websocket_url(&"http://api.devnet.solana.com"), + Config::compute_websocket_url("http://api.devnet.solana.com"), "ws://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://api.devnet.solana.com"), + Config::compute_websocket_url("https://api.devnet.solana.com"), "wss://api.devnet.solana.com/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"http://example.com:8899"), + Config::compute_websocket_url("http://example.com:8899"), "ws://example.com:8900/".to_string() ); assert_eq!( - Config::compute_websocket_url(&"https://example.com:1234"), + Config::compute_websocket_url("https://example.com:1234"), "wss://example.com:1235/".to_string() ); - assert_eq!(Config::compute_websocket_url(&"garbage"), String::new()); + assert_eq!(Config::compute_websocket_url("garbage"), String::new()); } } diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index a1605b587a0240..9d94cfab2dc5ef 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -1287,7 +1287,7 @@ impl fmt::Display for CliValidatorInfo { writeln_name_value( f, &format!(" {}:", to_title_case(key)), - &value.as_str().unwrap_or("?"), + value.as_str().unwrap_or("?"), )?; } Ok(()) @@ -1768,7 +1768,7 @@ impl fmt::Display for CliTokenAccount { writeln_name_value( f, "Close authority:", - &account.close_authority.as_ref().unwrap_or(&String::new()), + account.close_authority.as_ref().unwrap_or(&String::new()), )?; Ok(()) } @@ -2006,7 +2006,7 @@ pub fn return_signers_with_config( } pub fn parse_sign_only_reply_string(reply: &str) -> SignOnly { - let object: Value = serde_json::from_str(&reply).unwrap(); + let object: Value = serde_json::from_str(reply).unwrap(); let blockhash_str = object.get("blockhash").unwrap().as_str().unwrap(); let blockhash = blockhash_str.parse::().unwrap(); let mut present_signers: Vec<(Pubkey, Signature)> = Vec::new(); diff --git a/cli/src/cli.rs b/cli/src/cli.rs index a306d8c63e6c5b..f4f4772fde8a9d 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1000,7 +1000,7 @@ fn process_airdrop( let result = request_and_confirm_airdrop(rpc_client, config, &pubkey, lamports); if let Ok(signature) = result { - let signature_cli_message = log_instruction_custom_error::(result, &config)?; + let signature_cli_message = log_instruction_custom_error::(result, config)?; println!("{}", signature_cli_message); let current_balance = rpc_client.get_balance(&pubkey)?; @@ -1013,7 +1013,7 @@ fn process_airdrop( Ok(build_balance_message(current_balance, false, true)) } } else { - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1098,7 +1098,7 @@ fn process_confirm( #[allow(clippy::unnecessary_wraps)] fn process_decode_transaction(config: &CliConfig, transaction: &Transaction) -> ProcessResult { - let sigverify_status = CliSignatureVerificationStatus::verify_transaction(&transaction); + let sigverify_status = CliSignatureVerificationStatus::verify_transaction(transaction); let decode_transaction = CliTransaction { decoded_transaction: transaction.clone(), transaction: EncodedTransaction::encode(transaction.clone(), UiTransactionEncoding::Json), @@ -1269,7 +1269,7 @@ fn process_transfer( } else { 
rpc_client.send_and_confirm_transaction_with_spinner(&tx) }; - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1324,7 +1324,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { from_pubkey, seed, program_id, - } => process_create_address_with_seed(config, from_pubkey.as_ref(), &seed, &program_id), + } => process_create_address_with_seed(config, from_pubkey.as_ref(), seed, program_id), CliCommand::Fees { ref blockhash } => process_fees(&rpc_client, config, blockhash.as_ref()), CliCommand::Feature(feature_subcommand) => { process_feature_subcommand(&rpc_client, config, feature_subcommand) @@ -1347,8 +1347,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::LeaderSchedule { epoch } => { process_leader_schedule(&rpc_client, config, *epoch) } - CliCommand::LiveSlots => process_live_slots(&config), - CliCommand::Logs { filter } => process_logs(&config, filter), + CliCommand::LiveSlots => process_live_slots(config), + CliCommand::Logs { filter } => process_logs(config, filter), CliCommand::Ping { lamports, interval, @@ -1453,7 +1453,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { ), // Get the current nonce CliCommand::GetNonce(nonce_account_pubkey) => { - process_get_nonce(&rpc_client, config, &nonce_account_pubkey) + process_get_nonce(&rpc_client, config, nonce_account_pubkey) } // Get a new nonce CliCommand::NewNonce { @@ -1474,7 +1474,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_nonce_account( &rpc_client, config, - &nonce_account_pubkey, + nonce_account_pubkey, *use_lamports_unit, ), // Withdraw lamports from a nonce account @@ -1487,10 +1487,10 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_from_nonce_account( &rpc_client, config, - &nonce_account, + nonce_account, *nonce_authority, memo.as_ref(), - &destination_account_pubkey, + destination_account_pubkey, *lamports, ), @@ -1564,7 +1564,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_deactivate_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1590,8 +1590,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_delegate_stake( &rpc_client, config, - &stake_account_pubkey, - &vote_account_pubkey, + stake_account_pubkey, + vote_account_pubkey, *stake_authority, *force, *sign_only, @@ -1618,7 +1618,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_split_stake( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1645,8 +1645,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_merge_stake( &rpc_client, config, - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + source_stake_account_pubkey, *stake_authority, *sign_only, *dump_transaction_message, @@ -1663,7 +1663,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_stake_account( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1686,7 +1686,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_stake_authorize( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, new_authorizations, *custodian, *sign_only, @@ -1712,7 +1712,7 @@ pub fn 
process_command(config: &CliConfig) -> ProcessResult { } => process_stake_set_lockup( &rpc_client, config, - &stake_account_pubkey, + stake_account_pubkey, &mut lockup, *custodian, *sign_only, @@ -1740,8 +1740,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_withdraw_stake( &rpc_client, config, - &stake_account_pubkey, - &destination_account_pubkey, + stake_account_pubkey, + destination_account_pubkey, *amount, *withdraw_authority, *custodian, @@ -1769,7 +1769,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_set_validator_info( &rpc_client, config, - &validator_info, + validator_info, *force_keybase, *info_pubkey, ), @@ -1803,7 +1803,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_vote_account( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *use_lamports_unit, *with_rewards, ), @@ -1830,8 +1830,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_authorize( &rpc_client, config, - &vote_account_pubkey, - &new_authorized_pubkey, + vote_account_pubkey, + new_authorized_pubkey, *vote_authorize, memo.as_ref(), ), @@ -1843,7 +1843,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_validator( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *new_identity_account, *withdraw_authority, memo.as_ref(), @@ -1856,7 +1856,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_vote_update_commission( &rpc_client, config, - &vote_account_pubkey, + vote_account_pubkey, *commission, *withdraw_authority, memo.as_ref(), @@ -1872,7 +1872,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { CliCommand::Balance { pubkey, use_lamports_unit, - } => process_balance(&rpc_client, config, &pubkey, *use_lamports_unit), + } => process_balance(&rpc_client, config, pubkey, *use_lamports_unit), // Confirm the last client transaction by signature CliCommand::Confirm(signature) => process_confirm(&rpc_client, config, signature), CliCommand::DecodeTransaction(transaction) => { @@ -1892,8 +1892,8 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { } => process_show_account( &rpc_client, config, - &pubkey, - &output_file, + pubkey, + output_file, *use_lamports_unit, ), CliCommand::Transfer { diff --git a/cli/src/cluster_query.rs b/cli/src/cluster_query.rs index 4328161c7ae863..3baf9d952e655d 100644 --- a/cli/src/cluster_query.rs +++ b/cli/src/cluster_query.rs @@ -122,7 +122,7 @@ impl ClusterQuerySubCommands for App<'_, '_> { .long("our-localhost") .takes_value(false) .value_name("PORT") - .default_value(&DEFAULT_RPC_PORT_STR) + .default_value(DEFAULT_RPC_PORT_STR) .validator(is_port) .help("Guess Identity pubkey and validator rpc node assuming local (possibly private) validator"), ) diff --git a/cli/src/inflation.rs b/cli/src/inflation.rs index 11d3fbfb5248ee..8ec8233db04291 100644 --- a/cli/src/inflation.rs +++ b/cli/src/inflation.rs @@ -102,7 +102,7 @@ fn process_rewards( rewards_epoch: Option, ) -> ProcessResult { let rewards = rpc_client - .get_inflation_reward(&addresses, rewards_epoch) + .get_inflation_reward(addresses, rewards_epoch) .map_err(|err| { if let Some(epoch) = rewards_epoch { format!("Rewards not available for epoch {}", epoch) diff --git a/cli/src/main.rs b/cli/src/main.rs index 164a684dc91454..732f9dbc2e0d97 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -184,7 +184,7 @@ pub fn parse_args<'a>( let CliCommandInfo { command, mut signers, - } = 
parse_command(&matches, &default_signer, &mut wallet_manager)?; + } = parse_command(matches, &default_signer, &mut wallet_manager)?; if signers.is_empty() { if let Ok(signer_info) = @@ -257,7 +257,7 @@ fn main() -> Result<(), Box> { .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } @@ -411,10 +411,10 @@ fn main() -> Result<(), Box> { } fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box> { - if parse_settings(&matches)? { + if parse_settings(matches)? { let mut wallet_manager = None; - let (mut config, signers) = parse_args(&matches, &mut wallet_manager)?; + let (mut config, signers) = parse_args(matches, &mut wallet_manager)?; config.signers = signers.iter().map(|s| s.as_ref()).collect(); let result = process_command(&config)?; println!("{}", result); diff --git a/cli/src/nonce.rs b/cli/src/nonce.rs index 8e4b625fe8c29c..50d951b5a0ad58 100644 --- a/cli/src/nonce.rs +++ b/cli/src/nonce.rs @@ -364,7 +364,7 @@ pub fn process_authorize_nonce_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_create_nonce_account( @@ -449,7 +449,7 @@ pub fn process_create_nonce_account( let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_get_nonce( @@ -474,10 +474,10 @@ pub fn process_new_nonce( ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), - (&nonce_account, "nonce_account_pubkey".to_string()), + (nonce_account, "nonce_account_pubkey".to_string()), )?; - if let Err(err) = rpc_client.get_account(&nonce_account) { + if let Err(err) = rpc_client.get_account(nonce_account) { return Err(CliError::BadParameter(format!( "Unable to advance nonce account {}. 
error: {}", nonce_account, err @@ -487,7 +487,7 @@ pub fn process_new_nonce( let nonce_authority = config.signers[nonce_authority]; let ixs = vec![advance_nonce_account( - &nonce_account, + nonce_account, &nonce_authority.pubkey(), )] .with_memo(memo); @@ -503,7 +503,7 @@ pub fn process_new_nonce( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_show_nonce_account( @@ -522,7 +522,7 @@ pub fn process_show_nonce_account( use_lamports_unit, ..CliNonceAccount::default() }; - if let Some(ref data) = data { + if let Some(data) = data { nonce_account.nonce = Some(data.blockhash.to_string()); nonce_account.lamports_per_signature = Some(data.fee_calculator.lamports_per_signature); nonce_account.authority = Some(data.authority.to_string()); @@ -566,7 +566,7 @@ pub fn process_withdraw_from_nonce_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } #[cfg(test)] diff --git a/cli/src/program.rs b/cli/src/program.rs index d1592d24937793..0c557c9563577c 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -767,7 +767,7 @@ fn process_program_deploy( }; let upgrade_authority_signer = config.signers[upgrade_authority_signer_index]; - let default_program_keypair = get_default_program_keypair(&program_location); + let default_program_keypair = get_default_program_keypair(program_location); let (program_signer, program_pubkey) = if let Some(i) = program_signer_index { (Some(config.signers[i]), config.signers[i].pubkey()) } else if let Some(program_pubkey) = program_pubkey { @@ -843,7 +843,7 @@ fn process_program_deploy( }; let (program_data, program_len) = if let Some(program_location) = program_location { - let program_data = read_and_verify_elf(&program_location)?; + let program_data = read_and_verify_elf(program_location)?; let program_len = program_data.len(); (program_data, program_len) } else if buffer_provided { @@ -1259,7 +1259,7 @@ fn process_dump( UpgradeableLoaderState::programdata_data_offset().unwrap_or(0); let program_data = &programdata_account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err( @@ -1279,7 +1279,7 @@ fn process_dump( let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0); let program_data = &account.data[offset..]; let mut f = File::create(output_location)?; - f.write_all(&program_data)?; + f.write_all(program_data)?; Ok(format!("Wrote program to {}", output_location)) } else { Err(format!( @@ -1310,8 +1310,8 @@ fn close( let mut tx = Transaction::new_unsigned(Message::new( &[bpf_loader_upgradeable::close( - &account_pubkey, - &recipient_pubkey, + account_pubkey, + recipient_pubkey, &authority_signer.pubkey(), )], Some(&config.signers[0].pubkey()), @@ -1420,7 +1420,7 @@ fn process_close( if close( rpc_client, config, - &address, + address, &recipient_pubkey, authority_signer, ) @@ -1521,7 +1521,7 @@ fn do_process_program_write_and_deploy( .value { complete_partial_program_init( - &loader_id, + loader_id, &config.signers[0].pubkey(), buffer_pubkey, &account, @@ -1551,7 +1551,7 @@ fn do_process_program_write_and_deploy( buffer_pubkey, minimum_balance, buffer_data_len as u64, - &loader_id, + loader_id, )], 
minimum_balance, ) @@ -1579,7 +1579,7 @@ fn do_process_program_write_and_deploy( } else { loader_instruction::write( buffer_pubkey, - &loader_id, + loader_id, (i * DATA_CHUNK_SIZE) as u32, chunk.to_vec(), ) @@ -1623,7 +1623,7 @@ fn do_process_program_write_and_deploy( ) } else { Message::new( - &[loader_instruction::finalize(buffer_pubkey, &loader_id)], + &[loader_instruction::finalize(buffer_pubkey, loader_id)], Some(&config.signers[0].pubkey()), ) }; @@ -1749,8 +1749,8 @@ fn do_process_program_upgrade( // Create and add final message let final_message = Message::new( &[bpf_loader_upgradeable::upgrade( - &program_id, - &buffer_pubkey, + program_id, + buffer_pubkey, &upgrade_authority.pubkey(), &config.signers[0].pubkey(), )], @@ -1818,7 +1818,7 @@ fn complete_partial_program_init( account_data_len as u64, )); if account.owner != *loader_id { - instructions.push(system_instruction::assign(elf_pubkey, &loader_id)); + instructions.push(system_instruction::assign(elf_pubkey, loader_id)); } } if account.lamports < minimum_balance { @@ -1890,7 +1890,7 @@ fn send_deploy_messages( initial_transaction.try_sign(&[payer_signer], blockhash)?; } let result = rpc_client.send_and_confirm_transaction_with_spinner(&initial_transaction); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) .map_err(|err| format!("Account allocation failed: {}", err))?; } else { return Err("Buffer account not created yet, must provide a key pair".into()); diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index 95431ccad26c85..df785e457b1845 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -92,7 +92,7 @@ where Ok((message, spend)) } else { let from_balance = rpc_client - .get_balance_with_commitment(&from_pubkey, commitment)? + .get_balance_with_commitment(from_pubkey, commitment)? .value; let (message, SpendAndFee { spend, fee }) = resolve_spend_message( amount, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 42d21fea41863a..682f41b4651228 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -972,7 +972,7 @@ pub fn process_create_stake_account( ) -> ProcessResult { let stake_account = config.signers[stake_account]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account.pubkey(), &seed, &stake::program::id())? + Pubkey::create_with_seed(&stake_account.pubkey(), seed, &stake::program::id())? } else { stake_account.pubkey() }; @@ -1085,7 +1085,7 @@ pub fn process_create_stake_account( } else { tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1172,7 +1172,7 @@ pub fn process_stake_authorize( } else { rpc_client.send_and_confirm_transaction_with_spinner(&tx) }; - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1196,7 +1196,7 @@ pub fn process_deactivate_stake_account( let stake_authority = config.signers[stake_authority]; let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? 
} else { *stake_account_pubkey }; @@ -1248,7 +1248,7 @@ pub fn process_deactivate_stake_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1274,7 +1274,7 @@ pub fn process_withdraw_stake( let custodian = custodian.map(|index| config.signers[index]); let stake_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&stake_account_pubkey, seed, &stake::program::id())? + Pubkey::create_with_seed(stake_account_pubkey, seed, &stake::program::id())? } else { *stake_account_pubkey }; @@ -1347,7 +1347,7 @@ pub fn process_withdraw_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1382,10 +1382,10 @@ pub fn process_split_stake( } check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( &split_stake_account.pubkey(), "split_stake_account".to_string(), @@ -1395,7 +1395,7 @@ pub fn process_split_stake( let stake_authority = config.signers[stake_authority]; let split_stake_account_address = if let Some(seed) = split_stake_account_seed { - Pubkey::create_with_seed(&split_stake_account.pubkey(), &seed, &stake::program::id())? + Pubkey::create_with_seed(&split_stake_account.pubkey(), seed, &stake::program::id())? } else { split_stake_account.pubkey() }; @@ -1433,7 +1433,7 @@ pub fn process_split_stake( let ixs = if let Some(seed) = split_stake_account_seed { stake_instruction::split_with_seed( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1443,7 +1443,7 @@ pub fn process_split_stake( .with_memo(memo) } else { stake_instruction::split( - &stake_account_pubkey, + stake_account_pubkey, &stake_authority.pubkey(), lamports, &split_stake_account_address, @@ -1492,7 +1492,7 @@ pub fn process_split_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1515,19 +1515,19 @@ pub fn process_merge_stake( check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), )?; check_unique_pubkeys( (&fee_payer.pubkey(), "fee-payer keypair".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; check_unique_pubkeys( - (&stake_account_pubkey, "stake_account".to_string()), + (stake_account_pubkey, "stake_account".to_string()), ( - &source_stake_account_pubkey, + source_stake_account_pubkey, "source_stake_account".to_string(), ), )?; @@ -1552,8 +1552,8 @@ pub fn process_merge_stake( blockhash_query.get_blockhash_and_fee_calculator(rpc_client, config.commitment)?; let ixs = stake_instruction::merge( - &stake_account_pubkey, - &source_stake_account_pubkey, + stake_account_pubkey, + source_stake_account_pubkey, &stake_authority.pubkey(), ) .with_memo(memo); @@ -1603,7 +1603,7 @@ pub fn process_merge_stake( config.commitment, 
config.send_transaction_config, ); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -1674,7 +1674,7 @@ pub fn process_stake_set_lockup( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } @@ -2076,7 +2076,7 @@ pub fn process_delegate_stake( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } } diff --git a/cli/src/validator_info.rs b/cli/src/validator_info.rs index 5479c0cb87300b..6ac5f239b95a4d 100644 --- a/cli/src/validator_info.rs +++ b/cli/src/validator_info.rs @@ -119,7 +119,7 @@ fn parse_validator_info( let key_list: ConfigKeys = deserialize(&account.data)?; if !key_list.keys.is_empty() { let (validator_pubkey, _) = key_list.keys[1]; - let validator_info_string: String = deserialize(&get_config_data(&account.data)?)?; + let validator_info_string: String = deserialize(get_config_data(&account.data)?)?; let validator_info: Map<_, _> = serde_json::from_str(&validator_info_string)?; Ok((validator_pubkey, validator_info)) } else { @@ -246,7 +246,7 @@ pub fn process_set_validator_info( ) -> ProcessResult { // Validate keybase username if let Some(string) = validator_info.get("keybaseUsername") { - let result = verify_keybase(&config.signers[0].pubkey(), &string); + let result = verify_keybase(&config.signers[0].pubkey(), string); if result.is_err() { if force_keybase { println!("--force supplied, ignoring: {:?}", result); @@ -272,7 +272,7 @@ pub fn process_set_validator_info( }, ) .find(|(pubkey, account)| { - let (validator_pubkey, _) = parse_validator_info(&pubkey, &account).unwrap(); + let (validator_pubkey, _) = parse_validator_info(pubkey, account).unwrap(); validator_pubkey == config.signers[0].pubkey() }); @@ -393,7 +393,7 @@ pub fn process_get_validator_info( } for (validator_info_pubkey, validator_info_account) in validator_info.iter() { let (validator_pubkey, validator_info) = - parse_validator_info(&validator_info_pubkey, &validator_info_account)?; + parse_validator_info(validator_info_pubkey, validator_info_account)?; validator_info_list.push(CliValidatorInfo { identity_pubkey: validator_pubkey.to_string(), info_pubkey: validator_info_pubkey.to_string(), @@ -451,7 +451,7 @@ mod tests { "name": "Alice", "keybaseUsername": "alice_keybase", }); - assert_eq!(parse_args(&matches), expected); + assert_eq!(parse_args(matches), expected); } #[test] diff --git a/cli/src/vote.rs b/cli/src/vote.rs index 8ef78a50039248..0b45efdaecdaf8 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -468,7 +468,7 @@ pub fn process_create_vote_account( let vote_account = config.signers[vote_account]; let vote_account_pubkey = vote_account.pubkey(); let vote_account_address = if let Some(seed) = seed { - Pubkey::create_with_seed(&vote_account_pubkey, &seed, &solana_vote_program::id())? + Pubkey::create_with_seed(&vote_account_pubkey, seed, &solana_vote_program::id())? 
} else { vote_account_pubkey }; @@ -549,7 +549,7 @@ pub fn process_create_vote_account( let mut tx = Transaction::new_unsigned(message); tx.try_sign(&config.signers, recent_blockhash)?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_authorize( @@ -592,7 +592,7 @@ pub fn process_vote_authorize( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_update_validator( @@ -629,7 +629,7 @@ pub fn process_vote_update_validator( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } pub fn process_vote_update_commission( @@ -660,7 +660,7 @@ pub fn process_vote_update_commission( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } fn get_vote_account( @@ -763,7 +763,7 @@ pub fn process_withdraw_from_vote_account( let (recent_blockhash, fee_calculator) = rpc_client.get_recent_blockhash()?; let withdraw_authority = config.signers[withdraw_authority]; - let current_balance = rpc_client.get_balance(&vote_account_pubkey)?; + let current_balance = rpc_client.get_balance(vote_account_pubkey)?; let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?; let lamports = match withdraw_amount { @@ -798,7 +798,7 @@ pub fn process_withdraw_from_vote_account( config.commitment, )?; let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction); - log_instruction_custom_error::(result, &config) + log_instruction_custom_error::(result, config) } #[cfg(test)] diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 5a83d0ea2409bc..adf96fb72e8567 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -68,7 +68,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_rent_exemption); assert_eq!(account0.owner, bpf_loader::id()); @@ -198,7 +198,7 @@ fn test_cli_program_deploy_no_authority() { .unwrap() .as_str() .unwrap(); - let program_id = Pubkey::from_str(&program_id_str).unwrap(); + let program_id = Pubkey::from_str(program_id_str).unwrap(); // Attempt to upgrade the program config.signers = vec![&keypair, &upgrade_authority]; @@ -284,7 +284,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&program_pubkey_str).unwrap() + Pubkey::from_str(program_pubkey_str).unwrap() ); let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); @@ -328,7 +328,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let program_account = rpc_client.get_account(&program_pubkey).unwrap(); 
assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); @@ -397,7 +397,7 @@ fn test_cli_program_deploy_with_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&new_upgrade_authority_str).unwrap(), + Pubkey::from_str(new_upgrade_authority_str).unwrap(), new_upgrade_authority.pubkey() ); @@ -452,7 +452,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap(); assert_eq!( new_upgrade_authority.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Set no authority @@ -510,7 +510,7 @@ fn test_cli_program_deploy_with_authority() { .unwrap() .as_str() .unwrap(); - let program_pubkey = Pubkey::from_str(&program_pubkey_str).unwrap(); + let program_pubkey = Pubkey::from_str(program_pubkey_str).unwrap(); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -606,7 +606,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&new_buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -641,7 +641,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer); @@ -675,7 +675,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Specify buffer authority @@ -700,7 +700,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&buffer_pubkey_str).unwrap() + Pubkey::from_str(buffer_pubkey_str).unwrap() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); @@ -735,7 +735,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); let buffer_account = rpc_client.get_account(&buffer_pubkey).unwrap(); assert_eq!(buffer_account.lamports, minimum_balance_for_buffer_default); assert_eq!(buffer_account.owner, bpf_loader_upgradeable::id()); @@ -768,7 +768,7 @@ fn test_cli_program_write_buffer() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_pubkey_str).unwrap() + Pubkey::from_str(authority_pubkey_str).unwrap() ); // Close buffer @@ -806,7 +806,7 @@ fn test_cli_program_write_buffer() { .unwrap() .as_str() .unwrap(); - let new_buffer_pubkey = Pubkey::from_str(&buffer_pubkey_str).unwrap(); + let new_buffer_pubkey = Pubkey::from_str(buffer_pubkey_str).unwrap(); // Close buffers and deposit default keypair let pre_lamports = rpc_client.get_account(&keypair.pubkey()).unwrap().lamports; @@ -901,7 +901,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( 
- Pubkey::from_str(&new_buffer_authority_str).unwrap(), + Pubkey::from_str(new_buffer_authority_str).unwrap(), new_buffer_authority.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -928,7 +928,7 @@ fn test_cli_program_set_buffer_authority() { .as_str() .unwrap(); assert_eq!( - Pubkey::from_str(&buffer_authority_str).unwrap(), + Pubkey::from_str(buffer_authority_str).unwrap(), buffer_keypair.pubkey() ); let buffer_account = rpc_client.get_account(&buffer_keypair.pubkey()).unwrap(); @@ -1101,7 +1101,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( buffer_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let authority_str = json .as_object() @@ -1112,7 +1112,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let data_len = json .as_object() @@ -1161,7 +1161,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( program_keypair.pubkey(), - Pubkey::from_str(&address_str).unwrap() + Pubkey::from_str(address_str).unwrap() ); let programdata_address_str = json .as_object() @@ -1176,7 +1176,7 @@ fn test_cli_program_show() { ); assert_eq!( programdata_pubkey, - Pubkey::from_str(&programdata_address_str).unwrap() + Pubkey::from_str(programdata_address_str).unwrap() ); let authority_str = json .as_object() @@ -1187,7 +1187,7 @@ fn test_cli_program_show() { .unwrap(); assert_eq!( authority_keypair.pubkey(), - Pubkey::from_str(&authority_str).unwrap() + Pubkey::from_str(authority_str).unwrap() ); let deployed_slot = json .as_object() diff --git a/client/src/rpc_cache.rs b/client/src/rpc_cache.rs index 38dbba582562dd..4207d3ce36752b 100644 --- a/client/src/rpc_cache.rs +++ b/client/src/rpc_cache.rs @@ -31,7 +31,7 @@ impl LargestAccountsCache { &self, filter: &Option, ) -> Option<(u64, Vec)> { - self.cache.get(&filter).and_then(|value| { + self.cache.get(filter).and_then(|value| { if let Ok(elapsed) = value.cached_time.elapsed() { if elapsed < Duration::from_secs(self.duration) { return Some((value.slot, value.accounts.clone())); diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index c54ef4ad94f5bf..f994fd63e861b1 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -1627,7 +1627,7 @@ impl RpcClient { ) -> ClientResult { let now = Instant::now(); loop { - match self.get_balance_with_commitment(&pubkey, commitment_config) { + match self.get_balance_with_commitment(pubkey, commitment_config) { Ok(bal) => { return Ok(bal.value); } @@ -1696,7 +1696,7 @@ impl RpcClient { let now = Instant::now(); loop { if let Ok(Some(_)) = - self.get_signature_status_with_commitment(&signature, commitment_config) + self.get_signature_status_with_commitment(signature, commitment_config) { break; } @@ -1853,11 +1853,11 @@ impl RpcClient { let (signature, status) = loop { // Get recent commitment in order to count confirmations for successful transactions let status = self - .get_signature_status_with_commitment(&signature, CommitmentConfig::processed())?; + .get_signature_status_with_commitment(signature, CommitmentConfig::processed())?; if status.is_none() { if self .get_fee_calculator_for_blockhash_with_commitment( - &recent_blockhash, + recent_blockhash, CommitmentConfig::processed(), )? 
.value @@ -1891,7 +1891,7 @@ impl RpcClient { // Return when specified commitment is reached // Failed transactions have already been eliminated, `is_some` check is sufficient if self - .get_signature_status_with_commitment(&signature, commitment)? + .get_signature_status_with_commitment(signature, commitment)? .is_some() { progress_bar.set_message("Transaction confirmed"); @@ -1907,7 +1907,7 @@ impl RpcClient { )); sleep(Duration::from_millis(500)); confirmations = self - .get_num_blocks_since_signature_confirmation(&signature) + .get_num_blocks_since_signature_confirmation(signature) .unwrap_or(confirmations); if now.elapsed().as_secs() >= MAX_HASH_AGE_IN_SECONDS as u64 { return Err( diff --git a/client/src/thin_client.rs b/client/src/thin_client.rs index 8b3d0830840c6e..3988e8e5d086b3 100644 --- a/client/src/thin_client.rs +++ b/client/src/thin_client.rs @@ -451,7 +451,7 @@ impl SyncClient for ThinClient { ) -> TransportResult>> { let status = self .rpc_client() - .get_signature_status(&signature) + .get_signature_status(signature) .map_err(|err| { io::Error::new( io::ErrorKind::Other, @@ -468,7 +468,7 @@ impl SyncClient for ThinClient { ) -> TransportResult>> { let status = self .rpc_client() - .get_signature_status_with_commitment(&signature, commitment_config) + .get_signature_status_with_commitment(signature, commitment_config) .map_err(|err| { io::Error::new( io::ErrorKind::Other, diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index ae264f9875fe18..01c902af12a009 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -121,7 +121,7 @@ struct LeaderTpuCache { impl LeaderTpuCache { fn new(rpc_client: &RpcClient, first_slot: Slot) -> Self { let leaders = Self::fetch_slot_leaders(rpc_client, first_slot).unwrap_or_default(); - let leader_tpu_map = Self::fetch_cluster_tpu_sockets(&rpc_client).unwrap_or_default(); + let leader_tpu_map = Self::fetch_cluster_tpu_sockets(rpc_client).unwrap_or_default(); Self { first_slot, leaders, diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 281d4dd6359429..ff3a2ab047e690 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -183,7 +183,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { }); //sanity check, make sure all the transactions can execute sequentially transactions.iter().for_each(|tx| { - let res = bank.process_transaction(&tx); + let res = bank.process_transaction(tx); assert!(res.is_ok(), "sanity test transactions"); }); bank.clear_signatures(); diff --git a/core/benches/consensus.rs b/core/benches/consensus.rs index 64035f4c3af177..280ee08c13e46e 100644 --- a/core/benches/consensus.rs +++ b/core/benches/consensus.rs @@ -24,10 +24,10 @@ fn bench_save_tower(bench: &mut Bencher) { let heaviest_bank = BankForks::new(Bank::default()).working_bank(); let tower = Tower::new( &node_keypair.pubkey(), - &vote_account_pubkey, + vote_account_pubkey, 0, &heaviest_bank, - &path, + path, ); bench.iter(move || { diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 07f115ac3047bd..58a3c8331ed143 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -148,7 +148,7 @@ impl AccountsHashVerifier { for (slot, hash) in hashes.iter() { slot_to_hash.insert(*slot, *hash); } - if Self::should_halt(&cluster_info, trusted_validators, &mut slot_to_hash) { + if Self::should_halt(cluster_info, trusted_validators, &mut slot_to_hash) { exit.store(true, Ordering::Relaxed); } } diff --git 
a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 29a553df35aff2..aa35614a9e0edb 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -231,6 +231,11 @@ impl BankingStage { Self::num_threads(), transaction_status_sender, gossip_vote_sender, +<<<<<<< HEAD +======= + cost_model, + &cost_tracker, +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) ) } @@ -352,9 +357,9 @@ impl BankingStage { // We've hit the end of this slot, no need to perform more processing, // just filter the remaining packets for the invalid (e.g. too old) ones let new_unprocessed_indexes = Self::filter_unprocessed_packets( - &bank, - &msgs, - &original_unprocessed_indexes, + bank, + msgs, + original_unprocessed_indexes, my_pubkey, *next_leader, ); @@ -369,8 +374,8 @@ impl BankingStage { Self::process_packets_transactions( &bank, &bank_creation_time, - &recorder, - &msgs, + recorder, + msgs, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), gossip_vote_sender, @@ -403,7 +408,7 @@ impl BankingStage { // `original_unprocessed_indexes` must have remaining packets to process // if not yet processed. assert!(Self::packet_has_more_unprocessed_transactions( - &original_unprocessed_indexes + original_unprocessed_indexes )); true } @@ -597,7 +602,7 @@ impl BankingStage { let decision = Self::process_buffered_packets( &my_pubkey, &socket, - &poh_recorder, + poh_recorder, cluster_info, &mut buffered_packets, enable_forwarding, @@ -627,8 +632,8 @@ impl BankingStage { match Self::process_packets( &my_pubkey, - &verified_receiver, - &poh_recorder, + verified_receiver, + poh_recorder, recv_start, recv_timeout, id, @@ -738,7 +743,7 @@ impl BankingStage { let mut mint_decimals: HashMap = HashMap::new(); let pre_token_balances = if transaction_status_sender.is_some() { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -798,7 +803,7 @@ impl BankingStage { if let Some(transaction_status_sender) = transaction_status_sender { let txs = batch.transactions_iter().cloned().collect(); let post_balances = bank.collect_balances(batch); - let post_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); + let post_token_balances = collect_token_balances(bank, batch, &mut mint_decimals); transaction_status_sender.send_transaction_status_batch( bank.clone(), txs, @@ -1072,7 +1077,19 @@ impl BankingStage { ); process_tx_time.stop(); +<<<<<<< HEAD let unprocessed_tx_count = unprocessed_tx_indexes.len(); +======= + // applying cost of processed transactions to shared cost_tracker + transactions.iter().enumerate().for_each(|(index, tx)| { + if !unprocessed_tx_indexes.iter().any(|&i| i == index) { + let tx_cost = cost_model.read().unwrap().calculate_cost(tx.transaction()); + let mut guard = cost_tracker.lock().unwrap(); + let _result = guard.try_add(tx_cost); + drop(guard); + } + }); +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) let mut filter_pending_packets_time = Measure::start("filter_pending_packets_time"); let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs( @@ -1117,11 +1134,22 @@ impl BankingStage { } } +<<<<<<< HEAD let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( msgs, &transaction_indexes, bank.secp256k1_program_enabled(), ); +======= + let (transactions, transaction_to_packet_indexes, retry_packet_indexes) = + Self::transactions_from_packets( + msgs, + 
transaction_indexes, + bank.secp256k1_program_enabled(), + cost_model, + cost_tracker, + ); +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) let tx_count = transaction_to_packet_indexes.len(); @@ -1249,7 +1277,7 @@ impl BankingStage { &bank, &msgs, &packet_indexes, - &my_pubkey, + my_pubkey, next_leader, ); Self::push_unprocessed( @@ -2449,7 +2477,7 @@ mod tests { Receiver, JoinHandle<()>, ) { - Blockstore::destroy(&ledger_path).unwrap(); + Blockstore::destroy(ledger_path).unwrap(); let genesis_config_info = create_slow_genesis_config(10_000); let GenesisConfigInfo { genesis_config, @@ -2457,8 +2485,8 @@ mod tests { .. } = &genesis_config_info; let blockstore = - Blockstore::open(&ledger_path).expect("Expected to be able to open database ledger"); - let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); + Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); + let bank = Arc::new(Bank::new_no_wallclock_throttle(genesis_config)); let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -2479,9 +2507,9 @@ mod tests { let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); let transactions = vec![ - system_transaction::transfer(&mint_keypair, &pubkey0, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey1, 1, genesis_config.hash()), - system_transaction::transfer(&mint_keypair, &pubkey2, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey0, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey1, 1, genesis_config.hash()), + system_transaction::transfer(mint_keypair, &pubkey2, 1, genesis_config.hash()), ]; let poh_simulator = simulate_poh(record_receiver, &poh_recorder); diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 9a8028f66051ea..77ed6cfa9b4a2b 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -408,7 +408,7 @@ pub fn broadcast_shreds( let packets: Vec<_> = shreds .iter() .map(|shred| { - let broadcast_index = weighted_best(&peers_and_stakes, shred.seed()); + let broadcast_index = weighted_best(peers_and_stakes, shred.seed()); (&shred.payload, &peers[broadcast_index].tvu) }) @@ -429,7 +429,7 @@ pub fn broadcast_shreds( send_mmsg_time.stop(); transmit_stats.send_mmsg_elapsed += send_mmsg_time.as_us(); - let num_live_peers = num_live_peers(&peers); + let num_live_peers = num_live_peers(peers); update_peer_stats( num_live_peers, broadcast_len as i64 + 1, diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index 674d8d06bf1f11..d9d738267e550d 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -212,9 +212,9 @@ impl BroadcastRun for BroadcastDuplicatesRun { .collect(); stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| { if r_stake == l_stake { - l_key.cmp(&r_key) + l_key.cmp(r_key) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }); diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 6908d5dd1b9908..8b9cf78e27f961 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -161,7 +161,7 @@ impl StandardBroadcastRun { ) -> Result<()> { let (bsend, brecv) = channel(); let (ssend, srecv) 
= channel(); - self.process_receive_results(&blockstore, &ssend, &bsend, receive_results)?; + self.process_receive_results(blockstore, &ssend, &bsend, receive_results)?; let srecv = Arc::new(Mutex::new(srecv)); let brecv = Arc::new(Mutex::new(brecv)); //data diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index 587e5f5903f85b..3f07732f701ed6 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -110,7 +110,7 @@ impl VoteTracker { epoch_schedule: *root_bank.epoch_schedule(), ..VoteTracker::default() }; - vote_tracker.progress_with_new_root_bank(&root_bank); + vote_tracker.progress_with_new_root_bank(root_bank); assert_eq!( *vote_tracker.leader_schedule_epoch.read().unwrap(), root_bank.get_leader_schedule_epoch(root_bank.slot()) @@ -603,7 +603,7 @@ impl ClusterInfoVoteListener { if slot == last_vote_slot { let vote_accounts = Stakes::vote_accounts(epoch_stakes.stakes()); let stake = vote_accounts - .get(&vote_pubkey) + .get(vote_pubkey) .map(|(stake, _)| *stake) .unwrap_or_default(); let total_stake = epoch_stakes.total_stake(); @@ -692,7 +692,7 @@ impl ClusterInfoVoteListener { // voters trying to make votes for slots earlier than the epoch for // which they are authorized let actual_authorized_voter = - vote_tracker.get_authorized_voter(&vote_pubkey, *last_vote_slot); + vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot); if actual_authorized_voter.is_none() { return false; @@ -700,7 +700,7 @@ impl ClusterInfoVoteListener { // Voting without the correct authorized pubkey, dump the vote if !VoteTracker::vote_contains_authorized_voter( - &gossip_tx, + gossip_tx, &actual_authorized_voter.unwrap(), ) { return false; @@ -738,7 +738,7 @@ impl ClusterInfoVoteListener { Self::track_new_votes_and_notify_confirmations( vote, &vote_pubkey, - &vote_tracker, + vote_tracker, root_bank, subscriptions, verified_vote_sender, diff --git a/core/src/cluster_slot_state_verifier.rs b/core/src/cluster_slot_state_verifier.rs index 9a91823a95dec7..2ad5090ce5c5e8 100644 --- a/core/src/cluster_slot_state_verifier.rs +++ b/core/src/cluster_slot_state_verifier.rs @@ -192,7 +192,7 @@ fn get_cluster_duplicate_confirmed_hash<'a>( slot, gossip_duplicate_confirmed_hash, local_duplicate_confirmed_hash ); } - Some(&local_frozen_hash) + Some(local_frozen_hash) } (Some(local_frozen_hash), None) => Some(local_frozen_hash), _ => gossip_duplicate_confirmed_hash, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 02cb4732c651aa..fe10848b755704 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -352,15 +352,15 @@ mod tests { if *a <= root { let mut expected = BlockCommitment::default(); expected.increase_rooted_stake(lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 4 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(2, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } else if i <= 6 { let mut expected = BlockCommitment::default(); expected.increase_confirmation_stake(1, lamports); - assert_eq!(*commitment.get(&a).unwrap(), expected); + assert_eq!(*commitment.get(a).unwrap(), expected); } } assert_eq!(rooted_stake[0], (root, lamports)); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 74a2bf6d011871..7740bf983bbd6b 100644 --- a/core/src/consensus.rs +++ 
b/core/src/consensus.rs @@ -164,7 +164,7 @@ impl Tower { bank: &Bank, path: &Path, ) -> Self { - let path = Self::get_filename(&path, node_pubkey); + let path = Self::get_filename(path, node_pubkey); let tmp_path = Self::get_tmp_filename(&path); let mut tower = Self { node_pubkey: *node_pubkey, @@ -205,8 +205,8 @@ impl Tower { crate::replay_stage::ReplayStage::initialize_progress_and_fork_choice( root_bank.deref(), bank_forks.frozen_banks().values().cloned().collect(), - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, ); let root = root_bank.slot(); @@ -219,11 +219,11 @@ impl Tower { .clone(); Self::new( - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, root, &heaviest_bank, - &ledger_path, + ledger_path, ) } @@ -736,7 +736,7 @@ impl Tower { // finding any lockout intervals in the `lockout_intervals` tree // for this bank that contain `last_vote`. let lockout_intervals = &progress - .get(&candidate_slot) + .get(candidate_slot) .unwrap() .fork_stats .lockout_intervals; @@ -1328,7 +1328,7 @@ pub fn reconcile_blockstore_roots_with_tower( if last_blockstore_root < tower_root { // Ensure tower_root itself to exist and be marked as rooted in the blockstore // in addition to its ancestors. - let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, &blockstore) + let new_roots: Vec<_> = AncestorIterator::new_inclusive(tower_root, blockstore) .take_while(|current| match current.cmp(&last_blockstore_root) { Ordering::Greater => true, Ordering::Equal => false, @@ -1490,7 +1490,7 @@ pub mod test { tower: &mut Tower, ) -> Vec { // Try to simulate the vote - let my_keypairs = self.validator_keypairs.get(&my_pubkey).unwrap(); + let my_keypairs = self.validator_keypairs.get(my_pubkey).unwrap(); let my_vote_pubkey = my_keypairs.vote_keypair.pubkey(); let ancestors = self.bank_forks.read().unwrap().ancestors(); let mut frozen_banks: Vec<_> = self @@ -1503,7 +1503,7 @@ pub mod test { .collect(); let _ = ReplayStage::compute_bank_stats( - &my_pubkey, + my_pubkey, &ancestors, &mut frozen_banks, tower, @@ -1582,9 +1582,9 @@ pub mod test { .filter_map(|slot| { let mut fork_tip_parent = tr(slot - 1); fork_tip_parent.push_front(tr(slot)); - self.fill_bank_forks(fork_tip_parent, &cluster_votes); + self.fill_bank_forks(fork_tip_parent, cluster_votes); if votes_to_simulate.contains(&slot) { - Some((slot, self.simulate_vote(slot, &my_pubkey, tower))) + Some((slot, self.simulate_vote(slot, my_pubkey, tower))) } else { None } @@ -1627,7 +1627,7 @@ pub mod test { fork_tip_parent.push_front(tr(start_slot + i)); self.fill_bank_forks(fork_tip_parent, cluster_votes); if self - .simulate_vote(i + start_slot, &my_pubkey, tower) + .simulate_vote(i + start_slot, my_pubkey, tower) .is_empty() { cluster_votes @@ -2850,7 +2850,7 @@ pub mod test { tower.save(&identity_keypair).unwrap(); modify_serialized(&tower.path); - let loaded = Tower::restore(&dir.path(), &identity_keypair.pubkey()); + let loaded = Tower::restore(dir.path(), &identity_keypair.pubkey()); (tower, loaded) } diff --git a/core/src/cost_model.rs b/core/src/cost_model.rs new file mode 100644 index 00000000000000..6a12005cb003e0 --- /dev/null +++ b/core/src/cost_model.rs @@ -0,0 +1,513 @@ +//! 'cost_model` provides service to estimate a transaction's cost +//! It does so by analyzing accounts the transaction touches, and instructions +//! it includes. Using historical data as guideline, it estimates cost of +//! reading/writing account, the sum of that comes up to "account access cost"; +//! 
Instructions take time to execute; both historical and runtime data are
+//! used to determine each instruction's execution time, and the sum of those
+//! is the transaction's "execution cost".
+//! The main function is `calculate_cost`, which returns a `TransactionCost` struct.
+//!
+use crate::execute_cost_table::ExecuteCostTable;
+use log::*;
+use solana_sdk::{message::Message, pubkey::Pubkey, transaction::Transaction};
+use std::collections::HashMap;
+
+// Estimated from mainnet-beta data: signature verification averages 1us,
+// account reads average 7us and account writes average 25us
+const SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 1 + 25;
+const SIGNED_READONLY_ACCOUNT_ACCESS_COST: u64 = 1 + 7;
+const NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 25;
+const NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST: u64 = 7;
+
+// Sampled from mainnet-beta, the instruction execution timing stats are (in us):
+// min=194, max=62164, avg=8214.49, med=2243
+pub const ACCOUNT_MAX_COST: u64 = 100_000_000;
+pub const BLOCK_MAX_COST: u64 = 2_500_000_000;
+
+// The cost of a transaction is the sum of account_access_cost and execution_cost,
+// where:
+// account_access_cost is the sum of read/write/sign costs over all accounts
+// included in the transaction; reads are cheaper than writes.
+// execution_cost is the sum of all instructions' execution costs, which is
+// observed during runtime and fed back by Replay.
+#[derive(Default, Debug)]
+pub struct TransactionCost {
+    pub writable_accounts: Vec<Pubkey>,
+    pub account_access_cost: u64,
+    pub execution_cost: u64,
+}
+
+#[derive(Debug)]
+pub struct CostModel {
+    account_cost_limit: u64,
+    block_cost_limit: u64,
+    instruction_execution_cost_table: ExecuteCostTable,
+}
+
+impl Default for CostModel {
+    fn default() -> Self {
+        CostModel::new(ACCOUNT_MAX_COST, BLOCK_MAX_COST)
+    }
+}
+
+impl CostModel {
+    pub fn new(chain_max: u64, block_max: u64) -> Self {
+        Self {
+            account_cost_limit: chain_max,
+            block_cost_limit: block_max,
+            instruction_execution_cost_table: ExecuteCostTable::default(),
+        }
+    }
+
+    pub fn get_account_cost_limit(&self) -> u64 {
+        self.account_cost_limit
+    }
+
+    pub fn get_block_cost_limit(&self) -> u64 {
+        self.block_cost_limit
+    }
+
+    pub fn calculate_cost(&self, transaction: &Transaction) -> TransactionCost {
+        let (
+            signed_writable_accounts,
+            signed_readonly_accounts,
+            non_signed_writable_accounts,
+            non_signed_readonly_accounts,
+        ) = CostModel::sort_accounts_by_type(transaction.message());
+
+        let mut cost = TransactionCost {
+            writable_accounts: vec![],
+            account_access_cost: CostModel::find_account_access_cost(
+                &signed_writable_accounts,
+                &signed_readonly_accounts,
+                &non_signed_writable_accounts,
+                &non_signed_readonly_accounts,
+            ),
+            execution_cost: self.find_transaction_cost(transaction),
+        };
+        cost.writable_accounts.extend(&signed_writable_accounts);
+        cost.writable_accounts.extend(&non_signed_writable_accounts);
+        debug!("transaction {:?} has cost {:?}", transaction, cost);
+        cost
+    }
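A minimal usage sketch of `calculate_cost` (illustrative only, not part of the patch; it assumes the module as added above). With the constants above, a plain transfer touches one signed writable account (the fee payer, 1 + 25), one unsigned writable account (the recipient, 25) and one unsigned readonly account (the system program, 7), so account_access_cost = 58; execution_cost falls back to the cost table's mode for an unknown program:

    fn illustrate_calculate_cost(cost_model: &CostModel) {
        use solana_sdk::{hash::Hash, signature::{Keypair, Signer}, system_transaction};
        let from = Keypair::new();
        let to = solana_sdk::pubkey::new_rand();
        let tx = system_transaction::transfer(&from, &to, 1, Hash::default());
        let tx_cost = cost_model.calculate_cost(&tx);
        // 26 (signed writable) + 25 (unsigned writable) + 7 (unsigned readonly)
        assert_eq!(58, tx_cost.account_access_cost);
        // the fee payer and the recipient are the writable accounts
        assert_eq!(2, tx_cost.writable_accounts.len());
    }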
+
+    // Update or insert an instruction cost into the table.
+    pub fn upsert_instruction_cost(
+        &mut self,
+        program_key: &Pubkey,
+        cost: &u64,
+    ) -> Result<u64, &'static str> {
+        self.instruction_execution_cost_table
+            .upsert(program_key, cost);
+        match self.instruction_execution_cost_table.get_cost(program_key) {
+            Some(cost) => Ok(*cost),
+            None => Err("failed to upsert to ExecuteCostTable"),
+        }
+    }
+
+    pub fn get_instruction_cost_table(&self) -> &HashMap<Pubkey, u64> {
+        self.instruction_execution_cost_table.get_cost_table()
+    }
+
+    fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 {
+        match self.instruction_execution_cost_table.get_cost(program_key) {
+            Some(cost) => *cost,
+            None => {
+                let default_value = self.instruction_execution_cost_table.get_mode();
+                debug!(
+                    "Program key {:?} does not have an assigned cost, using mode {}",
+                    program_key, default_value
+                );
+                default_value
+            }
+        }
+    }
+
+    fn find_transaction_cost(&self, transaction: &Transaction) -> u64 {
+        let mut cost: u64 = 0;
+
+        for instruction in &transaction.message().instructions {
+            let program_id =
+                transaction.message().account_keys[instruction.program_id_index as usize];
+            let instruction_cost = self.find_instruction_cost(&program_id);
+            trace!(
+                "instruction {:?} has cost of {}",
+                instruction,
+                instruction_cost
+            );
+            cost += instruction_cost;
+        }
+        cost
+    }
+
+    fn find_account_access_cost(
+        signed_writable_accounts: &[Pubkey],
+        signed_readonly_accounts: &[Pubkey],
+        non_signed_writable_accounts: &[Pubkey],
+        non_signed_readonly_accounts: &[Pubkey],
+    ) -> u64 {
+        let mut cost = 0;
+        cost += signed_writable_accounts.len() as u64 * SIGNED_WRITABLE_ACCOUNT_ACCESS_COST;
+        cost += signed_readonly_accounts.len() as u64 * SIGNED_READONLY_ACCOUNT_ACCESS_COST;
+        cost += non_signed_writable_accounts.len() as u64 * NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST;
+        cost += non_signed_readonly_accounts.len() as u64 * NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
+        cost
+    }
+
+    fn sort_accounts_by_type(
+        message: &Message,
+    ) -> (Vec<Pubkey>, Vec<Pubkey>, Vec<Pubkey>, Vec<Pubkey>) {
+        let demote_sysvar_write_locks = true;
+        let mut signer_writable: Vec<Pubkey> = vec![];
+        let mut signer_readonly: Vec<Pubkey> = vec![];
+        let mut non_signer_writable: Vec<Pubkey> = vec![];
+        let mut non_signer_readonly: Vec<Pubkey> = vec![];
+        message.account_keys.iter().enumerate().for_each(|(i, k)| {
+            let is_signer = message.is_signer(i);
+            let is_writable = message.is_writable(i, demote_sysvar_write_locks);
+
+            if is_signer && is_writable {
+                signer_writable.push(*k);
+            } else if is_signer && !is_writable {
+                signer_readonly.push(*k);
+            } else if !is_signer && is_writable {
+                non_signer_writable.push(*k);
+            } else {
+                non_signer_readonly.push(*k);
+            }
+        });
+        (
+            signer_writable,
+            signer_readonly,
+            non_signer_writable,
+            non_signer_readonly,
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use solana_runtime::{
+        bank::Bank,
+        genesis_utils::{create_genesis_config, GenesisConfigInfo},
+    };
+    use solana_sdk::{
+        bpf_loader,
+        hash::Hash,
+        instruction::CompiledInstruction,
+        message::Message,
+        signature::{Keypair, Signer},
+        system_instruction::{self},
+        system_program, system_transaction,
+    };
+    use std::{
+        str::FromStr,
+        sync::{Arc, RwLock},
+        thread::{self, JoinHandle},
+    };
+
+    fn test_setup() -> (Keypair, Hash) {
+        solana_logger::setup();
+        let GenesisConfigInfo {
+            genesis_config,
+            mint_keypair,
+            ..
+ } = create_genesis_config(10); + let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config)); + let start_hash = bank.last_blockhash(); + (mint_keypair, start_hash) + } + + #[test] + fn test_cost_model_instruction_cost() { + let mut testee = CostModel::default(); + + let known_key = Pubkey::from_str("known11111111111111111111111111111111111111").unwrap(); + testee.upsert_instruction_cost(&known_key, &100).unwrap(); + // find cost for known programs + assert_eq!(100, testee.find_instruction_cost(&known_key)); + + testee + .upsert_instruction_cost(&bpf_loader::id(), &1999) + .unwrap(); + assert_eq!(1999, testee.find_instruction_cost(&bpf_loader::id())); + + // unknown program is assigned with default cost + assert_eq!( + testee.instruction_execution_cost_table.get_mode(), + testee.find_instruction_cost( + &Pubkey::from_str("unknown111111111111111111111111111111111111").unwrap() + ) + ); + } + + #[test] + fn test_cost_model_simple_transaction() { + let (mint_keypair, start_hash) = test_setup(); + + let keypair = Keypair::new(); + let simple_transaction = + system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash); + debug!( + "system_transaction simple_transaction {:?}", + simple_transaction + ); + + // expected cost for one system transfer instructions + let expected_cost = 8; + + let mut testee = CostModel::default(); + testee + .upsert_instruction_cost(&system_program::id(), &expected_cost) + .unwrap(); + assert_eq!( + expected_cost, + testee.find_transaction_cost(&simple_transaction) + ); + } + + #[test] + fn test_cost_model_transaction_many_transfer_instructions() { + let (mint_keypair, start_hash) = test_setup(); + + let key1 = solana_sdk::pubkey::new_rand(); + let key2 = solana_sdk::pubkey::new_rand(); + let instructions = + system_instruction::transfer_many(&mint_keypair.pubkey(), &[(key1, 1), (key2, 1)]); + let message = Message::new(&instructions, Some(&mint_keypair.pubkey())); + let tx = Transaction::new(&[&mint_keypair], message, start_hash); + debug!("many transfer transaction {:?}", tx); + + // expected cost for two system transfer instructions + let program_cost = 8; + let expected_cost = program_cost * 2; + + let mut testee = CostModel::default(); + testee + .upsert_instruction_cost(&system_program::id(), &program_cost) + .unwrap(); + assert_eq!(expected_cost, testee.find_transaction_cost(&tx)); + } + + #[test] + fn test_cost_model_message_many_different_instructions() { + let (mint_keypair, start_hash) = test_setup(); + + // construct a transaction with multiple random instructions + let key1 = solana_sdk::pubkey::new_rand(); + let key2 = solana_sdk::pubkey::new_rand(); + let prog1 = solana_sdk::pubkey::new_rand(); + let prog2 = solana_sdk::pubkey::new_rand(); + let instructions = vec![ + CompiledInstruction::new(3, &(), vec![0, 1]), + CompiledInstruction::new(4, &(), vec![0, 2]), + ]; + let tx = Transaction::new_with_compiled_instructions( + &[&mint_keypair], + &[key1, key2], + start_hash, + vec![prog1, prog2], + instructions, + ); + debug!("many random transaction {:?}", tx); + + let testee = CostModel::default(); + let result = testee.find_transaction_cost(&tx); + + // expected cost for two random/unknown program is + let expected_cost = testee.instruction_execution_cost_table.get_mode() * 2; + assert_eq!(expected_cost, result); + } + + #[test] + fn test_cost_model_sort_message_accounts_by_type() { + // construct a transaction with two random instructions with same signer + let signer1 = Keypair::new(); + let signer2 = Keypair::new(); + let 
key1 = Pubkey::new_unique();
+        let key2 = Pubkey::new_unique();
+        let prog1 = Pubkey::new_unique();
+        let prog2 = Pubkey::new_unique();
+        let instructions = vec![
+            CompiledInstruction::new(4, &(), vec![0, 2]),
+            CompiledInstruction::new(5, &(), vec![1, 3]),
+        ];
+        let tx = Transaction::new_with_compiled_instructions(
+            &[&signer1, &signer2],
+            &[key1, key2],
+            Hash::new_unique(),
+            vec![prog1, prog2],
+            instructions,
+        );
+        debug!("many random transaction {:?}", tx);
+
+        let (
+            signed_writable_accounts,
+            signed_readonly_accounts,
+            non_signed_writable_accounts,
+            non_signed_readonly_accounts,
+        ) = CostModel::sort_accounts_by_type(tx.message());
+
+        assert_eq!(2, signed_writable_accounts.len());
+        assert_eq!(signer1.pubkey(), signed_writable_accounts[0]);
+        assert_eq!(signer2.pubkey(), signed_writable_accounts[1]);
+        assert_eq!(0, signed_readonly_accounts.len());
+        assert_eq!(2, non_signed_writable_accounts.len());
+        assert_eq!(key1, non_signed_writable_accounts[0]);
+        assert_eq!(key2, non_signed_writable_accounts[1]);
+        assert_eq!(2, non_signed_readonly_accounts.len());
+        assert_eq!(prog1, non_signed_readonly_accounts[0]);
+        assert_eq!(prog2, non_signed_readonly_accounts[1]);
+    }
+
+    #[test]
+    fn test_cost_model_insert_instruction_cost() {
+        let key1 = Pubkey::new_unique();
+        let cost1 = 100;
+
+        let mut cost_model = CostModel::default();
+        // using the default cost for an unknown instruction
+        assert_eq!(
+            cost_model.instruction_execution_cost_table.get_mode(),
+            cost_model.find_instruction_cost(&key1)
+        );
+
+        // insert an instruction cost into the table
+        assert!(cost_model.upsert_instruction_cost(&key1, &cost1).is_ok());
+
+        // now it is a known instruction with a known cost
+        assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
+    }
+
+    #[test]
+    fn test_cost_model_calculate_cost() {
+        let (mint_keypair, start_hash) = test_setup();
+        let tx =
+            system_transaction::transfer(&mint_keypair, &Keypair::new().pubkey(), 2, start_hash);
+
+        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
+            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
+            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
+        let expected_execution_cost = 8;
+
+        let mut cost_model = CostModel::default();
+        cost_model
+            .upsert_instruction_cost(&system_program::id(), &expected_execution_cost)
+            .unwrap();
+        let tx_cost = cost_model.calculate_cost(&tx);
+        assert_eq!(expected_account_cost, tx_cost.account_access_cost);
+        assert_eq!(expected_execution_cost, tx_cost.execution_cost);
+        assert_eq!(2, tx_cost.writable_accounts.len());
+    }
+
+    #[test]
+    fn test_cost_model_update_instruction_cost() {
+        let key1 = Pubkey::new_unique();
+        let cost1 = 100;
+        let cost2 = 200;
+        let updated_cost = (cost1 + cost2) / 2;
+
+        let mut cost_model = CostModel::default();
+
+        // insert an instruction cost into the table
+        assert!(cost_model.upsert_instruction_cost(&key1, &cost1).is_ok());
+        assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
+
+        // update the instruction cost
+        assert!(cost_model.upsert_instruction_cost(&key1, &cost2).is_ok());
+        assert_eq!(updated_cost, cost_model.find_instruction_cost(&key1));
+    }
+
+    #[test]
+    fn test_cost_model_can_be_shared_concurrently_as_immutable() {
+        let (mint_keypair, start_hash) = test_setup();
+        let number_threads = 10;
+        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
+            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
+            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
+
+        let cost_model = Arc::new(CostModel::default());
+
+        let thread_handlers: Vec<JoinHandle<()>> = (0..number_threads)
+            .map(|_| {
+                // each thread creates its own simple transaction
+                let simple_transaction = system_transaction::transfer(
+                    &mint_keypair,
+                    &Keypair::new().pubkey(),
+                    2,
+                    start_hash,
+                );
+                let cost_model = cost_model.clone();
+                thread::spawn(move || {
+                    let tx_cost = cost_model.calculate_cost(&simple_transaction);
+                    assert_eq!(2, tx_cost.writable_accounts.len());
+                    assert_eq!(expected_account_cost, tx_cost.account_access_cost);
+                    assert_eq!(
+                        cost_model.instruction_execution_cost_table.get_mode(),
+                        tx_cost.execution_cost
+                    );
+                })
+            })
+            .collect();
+
+        for th in thread_handlers {
+            th.join().unwrap();
+        }
+    }
+
+    #[test]
+    fn test_cost_model_can_be_shared_concurrently_with_rwlock() {
+        let (mint_keypair, start_hash) = test_setup();
+        // construct a transaction with multiple random instructions
+        let key1 = solana_sdk::pubkey::new_rand();
+        let key2 = solana_sdk::pubkey::new_rand();
+        let prog1 = solana_sdk::pubkey::new_rand();
+        let prog2 = solana_sdk::pubkey::new_rand();
+        let instructions = vec![
+            CompiledInstruction::new(3, &(), vec![0, 1]),
+            CompiledInstruction::new(4, &(), vec![0, 2]),
+        ];
+        let tx = Arc::new(Transaction::new_with_compiled_instructions(
+            &[&mint_keypair],
+            &[key1, key2],
+            start_hash,
+            vec![prog1, prog2],
+            instructions,
+        ));
+
+        let number_threads = 10;
+        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
+            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST * 2
+            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST * 2;
+        let cost1 = 100;
+        let cost2 = 200;
+        // execution cost can be either 2 * default (before the write) or cost1 + cost2 (after it)
+
+        let cost_model: Arc<RwLock<CostModel>> = Arc::new(RwLock::new(CostModel::default()));
+
+        let thread_handlers: Vec<JoinHandle<()>> = (0..number_threads)
+            .map(|i| {
+                let cost_model = cost_model.clone();
+                let tx = tx.clone();
+
+                if i == 5 {
+                    thread::spawn(move || {
+                        let mut cost_model = cost_model.write().unwrap();
+                        assert!(cost_model.upsert_instruction_cost(&prog1, &cost1).is_ok());
+                        assert!(cost_model.upsert_instruction_cost(&prog2, &cost2).is_ok());
+                    })
+                } else {
+                    thread::spawn(move || {
+                        let tx_cost = cost_model.read().unwrap().calculate_cost(&tx);
+                        assert_eq!(3, tx_cost.writable_accounts.len());
+                        assert_eq!(expected_account_cost, tx_cost.account_access_cost);
+                    })
+                }
+            })
+            .collect();

+        for th in thread_handlers {
+            th.join().unwrap();
+        }
+    }
+}
diff --git a/core/src/cost_tracker.rs b/core/src/cost_tracker.rs new file mode 100644 index 00000000000000..df544ba702950f --- /dev/null +++ b/core/src/cost_tracker.rs @@ -0,0 +1,356 @@
+//! `cost_tracker` tracks transaction cost per chained account, as well as for the
+//! entire block. The main entry function is `try_add`; on success it returns the
+//! new block cost.
+//!
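A minimal usage sketch of the tracker defined below (illustrative only, not part of the patch; `TransactionCost` is the struct from cost_model.rs above). It shows the two-level check behind `try_add`: the block total and each writable account's chained total must both stay under their caps, and a rejected transaction leaves the tracker unchanged:

    fn illustrate_try_add() {
        use solana_sdk::pubkey::Pubkey;
        let acct = Pubkey::new_unique();
        let mut tracker = CostTracker::new(100, 200); // account cap 100, block cap 200
        let tx_cost = |c: u64| TransactionCost {
            writable_accounts: vec![acct],
            account_access_cost: 0,
            execution_cost: c,
        };
        assert_eq!(Ok(60), tracker.try_add(tx_cost(60))); // block_cost is now 60
        assert!(tracker.try_add(tx_cost(60)).is_err());   // acct would reach 120 > 100
        tracker.reset_if_new_bank(1);                     // a new slot clears the totals
        assert_eq!(Ok(60), tracker.try_add(tx_cost(60)));
    }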
+use crate::cost_model::TransactionCost;
+use solana_sdk::{clock::Slot, pubkey::Pubkey};
+use std::collections::HashMap;
+
+#[derive(Debug, Clone)]
+pub struct CostTracker {
+    account_cost_limit: u64,
+    block_cost_limit: u64,
+    current_bank_slot: Slot,
+    cost_by_writable_accounts: HashMap<Pubkey, u64>,
+    block_cost: u64,
+}
+
+impl CostTracker {
+    pub fn new(chain_max: u64, package_max: u64) -> Self {
+        assert!(chain_max <= package_max);
+        Self {
+            account_cost_limit: chain_max,
+            block_cost_limit: package_max,
+            current_bank_slot: 0,
+            cost_by_writable_accounts: HashMap::new(),
+            block_cost: 0,
+        }
+    }
+
+    pub fn reset_if_new_bank(&mut self, slot: Slot) {
+        if slot != self.current_bank_slot {
+            self.current_bank_slot = slot;
+            self.cost_by_writable_accounts.clear();
+            self.block_cost = 0;
+        }
+    }
+
+    pub fn try_add(&mut self, transaction_cost: TransactionCost) -> Result<u64, &'static str> {
+        let cost = transaction_cost.account_access_cost + transaction_cost.execution_cost;
+        self.would_fit(&transaction_cost.writable_accounts, &cost)?;
+
+        self.add_transaction(&transaction_cost.writable_accounts, &cost);
+        Ok(self.block_cost)
+    }
+
+    fn would_fit(&self, keys: &[Pubkey], cost: &u64) -> Result<(), &'static str> {
+        // check against the total package cost
+        if self.block_cost + cost > self.block_cost_limit {
+            return Err("would exceed block cost limit");
+        }
+
+        // check if the transaction itself is more costly than the account_cost_limit
+        if *cost > self.account_cost_limit {
+            return Err("Transaction is too expensive, exceeds account cost limit");
+        }
+
+        // check each account against account_cost_limit
+        for account_key in keys.iter() {
+            match self.cost_by_writable_accounts.get(account_key) {
+                Some(chained_cost) => {
+                    if chained_cost + cost > self.account_cost_limit {
+                        return Err("would exceed account cost limit");
+                    } else {
+                        continue;
+                    }
+                }
+                None => continue,
+            }
+        }
+
+        Ok(())
+    }
+
+    fn add_transaction(&mut self, keys: &[Pubkey], cost: &u64) {
+        for account_key in keys.iter() {
+            *self
+                .cost_by_writable_accounts
+                .entry(*account_key)
+                .or_insert(0) += cost;
+        }
+        self.block_cost += cost;
+    }
+}
+
+// CostStats can be collected by utilities such as ledger-tool
+#[derive(Default, Debug)]
+pub struct CostStats {
+    pub total_cost: u64,
+    pub number_of_accounts: usize,
+    pub costliest_account: Pubkey,
+    pub costliest_account_cost: u64,
+}
+
+impl CostTracker {
+    pub fn get_stats(&self) -> CostStats {
+        let mut stats = CostStats {
+            total_cost: self.block_cost,
+            number_of_accounts: self.cost_by_writable_accounts.len(),
+            costliest_account: Pubkey::default(),
+            costliest_account_cost: 0,
+        };
+
+        for (key, cost) in self.cost_by_writable_accounts.iter() {
+            if cost > &stats.costliest_account_cost {
+                stats.costliest_account = *key;
+                stats.costliest_account_cost = *cost;
+            }
+        }
+
+        stats
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use solana_runtime::{
+        bank::Bank,
+        genesis_utils::{create_genesis_config, GenesisConfigInfo},
+    };
+    use solana_sdk::{
+        hash::Hash,
+        signature::{Keypair, Signer},
+        system_transaction,
+        transaction::Transaction,
+    };
+    use std::{cmp, sync::Arc};
+
+    fn test_setup() -> (Keypair, Hash) {
+        solana_logger::setup();
+        let GenesisConfigInfo {
+            genesis_config,
+            mint_keypair,
+            ..
+        } = create_genesis_config(10);
+        let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
+        let start_hash = bank.last_blockhash();
+        (mint_keypair, start_hash)
+    }
+
+    fn build_simple_transaction(
+        mint_keypair: &Keypair,
+        start_hash: &Hash,
+    ) -> (Transaction, Vec<Pubkey>, u64) {
+        let keypair = Keypair::new();
+        let simple_transaction =
+            system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash);
+
+        (simple_transaction, vec![mint_keypair.pubkey()], 5)
+    }
+
+    #[test]
+    fn test_cost_tracker_initialization() {
+        let testee = CostTracker::new(10, 11);
+        assert_eq!(10, testee.account_cost_limit);
+        assert_eq!(11, testee.block_cost_limit);
+        assert_eq!(0, testee.cost_by_writable_accounts.len());
+        assert_eq!(0, testee.block_cost);
+    }
+
+    #[test]
+    fn test_cost_tracker_ok_add_one() {
+        let (mint_keypair, start_hash) = test_setup();
+        let (_tx, keys, cost) = build_simple_transaction(&mint_keypair, &start_hash);
+
+        // build testee to have capacity for one simple transaction
+        let mut testee = CostTracker::new(cost, cost);
+        assert!(testee.would_fit(&keys, &cost).is_ok());
+        testee.add_transaction(&keys, &cost);
+        assert_eq!(cost, testee.block_cost);
+    }
+
+    #[test]
+    fn test_cost_tracker_ok_add_two_same_accounts() {
+        let (mint_keypair, start_hash) = test_setup();
+        // build two transactions with the same signed account
+        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
+        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
+
+        // build testee to have capacity for two simple transactions, with the same accounts
+        let mut testee = CostTracker::new(cost1 + cost2, cost1 + cost2);
+        {
+            assert!(testee.would_fit(&keys1, &cost1).is_ok());
+            testee.add_transaction(&keys1, &cost1);
+        }
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_ok());
+            testee.add_transaction(&keys2, &cost2);
+        }
+        assert_eq!(cost1 + cost2, testee.block_cost);
+        assert_eq!(1, testee.cost_by_writable_accounts.len());
+    }
+
+    #[test]
+    fn test_cost_tracker_ok_add_two_diff_accounts() {
+        let (mint_keypair, start_hash) = test_setup();
+        // build two transactions with different accounts
+        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
+        let second_account = Keypair::new();
+        let (_tx2, keys2, cost2) = build_simple_transaction(&second_account, &start_hash);
+
+        // build testee to have capacity for two simple transactions, with different accounts
+        let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2);
+        {
+            assert!(testee.would_fit(&keys1, &cost1).is_ok());
+            testee.add_transaction(&keys1, &cost1);
+        }
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_ok());
+            testee.add_transaction(&keys2, &cost2);
+        }
+        assert_eq!(cost1 + cost2, testee.block_cost);
+        assert_eq!(2, testee.cost_by_writable_accounts.len());
+    }
+
+    #[test]
+    fn test_cost_tracker_chain_reach_limit() {
+        let (mint_keypair, start_hash) = test_setup();
+        // build two transactions with the same signed account
+        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
+        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
+
+        // build testee to have capacity for two simple transactions, but not for the same accounts
+        let mut testee = CostTracker::new(cmp::min(cost1, cost2), cost1 + cost2);
+        // should have room for the first transaction
+        {
+            assert!(testee.would_fit(&keys1, &cost1).is_ok());
+            testee.add_transaction(&keys1, &cost1);
+        }
+        // but no more space on the same chain (same signer account)
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_err());
+        }
+    }
+
+    #[test]
+    fn test_cost_tracker_reach_limit() {
+        let (mint_keypair, start_hash) = test_setup();
+        // build two transactions with different accounts
+        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
+        let second_account = Keypair::new();
+        let (_tx2, keys2, cost2) = build_simple_transaction(&second_account, &start_hash);
+
+        // build testee to have capacity for each chain, but not enough room for both transactions
+        let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2 - 1);
+        // should have room for the first transaction
+        {
+            assert!(testee.would_fit(&keys1, &cost1).is_ok());
+            testee.add_transaction(&keys1, &cost1);
+        }
+        // but no more room for the package as a whole
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_err());
+        }
+    }
+
+    #[test]
+    fn test_cost_tracker_reset() {
+        let (mint_keypair, start_hash) = test_setup();
+        // build two transactions with the same signed account
+        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
+        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
+
+        // build testee to have capacity for two simple transactions, but not for the same accounts
+        let mut testee = CostTracker::new(cmp::min(cost1, cost2), cost1 + cost2);
+        // should have room for the first transaction
+        {
+            assert!(testee.would_fit(&keys1, &cost1).is_ok());
+            testee.add_transaction(&keys1, &cost1);
+            assert_eq!(1, testee.cost_by_writable_accounts.len());
+            assert_eq!(cost1, testee.block_cost);
+        }
+        // but no more space on the same chain (same signer account)
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_err());
+        }
+        // reset the tracker
+        {
+            testee.reset_if_new_bank(100);
+            assert_eq!(0, testee.cost_by_writable_accounts.len());
+            assert_eq!(0, testee.block_cost);
+        }
+        // now the second transaction can be added
+        {
+            assert!(testee.would_fit(&keys2, &cost2).is_ok());
+        }
+    }
+
+    #[test]
+    fn test_cost_tracker_try_add_is_atomic() {
+        let acct1 = Pubkey::new_unique();
+        let acct2 = Pubkey::new_unique();
+        let acct3 = Pubkey::new_unique();
+        let cost = 100;
+        let account_max = cost * 2;
+        let block_max = account_max * 3; // for three accts
+
+        let mut testee = CostTracker::new(account_max, block_max);
+
+        // case 1: a tx writes to 3 accounts, should succeed, we will have:
+        // | acct1 | $cost |
+        // | acct2 | $cost |
+        // | acct3 | $cost |
+        // and block_cost = $cost
+        {
+            let tx_cost = TransactionCost {
+                writable_accounts: vec![acct1, acct2, acct3],
+                account_access_cost: 0,
+                execution_cost: cost,
+            };
+            assert!(testee.try_add(tx_cost).is_ok());
+            let stat = testee.get_stats();
+            assert_eq!(cost, stat.total_cost);
+            assert_eq!(3, stat.number_of_accounts);
+            assert_eq!(cost, stat.costliest_account_cost);
+        }
+
+        // case 2: a tx writes to acct2 with $cost, should succeed, resulting in:
+        // | acct1 | $cost |
+        // | acct2 | $cost * 2 |
+        // | acct3 | $cost |
+        // and block_cost = $cost * 2
+        {
+            let tx_cost = TransactionCost {
+                writable_accounts: vec![acct2],
+                account_access_cost: 0,
+                execution_cost: cost,
+            };
+            assert!(testee.try_add(tx_cost).is_ok());
+            let stat = testee.get_stats();
+            assert_eq!(cost * 2, stat.total_cost);
+            assert_eq!(3, stat.number_of_accounts);
+            assert_eq!(cost * 2, stat.costliest_account_cost);
+            assert_eq!(acct2, stat.costliest_account);
+        }
+
+        // case 3: a tx writes to [acct1, acct2]; acct2 exceeds the limit, so it should
+        // fail atomically, leaving the table untouched:
+        // | acct1 | $cost |
+        // | acct2 | $cost * 2 |
+        // | acct3 | $cost |
+        // and block_cost = $cost * 2
+        {
+            let tx_cost = TransactionCost {
+                writable_accounts: vec![acct1, acct2],
+                account_access_cost: 0,
+                execution_cost: cost,
+            };
+            assert!(testee.try_add(tx_cost).is_err());
+            let stat = testee.get_stats();
+            assert_eq!(cost * 2, stat.total_cost);
+            assert_eq!(3, stat.number_of_accounts);
+            assert_eq!(cost * 2, stat.costliest_account_cost);
+            assert_eq!(acct2, stat.costliest_account);
+        }
+    }
+}
diff --git a/core/src/execute_cost_table.rs b/core/src/execute_cost_table.rs new file mode 100644 index 00000000000000..47cb1c81dc7f88 --- /dev/null +++ b/core/src/execute_cost_table.rs @@ -0,0 +1,277 @@
+/// ExecuteCostTable is aggregated by the Cost Model; it keeps each program's
+/// average cost in a HashMap of fixed capacity, to keep it from growing
+/// unchecked.
+/// When its capacity limit is reached, it prunes old and less-used programs
+/// to make room for new ones.
+use log::*;
+use solana_sdk::pubkey::Pubkey;
+use std::{collections::HashMap, time::SystemTime};
+
+// Pruning is a rather expensive operation, so freeing up bulk space in each
+// pass is more efficient. PRUNE_RATIO defines the post-prune table size:
+// original_size * PRUNE_RATIO.
+const PRUNE_RATIO: f64 = 0.75;
+// with 50_000 TPS as the norm, weight each occurrence at '100' per microsecond
+const OCCURRENCES_WEIGHT: i64 = 100;
+
+const DEFAULT_CAPACITY: usize = 1024;
+
+#[derive(Debug)]
+pub struct ExecuteCostTable {
+    capacity: usize,
+    table: HashMap<Pubkey, u64>,
+    occurrences: HashMap<Pubkey, (usize, SystemTime)>,
+}
+
+impl Default for ExecuteCostTable {
+    fn default() -> Self {
+        ExecuteCostTable::new(DEFAULT_CAPACITY)
+    }
+}
+
+impl ExecuteCostTable {
+    pub fn new(cap: usize) -> Self {
+        Self {
+            capacity: cap,
+            table: HashMap::new(),
+            occurrences: HashMap::new(),
+        }
+    }
+
+    pub fn get_cost_table(&self) -> &HashMap<Pubkey, u64> {
+        &self.table
+    }
+
+    pub fn get_count(&self) -> usize {
+        self.table.len()
+    }
+
+    // instead of assigning an unknown program a configured/hard-coded cost,
+    // use the average or mode function to make an educated guess
+    pub fn get_average(&self) -> u64 {
+        if self.table.is_empty() {
+            0
+        } else {
+            self.table.iter().map(|(_, value)| value).sum::<u64>() / self.get_count() as u64
+        }
+    }
+
+    pub fn get_mode(&self) -> u64 {
+        if self.occurrences.is_empty() {
+            0
+        } else {
+            let key = self
+                .occurrences
+                .iter()
+                .max_by_key(|&(_, count)| count)
+                .map(|(key, _)| key)
+                .expect("cannot find mode from cost table");
+
+            *self.table.get(key).unwrap()
+        }
+    }
+
+    // returns None if the program doesn't exist in the table; in that case,
+    // the client is advised to call `get_average()` or `get_mode()` to
+    // assign a 'default' value for the new program
+    pub fn get_cost(&self, key: &Pubkey) -> Option<&u64> {
+        self.table.get(key)
+    }
+
+    pub fn upsert(&mut self, key: &Pubkey, value: &u64) {
+        let need_to_add = self.table.get(key).is_none();
+        let current_size = self.get_count();
+        if current_size == self.capacity && need_to_add {
+            self.prune_to(&((current_size as f64 * PRUNE_RATIO) as usize));
+        }
+
+        let program_cost = self.table.entry(*key).or_insert(*value);
+        *program_cost = (*program_cost + *value) / 2;
+
+        let (count, timestamp) = self
+            .occurrences
+            .entry(*key)
+            .or_insert((0, SystemTime::now()));
+        *count += 1;
+        *timestamp = SystemTime::now();
+    }
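Illustrative only, not part of the patch: `upsert` above keeps a running average by halving, so the stored cost tracks recent observations:

    fn illustrate_upsert_average() {
        use solana_sdk::pubkey::Pubkey;
        let mut table = ExecuteCostTable::default();
        let program = Pubkey::new_unique();
        table.upsert(&program, &100); // first observation is stored as-is
        table.upsert(&program, &110); // (100 + 110) / 2 = 105
        assert_eq!(Some(&105), table.get_cost(&program));
    }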
+
+    // prune the old programs so the table contains `new_size` records, where
+    // "old" is defined by weighted age: negatively correlated with the
+    // program's age and positively correlated with how frequently the program
+    // is executed (i.e., its occurrences)
+    fn prune_to(&mut self, new_size: &usize) {
+        debug!(
+            "prune cost table, current size {}, new size {}",
+            self.get_count(),
+            new_size
+        );
+
+        if *new_size == self.get_count() {
+            return;
+        }
+
+        if *new_size == 0 {
+            self.table.clear();
+            self.occurrences.clear();
+            return;
+        }
+
+        let now = SystemTime::now();
+        let mut sorted_by_weighted_age: Vec<_> = self
+            .occurrences
+            .iter()
+            .map(|(key, (count, timestamp))| {
+                let age = now.duration_since(*timestamp).unwrap().as_micros();
+                let weighted_age = *count as i64 * OCCURRENCES_WEIGHT - age as i64;
+                (weighted_age, *key)
+            })
+            .collect();
+        sorted_by_weighted_age.sort_by(|x, y| x.0.partial_cmp(&y.0).unwrap());
+
+        for i in sorted_by_weighted_age.iter() {
+            self.table.remove(&i.1);
+            self.occurrences.remove(&i.1);
+            if *new_size == self.get_count() {
+                break;
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_execute_cost_table_prune_simple_table() {
+        solana_logger::setup();
+        let capacity: usize = 3;
+        let mut testee = ExecuteCostTable::new(capacity);
+
+        let key1 = Pubkey::new_unique();
+        let key2 = Pubkey::new_unique();
+        let key3 = Pubkey::new_unique();
+
+        testee.upsert(&key1, &1);
+        testee.upsert(&key2, &2);
+        testee.upsert(&key3, &3);
+
+        testee.prune_to(&(capacity - 1));
+
+        // the oldest, key1, should be pruned
+        assert!(testee.get_cost(&key1).is_none());
+        assert!(testee.get_cost(&key2).is_some());
+        assert!(testee.get_cost(&key3).is_some());
+    }
+
+    #[test]
+    fn test_execute_cost_table_prune_weighted_table() {
+        solana_logger::setup();
+        let capacity: usize = 3;
+        let mut testee = ExecuteCostTable::new(capacity);
+
+        let key1 = Pubkey::new_unique();
+        let key2 = Pubkey::new_unique();
+        let key3 = Pubkey::new_unique();
+
+        testee.upsert(&key1, &1);
+        testee.upsert(&key1, &1);
+        testee.upsert(&key2, &2);
+        testee.upsert(&key3, &3);
+
+        testee.prune_to(&(capacity - 1));
+
+        // the oldest, key1, has 2 counts; the 2nd oldest, key2, has 1 count;
+        // expect key2 to be pruned.
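Illustrative only, with made-up numbers: pruning sorts ascending by weighted_age = occurrences * OCCURRENCES_WEIGHT - age_in_us, so one extra occurrence outweighs up to 100us of extra age; that is why the twice-seen key1 survives here even though it is the oldest:

    fn illustrate_weighted_age() {
        let weighted_age = |count: i64, age_us: i64| count * 100 - age_us;
        assert!(weighted_age(1, 2) < weighted_age(1, 1)); // key2 sorts before key3
        assert!(weighted_age(1, 2) < weighted_age(2, 3)); // and before the twice-seen key1
    }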
+ assert!(testee.get_cost(&key1).is_some()); + assert!(testee.get_cost(&key2).is_none()); + assert!(testee.get_cost(&key3).is_some()); + } + + #[test] + fn test_execute_cost_table_upsert_within_capacity() { + solana_logger::setup(); + let mut testee = ExecuteCostTable::default(); + + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let cost1: u64 = 100; + let cost2: u64 = 110; + + // query empty table + assert!(testee.get_cost(&key1).is_none()); + + // insert one record + testee.upsert(&key1, &cost1); + assert_eq!(1, testee.get_count()); + assert_eq!(cost1, testee.get_average()); + assert_eq!(cost1, testee.get_mode()); + assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); + + // insert 2nd record + testee.upsert(&key2, &cost2); + assert_eq!(2, testee.get_count()); + assert_eq!((cost1 + cost2) / 2_u64, testee.get_average()); + assert_eq!(cost2, testee.get_mode()); + assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); + assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); + + // update 1st record + testee.upsert(&key1, &cost2); + assert_eq!(2, testee.get_count()); + assert_eq!(((cost1 + cost2) / 2 + cost2) / 2, testee.get_average()); + assert_eq!((cost1 + cost2) / 2, testee.get_mode()); + assert_eq!(&((cost1 + cost2) / 2), testee.get_cost(&key1).unwrap()); + assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); + } + + #[test] + fn test_execute_cost_table_upsert_exceeds_capacity() { + solana_logger::setup(); + let capacity: usize = 2; + let mut testee = ExecuteCostTable::new(capacity); + + let key1 = Pubkey::new_unique(); + let key2 = Pubkey::new_unique(); + let key3 = Pubkey::new_unique(); + let key4 = Pubkey::new_unique(); + let cost1: u64 = 100; + let cost2: u64 = 110; + let cost3: u64 = 120; + let cost4: u64 = 130; + + // insert one record + testee.upsert(&key1, &cost1); + assert_eq!(1, testee.get_count()); + assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); + + // insert 2nd record + testee.upsert(&key2, &cost2); + assert_eq!(2, testee.get_count()); + assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); + assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); + + // insert 3rd record, pushes out the oldest (eg 1st) record + testee.upsert(&key3, &cost3); + assert_eq!(2, testee.get_count()); + assert_eq!((cost2 + cost3) / 2_u64, testee.get_average()); + assert_eq!(cost3, testee.get_mode()); + assert!(testee.get_cost(&key1).is_none()); + assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); + assert_eq!(&cost3, testee.get_cost(&key3).unwrap()); + + // update 2nd record, so the 3rd becomes the oldest + // add 4th record, pushes out 3rd key + testee.upsert(&key2, &cost1); + testee.upsert(&key4, &cost4); + assert_eq!(((cost1 + cost2) / 2 + cost4) / 2_u64, testee.get_average()); + assert_eq!((cost1 + cost2) / 2, testee.get_mode()); + assert_eq!(2, testee.get_count()); + assert!(testee.get_cost(&key1).is_none()); + assert_eq!(&((cost1 + cost2) / 2), testee.get_cost(&key2).unwrap()); + assert!(testee.get_cost(&key3).is_none()); + assert_eq!(&cost4, testee.get_cost(&key4).unwrap()); + } +} diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 48f44d60ecbed3..523ad2a92b7f0c 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -34,7 +34,7 @@ impl FetchStage { tpu_forwards_sockets, exit, &sender, - &poh_recorder, + poh_recorder, coalesce_ms, ), receiver, @@ -54,8 +54,8 @@ impl FetchStage { tx_sockets, tpu_forwards_sockets, exit, - &sender, - &poh_recorder, + sender, + poh_recorder, coalesce_ms, ) } @@ -108,7 +108,7 @@ impl FetchStage { let 
tpu_threads = sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, sender.clone(), recycler.clone(), "fetch_stage", @@ -121,7 +121,7 @@ impl FetchStage { let tpu_forwards_threads = tpu_forwards_sockets.into_iter().map(|socket| { streamer::receiver( socket, - &exit, + exit, forward_sender.clone(), recycler.clone(), "fetch_forward_stage", diff --git a/core/src/heaviest_subtree_fork_choice.rs b/core/src/heaviest_subtree_fork_choice.rs index 16e27595ccb1ee..df75eaae7269da 100644 --- a/core/src/heaviest_subtree_fork_choice.rs +++ b/core/src/heaviest_subtree_fork_choice.rs @@ -457,7 +457,7 @@ impl HeaviestSubtreeForkChoice { pub fn is_duplicate_confirmed(&self, slot_hash_key: &SlotHashKey) -> Option { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| fork_info.is_duplicate_confirmed()) } @@ -472,7 +472,7 @@ impl HeaviestSubtreeForkChoice { /// Returns false if the node or any of its ancestors have been marked as duplicate pub fn is_candidate(&self, slot_hash_key: &SlotHashKey) -> Option { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| fork_info.is_candidate()) } @@ -585,7 +585,7 @@ impl HeaviestSubtreeForkChoice { for child_key in &fork_info.children { let child_fork_info = self .fork_infos - .get(&child_key) + .get(child_key) .expect("Child must exist in fork_info map"); let child_stake_voted_subtree = child_fork_info.stake_voted_subtree; is_duplicate_confirmed |= child_fork_info.is_duplicate_confirmed; @@ -770,7 +770,7 @@ impl HeaviestSubtreeForkChoice { let epoch = epoch_schedule.get_epoch(new_vote_slot_hash.0); let stake_update = epoch_stakes .get(&epoch) - .map(|epoch_stakes| epoch_stakes.vote_account_stake(&pubkey)) + .map(|epoch_stakes| epoch_stakes.vote_account_stake(pubkey)) .unwrap_or(0); update_operations @@ -896,7 +896,7 @@ impl TreeDiff for HeaviestSubtreeForkChoice { fn children(&self, slot_hash_key: &SlotHashKey) -> Option<&[SlotHashKey]> { self.fork_infos - .get(&slot_hash_key) + .get(slot_hash_key) .map(|fork_info| &fork_info.children[..]) } } @@ -1497,7 +1497,7 @@ mod test { .chain(std::iter::once(&duplicate_leaves_descended_from_4[1])) { assert!(heaviest_subtree_fork_choice - .children(&duplicate_leaf) + .children(duplicate_leaf) .unwrap() .is_empty(),); } @@ -3116,11 +3116,11 @@ mod test { let slot = slot_hash_key.0; if slot <= duplicate_confirmed_slot { assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); } else { assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); } assert!(heaviest_subtree_fork_choice @@ -3139,7 +3139,7 @@ mod test { // 1) Be duplicate confirmed // 2) Have no invalid ancestors assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3149,7 +3149,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should have an invalid ancestor == `invalid_descendant_slot` assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert_eq!( heaviest_subtree_fork_choice @@ -3162,7 +3162,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should not have an invalid ancestor assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) 
.unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3186,7 +3186,7 @@ mod test { // 1) Be duplicate confirmed // 2) Have no invalid ancestors assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3196,7 +3196,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should have an invalid ancestor == `invalid_descendant_slot` assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert_eq!( heaviest_subtree_fork_choice @@ -3209,7 +3209,7 @@ mod test { // 1) Not be duplicate confirmed // 2) Should not have an invalid ancestor assert!(!heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) @@ -3223,7 +3223,7 @@ mod test { heaviest_subtree_fork_choice.mark_fork_valid_candidate(&last_duplicate_confirmed_key); for slot_hash_key in heaviest_subtree_fork_choice.fork_infos.keys() { assert!(heaviest_subtree_fork_choice - .is_duplicate_confirmed(&slot_hash_key) + .is_duplicate_confirmed(slot_hash_key) .unwrap()); assert!(heaviest_subtree_fork_choice .latest_invalid_ancestor(slot_hash_key) diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 8b1a1fc203638c..195601e873f698 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -187,7 +187,7 @@ impl LedgerCleanupService { *last_purge_slot = root; let (slots_to_clean, purge_first_slot, lowest_cleanup_slot, total_shreds) = - Self::find_slots_to_clean(&blockstore, root, max_ledger_shreds); + Self::find_slots_to_clean(blockstore, root, max_ledger_shreds); if slots_to_clean { let purge_complete = Arc::new(AtomicBool::new(false)); diff --git a/core/src/optimistic_confirmation_verifier.rs b/core/src/optimistic_confirmation_verifier.rs index 2f27bc2b785cb0..c5445df46dce33 100644 --- a/core/src/optimistic_confirmation_verifier.rs +++ b/core/src/optimistic_confirmation_verifier.rs @@ -36,7 +36,7 @@ impl OptimisticConfirmationVerifier { .into_iter() .filter(|(optimistic_slot, optimistic_hash)| { (*optimistic_slot == root && *optimistic_hash != root_bank.hash()) - || (!root_ancestors.contains_key(&optimistic_slot) && + || (!root_ancestors.contains_key(optimistic_slot) && // In this second part of the `and`, we account for the possibility that // there was some other root `rootX` set in BankForks where: // diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index 1e65c31d9d4066..9724c57f92bbc9 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -271,7 +271,7 @@ impl PropagatedStats { pub fn add_node_pubkey(&mut self, node_pubkey: &Pubkey, bank: &Bank) { if !self.propagated_node_ids.contains(node_pubkey) { let node_vote_accounts = bank - .epoch_vote_accounts_for_node_id(&node_pubkey) + .epoch_vote_accounts_for_node_id(node_pubkey) .map(|v| &v.vote_accounts); if let Some(node_vote_accounts) = node_vote_accounts { diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index ceb0b2c65f0c83..ddffc264721400 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -224,7 +224,7 @@ impl RepairService { add_votes_elapsed = Measure::start("add_votes"); repair_weight.add_votes( - &blockstore, + blockstore, 
slot_to_vote_pubkeys.into_iter(), root_bank.epoch_stakes_map(), root_bank.epoch_schedule(), @@ -272,7 +272,7 @@ impl RepairService { let mut outstanding_requests = outstanding_requests.write().unwrap(); repairs.into_iter().for_each(|repair_request| { if let Ok((to, req)) = serve_repair.repair_request( - &cluster_slots, + cluster_slots, repair_request, &mut cache, &mut repair_stats, @@ -488,7 +488,7 @@ impl RepairService { repair_validators, ); if let Some((repair_pubkey, repair_addr)) = status.repair_pubkey_and_addr { - let repairs = Self::generate_duplicate_repairs_for_slot(&blockstore, *slot); + let repairs = Self::generate_duplicate_repairs_for_slot(blockstore, *slot); if let Some(repairs) = repairs { let mut outstanding_requests = outstanding_requests.write().unwrap(); @@ -530,7 +530,7 @@ impl RepairService { nonce: Nonce, ) -> Result<()> { let req = - serve_repair.map_repair_request(&repair_type, repair_pubkey, repair_stats, nonce)?; + serve_repair.map_repair_request(repair_type, repair_pubkey, repair_stats, nonce)?; repair_socket.send_to(&req, to)?; Ok(()) } diff --git a/core/src/repair_weight.rs b/core/src/repair_weight.rs index 26cce442e1519a..fe080518a50369 100644 --- a/core/src/repair_weight.rs +++ b/core/src/repair_weight.rs @@ -495,7 +495,7 @@ impl RepairWeight { for ((slot, _), _) in all_slots { *self .slot_to_tree - .get_mut(&slot) + .get_mut(slot) .expect("Nodes in tree must exist in `self.slot_to_tree`") = root2; } } @@ -521,9 +521,9 @@ impl RepairWeight { fn sort_by_stake_weight_slot(slot_stake_voted: &mut Vec<(Slot, u64)>) { slot_stake_voted.sort_by(|(slot, stake_voted), (slot_, stake_voted_)| { if stake_voted == stake_voted_ { - slot.cmp(&slot_) + slot.cmp(slot_) } else { - stake_voted.cmp(&stake_voted_).reverse() + stake_voted.cmp(stake_voted_).reverse() } }); } @@ -757,7 +757,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 8); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 8); } for slot in 0..=1 { assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); @@ -772,7 +772,7 @@ mod test { ); for slot in &[8, 10, 11] { - assert_eq!(*repair_weight.slot_to_tree.get(&slot).unwrap(), 0); + assert_eq!(*repair_weight.slot_to_tree.get(slot).unwrap(), 0); } assert_eq!(repair_weight.trees.len(), 1); assert!(repair_weight.trees.contains_key(&0)); @@ -1088,10 +1088,10 @@ mod test { let purged_slots = vec![0, 1, 2, 4, 8, 10]; let mut expected_unrooted_len = 0; for purged_slot in &purged_slots { - assert!(!repair_weight.slot_to_tree.contains_key(&purged_slot)); - assert!(!repair_weight.trees.contains_key(&purged_slot)); + assert!(!repair_weight.slot_to_tree.contains_key(purged_slot)); + assert!(!repair_weight.trees.contains_key(purged_slot)); if *purged_slot > 3 { - assert!(repair_weight.unrooted_slots.contains(&purged_slot)); + assert!(repair_weight.unrooted_slots.contains(purged_slot)); expected_unrooted_len += 1; } } diff --git a/core/src/repair_weighted_traversal.rs b/core/src/repair_weighted_traversal.rs index 534ef4841d16af..8b6cd0ceb4e8cc 100644 --- a/core/src/repair_weighted_traversal.rs +++ b/core/src/repair_weighted_traversal.rs @@ -101,7 +101,7 @@ pub fn get_best_repair_shreds<'a>( let new_repairs = RepairService::generate_repairs_for_slot( blockstore, slot, - &slot_meta, + slot_meta, max_repairs - repairs.len(), ); repairs.extend(new_repairs); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 5eabc8fe6d5ac3..172de63343ee67 100644 --- a/core/src/replay_stage.rs +++ 
b/core/src/replay_stage.rs @@ -560,7 +560,7 @@ impl ReplayStage { } Self::handle_votable_bank( - &vote_bank, + vote_bank, &poh_recorder, switch_fork_decision, &bank_forks, @@ -754,8 +754,8 @@ impl ReplayStage { Self::initialize_progress_and_fork_choice( &root_bank, frozen_banks, - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, ) } @@ -776,8 +776,8 @@ impl ReplayStage { bank.slot(), ForkProgress::new_from_bank( bank, - &my_pubkey, - &vote_account, + my_pubkey, + vote_account, prev_leader_slot, 0, 0, @@ -878,7 +878,7 @@ impl ReplayStage { .expect("must exist based on earlier check") { descendants - .get_mut(&a) + .get_mut(a) .expect("If exists in ancestor map must exist in descendants map") .retain(|d| *d != slot && !slot_descendants.contains(d)); } @@ -888,9 +888,9 @@ impl ReplayStage { // Purge all the descendants of this slot from both maps for descendant in slot_descendants { - ancestors.remove(&descendant).expect("must exist"); + ancestors.remove(descendant).expect("must exist"); descendants - .remove(&descendant) + .remove(descendant) .expect("must exist based on earlier check"); } descendants @@ -1348,7 +1348,7 @@ impl ReplayStage { ); Self::handle_new_root( new_root, - &bank_forks, + bank_forks, progress, accounts_background_request_sender, highest_confirmed_root, @@ -1454,7 +1454,7 @@ impl ReplayStage { let vote_ix = switch_fork_decision .to_vote_instruction( vote, - &vote_account_pubkey, + vote_account_pubkey, &authorized_voter_keypair.pubkey(), ) .expect("Switch threshold failure should not lead to voting"); @@ -1606,9 +1606,9 @@ impl ReplayStage { leader_schedule_cache: &LeaderScheduleCache, ) { let next_leader_slot = leader_schedule_cache.next_leader_slot( - &my_pubkey, + my_pubkey, bank.slot(), - &bank, + bank, Some(blockstore), GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS, ); @@ -1685,7 +1685,7 @@ impl ReplayStage { let bank_progress = &mut progress.entry(bank.slot()).or_insert_with(|| { ForkProgress::new_from_bank( &bank, - &my_pubkey, + my_pubkey, vote_account, prev_leader_slot, num_blocks_on_fork, @@ -1696,12 +1696,20 @@ impl ReplayStage { let root_slot = bank_forks.read().unwrap().root(); let replay_result = Self::replay_blockstore_into_bank( &bank, - &blockstore, + blockstore, bank_progress, transaction_status_sender, replay_vote_sender, verify_recyclers, ); +<<<<<<< HEAD +======= + Self::update_cost_model(cost_model, &bank_progress.replay_stats.execute_timings); + debug!( + "after replayed into bank, updated cost model instruction cost table, current values: {:?}", + cost_model.read().unwrap().get_instruction_cost_table() + ); +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { @@ -1776,7 +1784,7 @@ impl ReplayStage { ); } } - Self::record_rewards(&bank, &rewards_recorder_sender); + Self::record_rewards(&bank, rewards_recorder_sender); } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -1820,14 +1828,14 @@ impl ReplayStage { my_vote_pubkey, bank_slot, bank.vote_accounts().into_iter(), - &ancestors, + ancestors, |slot| progress.get_hash(slot), latest_validator_votes_for_frozen_banks, ); // Notify any listeners of the votes found in this newly computed // bank heaviest_subtree_fork_choice.compute_bank_stats( - &bank, + bank, tower, latest_validator_votes_for_frozen_banks, ); @@ -1892,6 +1900,35 @@ impl ReplayStage { new_stats } +<<<<<<< HEAD +======= + fn update_cost_model(cost_model: &RwLock<CostModel>, execute_timings: &ExecuteTimings) {
+ let mut cost_model_mutable = cost_model.write().unwrap(); + for (program_id, stats) in &execute_timings.details.per_program_timings { + let cost = stats.0 / stats.1 as u64; + match cost_model_mutable.upsert_instruction_cost(program_id, &cost) { + Ok(c) => { + debug!( + "after replayed into bank, instruction {:?} has averaged cost {}", + program_id, c + ); + } + Err(err) => { + debug!( + "after replayed into bank, instruction {:?} failed to update cost, err: {}", + program_id, err + ); + } + } + } + drop(cost_model_mutable); + debug!( + "after replayed into bank, updated cost model instruction cost table, current values: {:?}", + cost_model.read().unwrap().get_instruction_cost_table() + ); + } + +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) fn update_propagation_status( progress: &mut ProgressMap, slot: Slot, @@ -1984,9 +2021,9 @@ impl ReplayStage { let selected_fork = { let switch_fork_decision = tower.check_switch_threshold( heaviest_bank.slot(), - &ancestors, - &descendants, - &progress, + ancestors, + descendants, + progress, heaviest_bank.total_epoch_stake(), heaviest_bank .epoch_vote_accounts(heaviest_bank.epoch()) @@ -2232,7 +2269,7 @@ impl ReplayStage { .contains(vote_pubkey); leader_propagated_stats.add_vote_pubkey( *vote_pubkey, - leader_bank.epoch_vote_account_stake(&vote_pubkey), + leader_bank.epoch_vote_account_stake(vote_pubkey), ); !exists }); @@ -2704,7 +2741,7 @@ mod tests { &bank1, bank1.collector_id(), validator_node_to_vote_keys - .get(&bank1.collector_id()) + .get(bank1.collector_id()) .unwrap(), Some(0), 0, @@ -2961,7 +2998,7 @@ mod tests { &bad_hash, hashes_per_tick.saturating_sub(1), vec![system_transaction::transfer( - &genesis_keypair, + genesis_keypair, &keypair2.pubkey(), 2, blockhash, @@ -3079,7 +3116,7 @@ mod tests { entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash); let last_entry_hash = entries.last().unwrap().hash; let tx = - system_transaction::transfer(&genesis_keypair, &keypair.pubkey(), 2, blockhash); + system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash); let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]); entries.push(trailing_entry); entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0) @@ -3159,7 +3196,7 @@ mod tests { &mut bank0_progress, None, &replay_vote_sender, - &&VerifyRecyclers::default(), + &VerifyRecyclers::default(), ); let subscriptions = Arc::new(RpcSubscriptions::new( @@ -3199,12 +3236,12 @@ mod tests { #[test] fn test_replay_commitment_cache() { fn leader_vote(vote_slot: Slot, bank: &Arc<Bank>, pubkey: &Pubkey) { - let mut leader_vote_account = bank.get_account(&pubkey).unwrap(); + let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = VoteState::from(&leader_vote_account).unwrap(); vote_state.process_slot_vote_unchecked(vote_slot); let versioned = VoteStateVersions::new_current(vote_state); VoteState::to(&versioned, &mut leader_vote_account).unwrap(); - bank.store_account(&pubkey, &leader_vote_account); + bank.store_account(pubkey, &leader_vote_account); } let leader_pubkey = solana_sdk::pubkey::new_rand(); @@ -3741,7 +3778,7 @@ mod tests { success_index: usize, ) { let stake = 10_000; - let (bank_forks, _, _) = initialize_state(&all_keypairs, stake); + let (bank_forks, _, _) = initialize_state(all_keypairs, stake); let root_bank = bank_forks.root_bank(); let mut propagated_stats = PropagatedStats { total_epoch_stake: stake * all_keypairs.len() as u64, @@ -4375,7 +4412,7 @@ mod tests { ));
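Editor's note: the conflicted `update_cost_model` block above derives a per-program cost as accumulated execution time divided by invocation count, then upserts it into the cost model's instruction cost table. A rough, self-contained sketch of that averaging with stand-in types (`&str` keys instead of `Pubkey`, a plain `HashMap` instead of `CostModel`); the zero-count guard is an addition here, the patch divides unconditionally:

use std::collections::HashMap;

// Each timing entry pairs accumulated execution time (microseconds) with an
// invocation count; the per-program cost is their integer quotient.
fn update_cost_table<'a>(
    cost_table: &mut HashMap<&'a str, u64>,
    per_program_timings: &HashMap<&'a str, (u64, u32)>,
) {
    for (program_id, (total_us, count)) in per_program_timings {
        if *count == 0 {
            continue; // guard the division; the patch assumes count > 0
        }
        let cost = total_us / u64::from(*count);
        cost_table.insert(*program_id, cost);
    }
}

fn main() {
    let mut table: HashMap<&str, u64> = HashMap::new();
    let timings: HashMap<&str, (u64, u32)> =
        HashMap::from([("system_program", (1_200, 4))]);
    update_cost_table(&mut table, &timings);
    assert_eq!(table["system_program"], 300); // 1200 us over 4 calls
}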
assert!(check_map_eq( &descendants, - &bank_forks.read().unwrap().descendants() + bank_forks.read().unwrap().descendants() )); // Try to purge the root @@ -4514,7 +4551,7 @@ mod tests { // Record the vote for 4 tower.record_bank_vote( - &bank_forks.read().unwrap().get(4).unwrap(), + bank_forks.read().unwrap().get(4).unwrap(), &Pubkey::default(), ); @@ -4714,7 +4751,7 @@ mod tests { &cluster_info, refresh_bank, &poh_recorder, - Tower::last_voted_slot_in_bank(&refresh_bank, &my_vote_pubkey).unwrap(), + Tower::last_voted_slot_in_bank(refresh_bank, &my_vote_pubkey).unwrap(), &my_vote_pubkey, &my_vote_keypair, &mut voted_signatures, @@ -4894,12 +4931,12 @@ mod tests { progress, &VoteTracker::default(), &ClusterSlots::default(), - &bank_forks, + bank_forks, heaviest_subtree_fork_choice, latest_validator_votes_for_frozen_banks, ); let (heaviest_bank, heaviest_bank_on_same_fork) = heaviest_subtree_fork_choice - .select_forks(&frozen_banks, &tower, &progress, &ancestors, bank_forks); + .select_forks(&frozen_banks, tower, progress, ancestors, bank_forks); assert!(heaviest_bank_on_same_fork.is_none()); let SelectVoteAndResetForkResult { vote_bank, @@ -4908,8 +4945,8 @@ mod tests { } = ReplayStage::select_vote_and_reset_forks( &heaviest_bank, heaviest_bank_on_same_fork.as_ref(), - &ancestors, - &descendants, + ancestors, + descendants, progress, tower, latest_validator_votes_for_frozen_banks, diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index feb5191ba77278..b6beebec980da6 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -171,7 +171,7 @@ impl ServeRepair { Self::run_window_request( recycler, from, - &from_addr, + from_addr, blockstore, &me.read().unwrap().my_info, *slot, @@ -186,7 +186,7 @@ impl ServeRepair { ( Self::run_highest_window_request( recycler, - &from_addr, + from_addr, blockstore, *slot, *highest_index, @@ -200,7 +200,7 @@ impl ServeRepair { ( Self::run_orphan( recycler, - &from_addr, + from_addr, blockstore, *slot, MAX_ORPHAN_REPAIR_RESPONSES, @@ -256,7 +256,7 @@ impl ServeRepair { let mut time = Measure::start("repair::handle_packets"); for reqs in reqs_v { - Self::handle_packets(obj, &recycler, blockstore, reqs, response_sender, stats); + Self::handle_packets(obj, recycler, blockstore, reqs, response_sender, stats); } time.stop(); if total_packets >= *max_packets { @@ -411,7 +411,7 @@ impl ServeRepair { let (repair_peers, weighted_index) = match cache.entry(slot) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { - let repair_peers = self.repair_peers(&repair_validators, slot); + let repair_peers = self.repair_peers(repair_validators, slot); if repair_peers.is_empty() { return Err(Error::from(ClusterInfoError::NoPeers)); } diff --git a/core/src/serve_repair_service.rs b/core/src/serve_repair_service.rs index dae275a1e1d118..f5b4cdadfc4ab1 100644 --- a/core/src/serve_repair_service.rs +++ b/core/src/serve_repair_service.rs @@ -28,7 +28,7 @@ impl ServeRepairService { ); let t_receiver = streamer::receiver( serve_repair_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "serve_repair_receiver", diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index 2c9a9961a91adf..50a53160743727 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -145,7 +145,7 @@ impl ShredFetchStage { .map(|s| { streamer::receiver( s, - &exit, + exit, packet_sender.clone(), recycler.clone(), "packet_modifier", @@ -174,7 +174,7 @@ impl ShredFetchStage { let (mut tvu_threads, tvu_filter) 
= Self::packet_modifier( sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -184,7 +184,7 @@ impl ShredFetchStage { let (tvu_forwards_threads, fwd_thread_hdl) = Self::packet_modifier( forward_sockets, - &exit, + exit, sender.clone(), recycler.clone(), bank_forks.clone(), @@ -194,7 +194,7 @@ impl ShredFetchStage { let (repair_receiver, repair_handler) = Self::packet_modifier( vec![repair_socket], - &exit, + exit, sender.clone(), recycler, bank_forks, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 4d79cd8f7d6748..dc97a0f595e8c6 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -74,9 +74,9 @@ impl Tpu { let fetch_stage = FetchStage::new_with_sender( transactions_sockets, tpu_forwards_sockets, - &exit, + exit, &packet_sender, - &poh_recorder, + poh_recorder, tpu_coalesce_ms, ); let (verified_sender, verified_receiver) = unbounded(); @@ -88,10 +88,10 @@ impl Tpu { let (verified_vote_packets_sender, verified_vote_packets_receiver) = unbounded(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( - &exit, + exit, cluster_info.clone(), verified_vote_packets_sender, - &poh_recorder, + poh_recorder, vote_tracker, bank_forks, subscriptions.clone(), @@ -104,7 +104,7 @@ impl Tpu { ); let banking_stage = BankingStage::new( - &cluster_info, + cluster_info, poh_recorder, verified_receiver, verified_vote_packets_receiver, @@ -117,7 +117,7 @@ impl Tpu { cluster_info.clone(), entry_receiver, retransmit_slots_receiver, - &exit, + exit, blockstore, shred_version, ); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index a7ba605b6d9a84..5ae14866aff519 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -152,7 +152,7 @@ impl Tvu { repair_socket.clone(), &fetch_sender, Some(bank_forks.clone()), - &exit, + exit, ); let (verified_sender, verified_receiver) = unbounded(); @@ -172,12 +172,16 @@ impl Tvu { bank_forks.clone(), leader_schedule_cache, blockstore.clone(), - &cluster_info, + cluster_info, Arc::new(retransmit_sockets), repair_socket, verified_receiver, +<<<<<<< HEAD &exit, completed_slots_receiver, +======= + exit, +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) cluster_slots_update_receiver, *bank_forks.read().unwrap().working_bank().epoch_schedule(), cfg, @@ -212,7 +216,7 @@ impl Tvu { accounts_hash_receiver, pending_snapshot_package, exit, - &cluster_info, + cluster_info, tvu_config.trusted_validators.clone(), tvu_config.halt_on_trusted_validators_accounts_hash_mismatch, tvu_config.accounts_hash_fault_injection_slots, @@ -300,7 +304,7 @@ impl Tvu { ledger_cleanup_slot_receiver, blockstore.clone(), max_ledger_shreds, - &exit, + exit, compaction_interval, max_compaction_jitter, ) @@ -308,7 +312,7 @@ impl Tvu { let accounts_background_service = AccountsBackgroundService::new( bank_forks.clone(), - &exit, + exit, accounts_background_request_handler, tvu_config.accounts_db_caching_enabled, tvu_config.test_hash_calculation, diff --git a/core/src/unfrozen_gossip_verified_vote_hashes.rs b/core/src/unfrozen_gossip_verified_vote_hashes.rs index 4640e01e72a2c4..30d944754c889a 100644 --- a/core/src/unfrozen_gossip_verified_vote_hashes.rs +++ b/core/src/unfrozen_gossip_verified_vote_hashes.rs @@ -116,7 +116,7 @@ mod tests { if *unfrozen_vote_slot >= frozen_vote_slot { let vote_hashes_map = unfrozen_gossip_verified_vote_hashes .votes_per_slot - .get(&unfrozen_vote_slot) + .get(unfrozen_vote_slot) .unwrap(); assert_eq!(vote_hashes_map.len(), num_duplicate_hashes); for pubkey_votes in vote_hashes_map.values() { diff --git 
a/core/src/validator.rs b/core/src/validator.rs index 122e658480932a..8f2fe577c89461 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -959,7 +959,7 @@ fn post_process_restored_tower( }) .unwrap_or_else(|err| { let voting_has_been_active = - active_vote_account_exists_in_bank(&bank_forks.working_bank(), &vote_account); + active_vote_account_exists_in_bank(&bank_forks.working_bank(), vote_account); if !err.is_file_missing() { datapoint_error!( "tower_error", @@ -992,10 +992,10 @@ fn post_process_restored_tower( } Tower::new_from_bankforks( - &bank_forks, + bank_forks, tower_path, - &validator_identity, - &vote_account, + validator_identity, + vote_account, ) }) } @@ -1063,9 +1063,9 @@ fn new_banks_from_ledger( let tower_path = config.tower_path.as_deref().unwrap_or(ledger_path); - let restored_tower = Tower::restore(tower_path, &validator_identity); + let restored_tower = Tower::restore(tower_path, validator_identity); if let Ok(tower) = &restored_tower { - reconcile_blockstore_roots_with_tower(&tower, &blockstore).unwrap_or_else(|err| { + reconcile_blockstore_roots_with_tower(tower, &blockstore).unwrap_or_else(|err| { error!("Failed to reconcile blockstore with tower: {:?}", err); abort() }); @@ -1167,7 +1167,7 @@ fn new_banks_from_ledger( None, &snapshot_config.snapshot_package_output_path, snapshot_config.archive_format, - Some(&bank_forks.root_bank().get_thread_pool()), + Some(bank_forks.root_bank().get_thread_pool()), snapshot_config.maximum_snapshots_to_retain, ) .unwrap_or_else(|err| { @@ -1179,9 +1179,9 @@ fn new_banks_from_ledger( let tower = post_process_restored_tower( restored_tower, - &validator_identity, - &vote_account, - &config, + validator_identity, + vote_account, + config, tower_path, &bank_forks, ); @@ -1386,7 +1386,7 @@ fn wait_for_supermajority( ); } - let gossip_stake_percent = get_stake_percent_in_gossip(&bank, &cluster_info, i % 10 == 0); + let gossip_stake_percent = get_stake_percent_in_gossip(bank, cluster_info, i % 10 == 0); if gossip_stake_percent >= WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT { break; diff --git a/core/src/window_service.rs b/core/src/window_service.rs index eac0b4c55b55f7..5fbe0861efff63 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -134,7 +134,7 @@ fn verify_repair( .map(|repair_meta| { outstanding_requests.register_response( repair_meta.nonce, - &shred, + shred, solana_sdk::timing::timestamp(), ) }) @@ -153,7 +153,7 @@ fn prune_shreds_invalid_repair( let mut outstanding_requests = outstanding_requests.write().unwrap(); shreds.retain(|shred| { let should_keep = ( - verify_repair(&mut outstanding_requests, &shred, &repair_infos[i]), + verify_repair(&mut outstanding_requests, shred, &repair_infos[i]), i += 1, ) .0; @@ -630,7 +630,7 @@ mod test { keypair: &Arc<Keypair>, ) -> Vec<Shred> { let shredder = Shredder::new(slot, parent, keypair.clone(), 0, 0).unwrap(); - shredder.entries_to_shreds(&entries, true, 0).0 + shredder.entries_to_shreds(entries, true, 0).0 } #[test] diff --git a/core/tests/fork-selection.rs b/core/tests/fork-selection.rs index 23396a10d9d16f..cfeda12228486a 100644 --- a/core/tests/fork-selection.rs +++ b/core/tests/fork-selection.rs @@ -188,7 +188,7 @@ impl Tower { .delayed_votes .iter() .enumerate() - .map(|(i, v)| (*scores.get(&v).unwrap_or(&0), v.time, i)) + .map(|(i, v)| (*scores.get(v).unwrap_or(&0), v.time, i)) .collect(); // highest score, latest vote first best.sort_unstable(); @@ -542,7 +542,7 @@ fn test_with_partitions( let mut scores: HashMap<Vote, usize> = HashMap::new(); towers.iter().for_each(|n|
{ n.delayed_votes.iter().for_each(|v| { - *scores.entry(v.clone()).or_insert(0) += n.score(&v, &fork_tree); + *scores.entry(v.clone()).or_insert(0) += n.score(v, &fork_tree); }) }); for tower in towers.iter_mut() { diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 04cb77426acf41..0a3ed2f34388d4 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -149,7 +149,7 @@ mod tests { let check_hash_calculation = false; let (deserialized_bank, _timing) = snapshot_utils::bank_from_archive( - &account_paths, + account_paths, &[], &old_bank_forks .snapshot_config @@ -216,7 +216,7 @@ mod tests { }; for slot in 0..last_slot { let mut bank = Bank::new_from_parent(&bank_forks[slot], &Pubkey::default(), slot + 1); - f(&mut bank, &mint_keypair); + f(&mut bank, mint_keypair); let bank = bank_forks.insert(bank); // Set root to make sure we don't end up with too many account storage entries // and to allow snapshotting of bank and the purging logic on status_cache to @@ -250,7 +250,7 @@ mod tests { .unwrap(); let snapshot_package = snapshot_utils::process_accounts_package_pre( snapshot_package, - Some(&last_bank.get_thread_pool()), + Some(last_bank.get_thread_pool()), ); snapshot_utils::archive_snapshot_package( &snapshot_package, @@ -277,12 +277,12 @@ mod tests { |bank, mint_keypair| { let key1 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key1, 1, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key1, 1, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); let key2 = Keypair::new().pubkey(); let tx = - system_transaction::transfer(&mint_keypair, &key2, 0, bank.last_blockhash()); + system_transaction::transfer(mint_keypair, &key2, 0, bank.last_blockhash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.freeze(); @@ -294,7 +294,7 @@ mod tests { fn goto_end_of_slot(bank: &mut Bank) { let mut tick_hash = bank.last_blockhash(); loop { - tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]); + tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); bank.register_tick(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); @@ -349,7 +349,7 @@ mod tests { ); let slot = bank.slot(); let key1 = Keypair::new().pubkey(); - let tx = system_transaction::transfer(&mint_keypair, &key1, 1, genesis_config.hash()); + let tx = system_transaction::transfer(mint_keypair, &key1, 1, genesis_config.hash()); assert_eq!(bank.process_transaction(&tx), Ok(())); bank.squash(); let accounts_hash = bank.update_accounts_hash(); @@ -368,9 +368,9 @@ mod tests { snapshot_utils::snapshot_bank( &bank, vec![], - &package_sender, - &snapshot_path, - &snapshot_package_output_path, + package_sender, + snapshot_path, + snapshot_package_output_path, snapshot_config.snapshot_version, &snapshot_config.archive_format, None, @@ -428,7 +428,7 @@ mod tests { // Purge all the outdated snapshots, including the ones needed to generate the package // currently sitting in the channel - snapshot_utils::purge_old_snapshots(&snapshot_path); + snapshot_utils::purge_old_snapshots(snapshot_path); assert!(snapshot_utils::get_snapshot_paths(&snapshots_dir) .into_iter() .map(|path| path.slot) @@ -575,14 +575,14 @@ mod tests { (MAX_CACHE_ENTRIES * 2 + 1) as u64, |bank, mint_keypair| { let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key1, 1, bank.parent().unwrap().last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( - &mint_keypair, + mint_keypair, &key2, 1, 
bank.parent().unwrap().last_blockhash(), diff --git a/dos/src/main.rs b/dos/src/main.rs index 891f9c9fa3fc6b..191131fd8ada70 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -96,14 +96,14 @@ fn run_dos( let res = rpc_client .as_ref() .unwrap() - .get_account(&Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap()); + .get_account(&Pubkey::from_str(data_input.as_ref().unwrap()).unwrap()); if res.is_err() { error_count += 1; } } "get_program_accounts" => { let res = rpc_client.as_ref().unwrap().get_program_accounts( - &Pubkey::from_str(&data_input.as_ref().unwrap()).unwrap(), + &Pubkey::from_str(data_input.as_ref().unwrap()).unwrap(), ); if res.is_err() { error_count += 1; diff --git a/faucet/src/faucet.rs b/faucet/src/faucet.rs index c2831ee627d926..b253a6543567ef 100644 --- a/faucet/src/faucet.rs +++ b/faucet/src/faucet.rs @@ -654,7 +654,7 @@ mod tests { #[test] fn test_process_faucet_request() { let to = solana_sdk::pubkey::new_rand(); - let blockhash = Hash::new(&to.as_ref()); + let blockhash = Hash::new(to.as_ref()); let lamports = 50; let req = FaucetRequest::GetAirdrop { lamports, @@ -679,6 +679,6 @@ mod tests { assert_eq!(expected_vec_with_length, response_vec); let bad_bytes = "bad bytes".as_bytes(); - assert!(faucet.process_faucet_request(&bad_bytes, ip).is_err()); + assert!(faucet.process_faucet_request(bad_bytes, ip).is_err()); } } diff --git a/faucet/tests/local-faucet.rs b/faucet/tests/local-faucet.rs index 841255274365ff..8629c68ac25dfe 100644 --- a/faucet/tests/local-faucet.rs +++ b/faucet/tests/local-faucet.rs @@ -12,7 +12,7 @@ fn test_local_faucet() { let keypair = Keypair::new(); let to = solana_sdk::pubkey::new_rand(); let lamports = 50; - let blockhash = Hash::new(&to.as_ref()); + let blockhash = Hash::new(to.as_ref()); let create_instruction = system_instruction::transfer(&keypair.pubkey(), &to, lamports); let message = Message::new(&[create_instruction], Some(&keypair.pubkey())); let expected_tx = Transaction::new(&[&keypair], message, blockhash); diff --git a/frozen-abi/macro/src/lib.rs b/frozen-abi/macro/src/lib.rs index bd285a826c9acc..ee0a4fdf2b448a 100644 --- a/frozen-abi/macro/src/lib.rs +++ b/frozen-abi/macro/src/lib.rs @@ -224,7 +224,7 @@ fn do_derive_abi_enum_visitor(input: ItemEnum) -> TokenStream { if filter_serde_attrs(&variant.attrs) { continue; }; - let sample_variant = quote_sample_variant(&type_name, &ty_generics, &variant); + let sample_variant = quote_sample_variant(type_name, &ty_generics, variant); variant_count = if let Some(variant_count) = variant_count.checked_add(1) { variant_count } else { @@ -319,7 +319,7 @@ fn test_mod_name(type_name: &Ident) -> Ident { #[cfg(RUSTC_WITH_SPECIALIZATION)] fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! { #input #test @@ -330,7 +330,7 @@ fn frozen_abi_type_alias(input: ItemType, expected_digest: &str) -> TokenStream #[cfg(RUSTC_WITH_SPECIALIZATION)] fn frozen_abi_struct_type(input: ItemStruct, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! 
{ #input #test @@ -387,7 +387,7 @@ fn quote_sample_variant( #[cfg(RUSTC_WITH_SPECIALIZATION)] fn frozen_abi_enum_type(input: ItemEnum, expected_digest: &str) -> TokenStream { let type_name = &input.ident; - let test = quote_for_test(&test_mod_name(type_name), type_name, &expected_digest); + let test = quote_for_test(&test_mod_name(type_name), type_name, expected_digest); let result = quote! { #input #test diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index ebb74e31ca2ada..b9bb57b3641914 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -468,7 +468,7 @@ impl<T: AbiEnumVisitor> AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { info!("AbiEnumVisitor for (&default): {}", type_name::<T>()); // Don't call self.visit_for_abi(...) to avoid the infinite recursion! - T::visit_for_abi(&self, digester) + T::visit_for_abi(self, digester) } } diff --git a/genesis-utils/src/lib.rs b/genesis-utils/src/lib.rs index 1efb26aedd3bf7..513da409ad20cd 100644 --- a/genesis-utils/src/lib.rs +++ b/genesis-utils/src/lib.rs @@ -28,7 +28,7 @@ fn load_local_genesis( ledger_path: &std::path::Path, expected_genesis_hash: Option<Hash>, ) -> Result<GenesisConfig, String> { - let existing_genesis = GenesisConfig::load(&ledger_path) + let existing_genesis = GenesisConfig::load(ledger_path) .map_err(|err| format!("Failed to load genesis config: {}", err))?; check_genesis_hash(&existing_genesis, expected_genesis_hash)?; @@ -54,12 +54,12 @@ pub fn download_then_check_genesis_hash( { unpack_genesis_archive( &tmp_genesis_package, - &ledger_path, + ledger_path, max_genesis_archive_unpacked_size, ) .map_err(|err| format!("Failed to unpack downloaded genesis config: {}", err))?; - let downloaded_genesis = GenesisConfig::load(&ledger_path) + let downloaded_genesis = GenesisConfig::load(ledger_path) .map_err(|err| format!("Failed to load downloaded genesis config: {}", err))?; check_genesis_hash(&downloaded_genesis, expected_genesis_hash)?; diff --git a/genesis/src/genesis_accounts.rs b/genesis/src/genesis_accounts.rs index 61abf74d27d7e1..7bf3504402c8e0 100644 --- a/genesis/src/genesis_accounts.rs +++ b/genesis/src/genesis_accounts.rs @@ -231,20 +231,20 @@ pub fn add_genesis_accounts(genesis_config: &mut GenesisConfig, mut issued_lampo issued_lamports += add_stakes( genesis_config, - &CREATOR_STAKER_INFOS, + CREATOR_STAKER_INFOS, &UNLOCKS_HALF_AT_9_MONTHS, ) + add_stakes( genesis_config, - &SERVICE_STAKER_INFOS, + SERVICE_STAKER_INFOS, &UNLOCKS_ALL_AT_9_MONTHS, ) + add_stakes( genesis_config, - &FOUNDATION_STAKER_INFOS, + FOUNDATION_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, - ) + add_stakes(genesis_config, &GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + ) + add_stakes(genesis_config, GRANTS_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO) + add_stakes( genesis_config, - &COMMUNITY_STAKER_INFOS, + COMMUNITY_STAKER_INFOS, &UNLOCKS_ALL_DAY_ZERO, ); diff --git a/genesis/src/main.rs b/genesis/src/main.rs index cb4ed366c75657..2fb7498337d1ad 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -534,9 +534,9 @@ fn main() -> Result<(), Box<dyn error::Error>> { ); let vote_account = vote_state::create_account_with_authorized( - &identity_pubkey, - &identity_pubkey, - &identity_pubkey, + identity_pubkey, + identity_pubkey, + identity_pubkey, commission, VoteState::get_rent_exempt_reserve(&rent).max(1), ); @@ -546,8 +546,8 @@ fn main() -> Result<(), Box<dyn error::Error>> { stake_state::create_account( bootstrap_stake_authorized_pubkey .as_ref() - .unwrap_or(&identity_pubkey), - &vote_pubkey, + .unwrap_or(identity_pubkey), +
vote_pubkey, &vote_account, &rent, bootstrap_validator_stake_lamports, @@ -782,7 +782,7 @@ mod tests { let pubkey = &pubkey_str.parse().unwrap(); assert_eq!( b64_account.balance, - genesis_config.accounts[&pubkey].lamports, + genesis_config.accounts[pubkey].lamports, ); } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 452c7c0999d2af..835fdfae260b1f 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -265,7 +265,7 @@ impl PruneData { destination: Pubkey::new_unique(), wallclock, }; - prune_data.sign(&self_keypair); + prune_data.sign(self_keypair); prune_data } } @@ -1328,7 +1328,7 @@ impl ClusterInfo { if r_stake == l_stake { peers[*r_info].id.cmp(&peers[*l_info].id) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }) .collect(); @@ -1641,7 +1641,7 @@ impl ClusterInfo { generate_pull_requests: bool, require_stake_for_gossip: bool, ) -> Vec<(SocketAddr, Protocol)> { - self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, &stakes); + self.trim_crds_table(CRDS_UNIQUE_PUBKEY_CAPACITY, stakes); // This will flush local pending push messages before generating // pull-request bloom filters, preventing pull responses to return the // same values back to the node itself. Note that packets will arrive @@ -1652,7 +1652,7 @@ impl ClusterInfo { .add_relaxed(out.len() as u64); if generate_pull_requests { let (pings, pull_requests) = - self.new_pull_requests(&thread_pool, gossip_validators, stakes); + self.new_pull_requests(thread_pool, gossip_validators, stakes); self.stats .packets_sent_pull_requests_count .add_relaxed(pull_requests.len() as u64); @@ -2196,7 +2196,7 @@ impl ClusterInfo { if !responses.is_empty() { let timeouts = { let gossip = self.gossip.read().unwrap(); - gossip.make_timeouts(&stakes, epoch_duration) + gossip.make_timeouts(stakes, epoch_duration) }; for (from, data) in responses { self.handle_pull_response(&from, data, &timeouts); diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 6471c30dac00d6..4485e8e1cc88f6 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -143,14 +143,14 @@ impl ContactInfo { } let tpu = *bind_addr; - let gossip = next_port(&bind_addr, 1); - let tvu = next_port(&bind_addr, 2); - let tpu_forwards = next_port(&bind_addr, 3); - let tvu_forwards = next_port(&bind_addr, 4); - let repair = next_port(&bind_addr, 5); + let gossip = next_port(bind_addr, 1); + let tvu = next_port(bind_addr, 2); + let tpu_forwards = next_port(bind_addr, 3); + let tvu_forwards = next_port(bind_addr, 4); + let repair = next_port(bind_addr, 5); let rpc = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PORT); let rpc_pubsub = SocketAddr::new(bind_addr.ip(), rpc_port::DEFAULT_RPC_PUBSUB_PORT); - let serve_repair = next_port(&bind_addr, 6); + let serve_repair = next_port(bind_addr, 6); Self { id: *pubkey, gossip, diff --git a/gossip/src/crds_gossip.rs b/gossip/src/crds_gossip.rs index 18c7991f3bbeff..8fd8b9423ed31c 100644 --- a/gossip/src/crds_gossip.rs +++ b/gossip/src/crds_gossip.rs @@ -325,7 +325,7 @@ impl CrdsGossip { assert!(timeouts.contains_key(&Pubkey::default())); rv = self .pull - .purge_active(thread_pool, &mut self.crds, now, &timeouts); + .purge_active(thread_pool, &mut self.crds, now, timeouts); } self.crds .trim_purged(now.saturating_sub(5 * self.pull.crds_timeout)); diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 8daa532d9bf869..38319995d16acb 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -277,7 
+277,7 @@ impl CrdsGossipPush { let (weights, peers): (Vec<_>, Vec<_>) = self .push_options( crds, - &self_id, + self_id, self_shred_version, stakes, gossip_validators, diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 02c1b623b6a456..b725579e962411 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -71,7 +71,7 @@ impl Signable for CrdsValue { fn verify(&self) -> bool { self.get_signature() - .verify(&self.pubkey().as_ref(), self.signable_data().borrow()) + .verify(self.pubkey().as_ref(), self.signable_data().borrow()) } } @@ -853,9 +853,9 @@ mod test { wrong_keypair: &Keypair, ) { assert!(!value.verify()); - value.sign(&correct_keypair); + value.sign(correct_keypair); assert!(value.verify()); - value.sign(&wrong_keypair); + value.sign(wrong_keypair); assert!(!value.verify()); serialize_deserialize_value(value, correct_keypair); } diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 589ee3758e7dfc..e06f2577cfd8a1 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -49,7 +49,7 @@ impl GossipService { ); let t_receiver = streamer::receiver( gossip_socket.clone(), - &exit, + exit, request_sender, Recycler::default(), "gossip_receiver", @@ -319,7 +319,7 @@ fn make_gossip_node( gossip_socket, None, should_check_duplicate_instance, - &exit, + exit, ); (gossip_service, ip_echo, cluster_info) } diff --git a/gossip/src/main.rs b/gossip/src/main.rs index 20f667ef24dbe2..6eeeef1a193bd7 100644 --- a/gossip/src/main.rs +++ b/gossip/src/main.rs @@ -225,7 +225,7 @@ fn process_spy(matches: &ArgMatches) -> std::io::Result<()> { .value_of("node_pubkey") .map(|pubkey_str| pubkey_str.parse::<Pubkey>().unwrap()); let shred_version = value_t_or_exit!(matches, "shred_version", u16); - let identity_keypair = keypair_of(&matches, "identity").map(Arc::new); + let identity_keypair = keypair_of(matches, "identity").map(Arc::new); let entrypoint_addr = parse_entrypoint(matches); @@ -270,7 +270,7 @@ fn parse_entrypoint(matches: &ArgMatches) -> Option<SocketAddr> { fn process_rpc_url(matches: &ArgMatches) -> std::io::Result<()> { let any = matches.is_present("any"); let all = matches.is_present("all"); - let entrypoint_addr = parse_entrypoint(&matches); + let entrypoint_addr = parse_entrypoint(matches); let timeout = value_t_or_exit!(matches, "timeout", u64); let shred_version = value_t_or_exit!(matches, "shred_version", u16); let (_all_peers, validators) = discover( diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index da4c30191ba7ed..0baa28da5c85c4 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -240,7 +240,7 @@ fn connected_staked_network_create(stakes: &[u64]) -> Network { fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) { let num = network.len(); - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, num * 2, 0.9); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, num * 2, 0.9); trace!( "network_simulator_pull_{}: converged: {} total_bytes: {}", num, bytes_tx ); } @@ -253,7 +253,7 @@ fn network_simulator_pull_only(thread_pool: &ThreadPool, network: &mut Network) fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_convergance: f64) { let num = network.len(); // run for a small amount of time - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, 0, 10, 1.0); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, 0, 10, 1.0); trace!("network_simulator_push_{}: converged: {}", num,
converged); // make sure there is someone in the active set let network_values: Vec<Node> = network.values().cloned().collect(); @@ -292,7 +292,7 @@ fn network_simulator(thread_pool: &ThreadPool, network: &mut Network, max_conver bytes_tx ); // pull for a bit - let (converged, bytes_tx) = network_run_pull(&thread_pool, network, start, end, 1.0); + let (converged, bytes_tx) = network_run_pull(thread_pool, network, start, end, 1.0); total_bytes += bytes_tx; trace!( "network_simulator_push_{}: converged: {} bytes: {} total_bytes: {}", @@ -466,7 +466,7 @@ fn network_run_pull( .lock() .unwrap() .new_pull_request( - &thread_pool, + thread_pool, from.keypair.deref(), now, None, diff --git a/install/src/command.rs b/install/src/command.rs index efe7d7bc0e4843..b8e92843519daa 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -548,7 +548,7 @@ pub fn init( init_or_update(config_file, true, false)?; let path_modified = if !no_modify_path { - add_to_path(&config.active_release_bin_dir().to_str().unwrap()) + add_to_path(config.active_release_bin_dir().to_str().unwrap()) } else { false }; @@ -613,10 +613,10 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), return Ok(()); } - println_name_value("Configuration:", &config_file); + println_name_value("Configuration:", config_file); println_name_value( "Active release directory:", - &config.active_release_dir().to_str().unwrap_or("?"), + config.active_release_dir().to_str().unwrap_or("?"), ); fn print_release_version(config: &Config) { @@ -633,14 +633,14 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), if let Some(explicit_release) = &config.explicit_release { match explicit_release { ExplicitRelease::Semver(release_semver) => { - println_name_value(&format!("{}Release version:", BULLET), &release_semver); + println_name_value(&format!("{}Release version:", BULLET), release_semver); println_name_value( &format!("{}Release URL:", BULLET), &github_release_download_url(release_semver), ); } ExplicitRelease::Channel(release_channel) => { - println_name_value(&format!("{}Release channel:", BULLET), &release_channel); + println_name_value(&format!("{}Release channel:", BULLET), release_channel); println_name_value( &format!("{}Release URL:", BULLET), &release_channel_download_url(release_channel), @@ -659,7 +659,7 @@ pub fn info(config_file: &str, local_info_only: bool, eval: bool) -> Result<(), Some(ref update_manifest) => { println_name_value("Installed version:", ""); print_release_version(&config); - print_update_manifest(&update_manifest); + print_update_manifest(update_manifest); } None => { println_name_value("Installed version:", "None"); diff --git a/install/src/lib.rs b/install/src/lib.rs index 188cdfcd061b5f..f79bdfcf1954dc 100644 --- a/install/src/lib.rs +++ b/install/src/lib.rs @@ -18,7 +18,7 @@ mod stop_process; mod update_manifest; pub fn is_semver(semver: &str) -> Result<(), String> { - match semver::Version::parse(&semver) { + match semver::Version::parse(semver) { Ok(_) => Ok(()), Err(err) => Err(format!("{:?}", err)), } @@ -60,10 +60,10 @@ pub fn explicit_release_of( fn handle_init(matches: &ArgMatches<'_>, config_file: &str) -> Result<(), String> { let json_rpc_url = matches.value_of("json_rpc_url").unwrap(); - let update_manifest_pubkey = pubkey_of(&matches, "update_manifest_pubkey"); + let update_manifest_pubkey = pubkey_of(matches, "update_manifest_pubkey"); let data_dir = matches.value_of("data_dir").unwrap(); let no_modify_path =
matches.is_present("no_modify_path"); - let explicit_release = explicit_release_of(&matches, "explicit_release"); + let explicit_release = explicit_release_of(matches, "explicit_release"); if update_manifest_pubkey.is_none() && explicit_release.is_none() { Err(format!( @@ -98,7 +98,7 @@ pub fn main() -> Result<(), String> { .global(true) .help("Configuration file to use"); match *defaults::CONFIG_FILE { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg.required(true), } }) @@ -115,7 +115,7 @@ pub fn main() -> Result<(), String> { .required(true) .help("Directory to store install data"); match *defaults::DATA_DIR { - Some(ref data_dir) => arg.default_value(&data_dir), + Some(ref data_dir) => arg.default_value(data_dir), None => arg, } }) @@ -181,7 +181,7 @@ pub fn main() -> Result<(), String> { .required(true) .help("Keypair file of the account that funds the deployment"); match *defaults::USER_KEYPAIR { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg, } }) @@ -242,7 +242,7 @@ pub fn main() -> Result<(), String> { let config_file = matches.value_of("config_file").unwrap(); match matches.subcommand() { - ("init", Some(matches)) => handle_init(&matches, &config_file), + ("init", Some(matches)) => handle_init(matches, config_file), ("info", Some(matches)) => { let local_info_only = matches.is_present("local_info_only"); let eval = matches.is_present("eval"); @@ -290,7 +290,7 @@ pub fn main_init() -> Result<(), String> { .takes_value(true) .help("Configuration file to use"); match *defaults::CONFIG_FILE { - Some(ref config_file) => arg.default_value(&config_file), + Some(ref config_file) => arg.default_value(config_file), None => arg.required(true), } }) @@ -303,7 +303,7 @@ pub fn main_init() -> Result<(), String> { .required(true) .help("Directory to store install data"); match *defaults::DATA_DIR { - Some(ref data_dir) => arg.default_value(&data_dir), + Some(ref data_dir) => arg.default_value(data_dir), None => arg, } }) @@ -342,5 +342,5 @@ pub fn main_init() -> Result<(), String> { .get_matches(); let config_file = matches.value_of("config_file").unwrap(); - handle_init(&matches, &config_file) + handle_init(&matches, config_file) } diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index a3feaedf93a4f0..c7c962a58aa850 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -153,9 +153,9 @@ fn output_keypair( ) -> Result<(), Box<dyn error::Error>> { if outfile == STDOUT_OUTFILE_TOKEN { let mut stdout = std::io::stdout(); - write_keypair(&keypair, &mut stdout)?; + write_keypair(keypair, &mut stdout)?; } else { - write_keypair_file(&keypair, outfile)?; + write_keypair_file(keypair, outfile)?; println!("Wrote {} keypair to {}", source, outfile); } Ok(()) } @@ -342,7 +342,7 @@ fn main() -> Result<(), Box<dyn error::Error>> { .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } @@ -539,7 +539,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> { if matches.is_present("outfile") { let outfile = matches.value_of("outfile").unwrap(); - check_for_overwrite(&outfile, &matches); + check_for_overwrite(outfile, matches); write_pubkey_file(outfile, pubkey)?; } else { println!("{}", pubkey); @@ -558,7 +558,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> { match outfile { Some(STDOUT_OUTFILE_TOKEN) => (),
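Editor's note: the `default_value` hunks above are the same lint seen through deref coercion: `Some(ref config_file)` binds a `&String`, and `&String` already coerces to the `&str` that the argument expects, so the extra `&` only built a `&&String`. A self-contained sketch with hypothetical names (this is not the clap API itself):

// `takes_str` stands in for any API expecting `&str`, like clap's default_value.
fn takes_str(s: &str) -> usize {
    s.len()
}

fn main() {
    let maybe_path: Option<String> = Some(String::from("/tmp/config.yml"));
    match maybe_path {
        // `path` is a `&String` here; it coerces to `&str` without help.
        Some(ref path) => {
            // Both calls compile; clippy keeps only the second form.
            assert_eq!(takes_str(&path), takes_str(path));
        }
        None => {}
    }
}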
check_for_overwrite(&outfile, &matches), + Some(outfile) => check_for_overwrite(outfile, matches), None => (), } @@ -577,7 +577,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> { let keypair = keypair_from_seed(seed.as_bytes())?; if let Some(outfile) = outfile { - output_keypair(&keypair, &outfile, "new") + output_keypair(&keypair, outfile, "new") .map_err(|err| format!("Unable to write {}: {}", outfile, err))?; } @@ -600,7 +600,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> { }; if outfile != STDOUT_OUTFILE_TOKEN { - check_for_overwrite(&outfile, &matches); + check_for_overwrite(outfile, matches); } let keypair_name = "recover"; @@ -610,7 +610,7 @@ fn do_main(matches: &ArgMatches<'_>) -> Result<(), Box<dyn error::Error>> { let skip_validation = matches.is_present(SKIP_SEED_PHRASE_VALIDATION_ARG.name); keypair_from_seed_phrase(keypair_name, skip_validation, true, None, true)? }; - output_keypair(&keypair, &outfile, "recovered")?; + output_keypair(&keypair, outfile, "recovered")?; } ("grind", Some(matches)) => { let ignore_case = matches.is_present("ignore_case"); diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs index 38c387404319e7..289cc766841dee 100644 --- a/ledger-tool/src/bigtable.rs +++ b/ledger-tool/src/bigtable.rs @@ -405,7 +405,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let allow_missing_metadata = arg_matches.is_present("allow_missing_metadata"); let force_reupload = arg_matches.is_present("force_reupload"); let blockstore = - crate::open_blockstore(&ledger_path, AccessType::TryPrimaryThenSecondary, None); + crate::open_blockstore(ledger_path, AccessType::TryPrimaryThenSecondary, None); runtime.block_on(upload( blockstore, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ec599b5c516c65..1caf97ade7a5d3 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -134,7 +134,7 @@ fn output_entry( .map(|transaction_status| transaction_status.into()); solana_cli_output::display::println_transaction( - &transaction, + transaction, &transaction_status, " ", None, @@ -453,7 +453,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { let mut lowest_total_stake = 0; for (node_pubkey, (last_vote_slot, vote_state, stake, total_stake)) in &last_votes { all_votes.entry(*node_pubkey).and_modify(|validator_votes| { - validator_votes.remove(&last_vote_slot); + validator_votes.remove(last_vote_slot); }); dot.push(format!( @@ -473,7 +473,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { dot.push(format!( r#" "last vote {}" -> "{}" [style=dashed,label="latest vote"];"#, node_pubkey, - if styled_slots.contains(&last_vote_slot) { + if styled_slots.contains(last_vote_slot) { last_vote_slot.to_string() } else { if *last_vote_slot < lowest_last_vote_slot { @@ -520,7 +520,7 @@ fn graph_forks(bank_forks: &BankForks, include_all_votes: bool) -> String { r#" "{} vote {}" -> "{}" [style=dotted,label="vote"];"#, node_pubkey, vote_slot, - if styled_slots.contains(&vote_slot) { + if styled_slots.contains(vote_slot) { vote_slot.to_string() } else { "...".to_string() @@ -712,8 +712,8 @@ fn load_bank_forks( }; bank_forks_utils::load( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, None, snapshot_config.as_ref(), @@ -723,6 +723,61 @@ fn load_bank_forks( ) } +<<<<<<< HEAD +======= +fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> { + if blockstore.is_dead(slot) { + return Err("Dead
slot".to_string()); + } + + let (entries, _num_shreds, _is_full) = blockstore + .get_slot_entries_with_shred_info(slot, 0, false) + .map_err(|err| format!(" Slot: {}, Failed to load entries, err {:?}", slot, err))?; + + let mut transactions = 0; + let mut programs = 0; + let mut program_ids = HashMap::new(); + let cost_model = CostModel::new(ACCOUNT_MAX_COST, BLOCK_MAX_COST); + let mut cost_tracker = CostTracker::new( + cost_model.get_account_cost_limit(), + cost_model.get_block_cost_limit(), + ); + + for entry in &entries { + transactions += entry.transactions.len(); + for transaction in &entry.transactions { + programs += transaction.message().instructions.len(); + let tx_cost = cost_model.calculate_cost(transaction); + if cost_tracker.try_add(tx_cost).is_err() { + println!( + "Slot: {}, CostModel rejected transaction {:?}, stats {:?}!", + slot, + transaction, + cost_tracker.get_stats() + ); + } + for instruction in &transaction.message().instructions { + let program_id = + transaction.message().account_keys[instruction.program_id_index as usize]; + *program_ids.entry(program_id).or_insert(0) += 1; + } + } + } + + println!( + "Slot: {}, Entries: {}, Transactions: {}, Programs {}, {:?}", + slot, + entries.len(), + transactions, + programs, + cost_tracker.get_stats() + ); + println!(" Programs: {:?}", program_ids); + + Ok(()) +} + +>>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) fn open_genesis_config_by(ledger_path: &Path, matches: &ArgMatches<'_>) -> GenesisConfig { let max_genesis_archive_unpacked_size = value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64); @@ -833,7 +888,7 @@ fn main() { .long("maximum-snapshots-to-retain") .value_name("NUMBER") .takes_value(true) - .default_value(&default_max_snapshot_to_retain) + .default_value(default_max_snapshot_to_retain) .help("Maximum number of snapshots to hold on to during snapshot purge"); let rent = Rent::default(); @@ -1859,14 +1914,14 @@ fn main() { let remove_stake_accounts = arg_matches.is_present("remove_stake_accounts"); let new_hard_forks = hardforks_of(arg_matches, "hard_forks"); - let faucet_pubkey = pubkey_of(&arg_matches, "faucet_pubkey"); + let faucet_pubkey = pubkey_of(arg_matches, "faucet_pubkey"); let faucet_lamports = value_t!(arg_matches, "faucet_lamports", u64).unwrap_or(0); let rent_burn_percentage = value_t!(arg_matches, "rent_burn_percentage", u8); let hashes_per_tick = arg_matches.value_of("hashes_per_tick"); let bootstrap_stake_authorized_pubkey = - pubkey_of(&arg_matches, "bootstrap_stake_authorized_pubkey"); + pubkey_of(arg_matches, "bootstrap_stake_authorized_pubkey"); let bootstrap_validator_lamports = value_t_or_exit!(arg_matches, "bootstrap_validator_lamports", u64); let bootstrap_validator_stake_lamports = @@ -1880,9 +1935,9 @@ fn main() { ); exit(1); } - let bootstrap_validator_pubkeys = pubkeys_of(&arg_matches, "bootstrap_validator"); + let bootstrap_validator_pubkeys = pubkeys_of(arg_matches, "bootstrap_validator"); let accounts_to_remove = - pubkeys_of(&arg_matches, "accounts_to_remove").unwrap_or_default(); + pubkeys_of(arg_matches, "accounts_to_remove").unwrap_or_default(); let snapshot_version = arg_matches .value_of("snapshot_version") @@ -2034,9 +2089,9 @@ fn main() { ); let vote_account = vote_state::create_account_with_authorized( - &identity_pubkey, - &identity_pubkey, - &identity_pubkey, + identity_pubkey, + identity_pubkey, + identity_pubkey, 100, VoteState::get_rent_exempt_reserve(&rent).max(1), ); @@ -2046,8 +2101,8 @@ fn main() { 
&stake_state::create_account( bootstrap_stake_authorized_pubkey .as_ref() - .unwrap_or(&identity_pubkey), - &vote_pubkey, + .unwrap_or(identity_pubkey), + vote_pubkey, &vote_account, &rent, bootstrap_validator_stake_lamports, @@ -2472,7 +2527,7 @@ fn main() { } }; let warped_bank = Bank::new_from_parent_with_tracer( - &base_bank, + base_bank, base_bank.collector_id(), next_epoch, tracer, @@ -2489,7 +2544,7 @@ fn main() { println!("Slot: {} => {}", base_bank.slot(), warped_bank.slot()); println!("Epoch: {} => {}", base_bank.epoch(), warped_bank.epoch()); - assert_capitalization(&base_bank); + assert_capitalization(base_bank); assert_capitalization(&warped_bank); let interest_per_epoch = ((warped_bank.capitalization() as f64) / (base_bank.capitalization() as f64) @@ -2517,7 +2572,7 @@ fn main() { pubkey, account, base_bank - .get_account(&pubkey) + .get_account(pubkey) .map(|a| a.lamports()) .unwrap_or_default(), ) @@ -2716,7 +2771,7 @@ fn main() { ); } - assert_capitalization(&bank); + assert_capitalization(bank); println!("Inflation: {:?}", bank.inflation()); println!("RentCollector: {:?}", bank.rent_collector()); println!("Capitalization: {}", Sol(bank.capitalization())); diff --git a/ledger-tool/tests/basic.rs b/ledger-tool/tests/basic.rs index c9ccf9ae690cd1..4cda481e6ed358 100644 --- a/ledger-tool/tests/basic.rs +++ b/ledger-tool/tests/basic.rs @@ -39,11 +39,11 @@ fn nominal() { let ledger_path = ledger_path.to_str().unwrap(); // Basic validation - let output = run_ledger_tool(&["-l", &ledger_path, "verify"]); + let output = run_ledger_tool(&["-l", ledger_path, "verify"]); assert!(output.status.success()); // Print everything - let output = run_ledger_tool(&["-l", &ledger_path, "print", "-vvv"]); + let output = run_ledger_tool(&["-l", ledger_path, "print", "-vvv"]); assert!(output.status.success()); assert_eq!(count_newlines(&output.stdout), ticks + meta_lines); } diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index bc416f8795e83d..9a8d5321c2250a 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -58,8 +58,8 @@ pub fn load( ) { return load_from_snapshot( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, shrink_paths, snapshot_config, @@ -79,8 +79,8 @@ pub fn load( } load_from_genesis( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, process_options, cache_block_meta_sender, @@ -97,8 +97,8 @@ fn load_from_genesis( info!("Processing ledger from genesis"); to_loadresult( blockstore_processor::process_blockstore( - &genesis_config, - &blockstore, + genesis_config, + blockstore, account_paths, process_options, cache_block_meta_sender, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 4cee093610f539..5220af916e20f8 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -705,7 +705,7 @@ impl Blockstore { for (&(slot, set_index), erasure_meta) in erasure_metas.iter() { let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; - match erasure_meta.status(&index) { + match erasure_meta.status(index) { ErasureMetaStatus::CanRecover => { Self::recover_shreds( index, @@ -838,7 +838,7 @@ impl Blockstore { let mut num_recovered_exists = 0; if let Some(leader_schedule_cache) = leader_schedule { let recovered_data = Self::try_shred_recovery( - &db, + db, &erasure_metas, &mut index_working_set, &mut just_inserted_data_shreds, @@ -1135,14 +1135,14 @@ impl Blockstore { let maybe_shred = 
self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(&potential_shred, shred) { conflicting_shred = Some(potential_shred.payload); } break; } else if let Some(potential_shred) = just_received_coding_shreds.get(&(slot, coding_index)) { - if Self::erasure_mismatch(&potential_shred, &shred) { + if Self::erasure_mismatch(potential_shred, shred) { conflicting_shred = Some(potential_shred.payload.clone()); } break; @@ -1183,7 +1183,7 @@ impl Blockstore { let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); if !is_trusted { - if Self::is_data_shred_present(&shred, slot_meta, &index_meta.data()) { + if Self::is_data_shred_present(&shred, slot_meta, index_meta.data()) { handle_duplicate(shred); return Err(InsertDataShredError::Exists); } @@ -1474,7 +1474,7 @@ impl Blockstore { index as u32, new_consumed, shred.reference_tick(), - &data_index, + data_index, ); if slot_meta.is_full() { datapoint_info!( @@ -1694,7 +1694,7 @@ impl Blockstore { } break; } - let (current_slot, index) = C::index(&db_iterator.key().expect("Expect a valid key")); + let (current_slot, index) = C::index(db_iterator.key().expect("Expect a valid key")); let current_index = { if current_slot > slot { @@ -1707,7 +1707,7 @@ impl Blockstore { let upper_index = cmp::min(current_index, end_index); // the tick that will be used to figure out the timeout for this hole let reference_tick = u64::from(Shred::reference_tick_from_data( - &db_iterator.value().expect("couldn't read value"), + db_iterator.value().expect("couldn't read value"), )); if ticks_since_first_insert < reference_tick + MAX_TURBINE_DELAY_IN_TICKS { @@ -2445,7 +2445,7 @@ impl Blockstore { address_signatures.extend( signatures .into_iter() - .filter(|(_, signature)| !excluded_signatures.contains(&signature)), + .filter(|(_, signature)| !excluded_signatures.contains(signature)), ) } else { address_signatures.append(&mut signatures); @@ -2528,7 +2528,7 @@ impl Blockstore { next_primary_index_iter_timer.stop(); let mut address_signatures: Vec<(Slot, Signature)> = address_signatures .into_iter() - .filter(|(_, signature)| !until_excluded_signatures.contains(&signature)) + .filter(|(_, signature)| !until_excluded_signatures.contains(signature)) .collect(); address_signatures.truncate(limit); @@ -3006,7 +3006,7 @@ impl Blockstore { } pub fn scan_and_fix_roots(&self, exit: &Arc<AtomicBool>) -> Result<()> { - let ancestor_iterator = AncestorIterator::new(self.last_root(), &self) + let ancestor_iterator = AncestorIterator::new(self.last_root(), self) .take_while(|&slot| slot >= self.lowest_cleanup_slot()); let mut find_missing_roots = Measure::start("find_missing_roots"); @@ -3291,8 +3291,8 @@ fn commit_slot_meta_working_set( } // Check if the working copy of the metadata has changed if Some(meta) != meta_backup.as_ref() { - should_signal = should_signal || slot_has_updates(meta, &meta_backup); - write_batch.put::<cf::SlotMeta>(*slot, &meta)?; + should_signal = should_signal || slot_has_updates(meta, meta_backup); + write_batch.put::<cf::SlotMeta>(*slot, meta)?; } } @@ -3443,7 +3443,7 @@ fn handle_chaining_for_slot( traverse_children_mut( db, slot, - &meta, + meta, working_set, new_chained_slots, slot_function, @@ -3533,7 +3533,7 @@ pub fn create_new_ledger( access_type: AccessType, ) -> Result<Hash> { Blockstore::destroy(ledger_path)?; - genesis_config.write(&ledger_path)?; + genesis_config.write(ledger_path)?; // Fill
slot 0 with ticks that link back to the genesis_config to bootstrap the ledger. let blockstore = Blockstore::open_with_access_type(ledger_path, access_type, None, false)?; diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 5a49f7f4dc6a7a..3d8f1335b777ba 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -112,7 +112,7 @@ fn execute_batch( let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new(); let pre_token_balances = if record_token_balances { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -139,7 +139,7 @@ fn execute_batch( if let Some(transaction_status_sender) = transaction_status_sender { let txs = batch.transactions_iter().cloned().collect(); let post_token_balances = if record_token_balances { - collect_token_balances(&bank, &batch, &mut mint_decimals) + collect_token_balances(bank, batch, &mut mint_decimals) } else { vec![] }; @@ -327,7 +327,7 @@ fn process_entries_with_callback( timings, )?; for hash in tick_hashes { - bank.register_tick(&hash); + bank.register_tick(hash); } Ok(()) } @@ -396,7 +396,7 @@ pub fn process_blockstore( // Setup bank for slot 0 let bank0 = Bank::new_with_paths( - &genesis_config, + genesis_config, account_paths, &opts.frozen_accounts, opts.debug_keys.clone(), @@ -896,9 +896,9 @@ fn process_next_slots( // handles any partials if next_meta.is_full() { let next_bank = Arc::new(Bank::new_from_parent( - &bank, + bank, &leader_schedule_cache - .slot_leader_at(*next_slot, Some(&bank)) + .slot_leader_at(*next_slot, Some(bank)) .unwrap(), *next_slot, )); @@ -1048,7 +1048,7 @@ fn load_frozen_forks( *root = new_root_bank.slot(); last_root = new_root_bank.slot(); - leader_schedule_cache.set_root(&new_root_bank); + leader_schedule_cache.set_root(new_root_bank); new_root_bank.squash(); if last_free.elapsed() > Duration::from_secs(10) { @@ -3093,7 +3093,7 @@ pub mod tests { account_paths: Vec<PathBuf>, ) -> EpochSchedule { let bank = Bank::new_with_paths( - &genesis_config, + genesis_config, account_paths, &[], None, @@ -3274,7 +3274,7 @@ pub mod tests { slot_leader_keypair: &Arc<Keypair>, ) { // Add votes to `last_slot` so that `root` will be confirmed - let vote_entry = next_entry(&parent_blockhash, 1, vec![vote_tx]); + let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]); let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash); entries.insert(0, vote_entry); blockstore @@ -3285,7 +3285,7 @@ pub mod tests { ticks_per_slot, Some(parent_slot), true, - &slot_leader_keypair, + slot_leader_keypair, entries, 0, ) diff --git a/ledger/src/entry.rs b/ledger/src/entry.rs index 8e7ea398aeecba..2c45bf9c9bcb72 100644 --- a/ledger/src/entry.rs +++ b/ledger/src/entry.rs @@ -682,7 +682,7 @@ impl EntrySlice for [Entry] { } pub fn next_entry_mut(start: &mut Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry { - let entry = Entry::new(&start, num_hashes, transactions); + let entry = Entry::new(start, num_hashes, transactions); *start = entry.hash; entry } @@ -737,7 +737,7 @@ mod tests { #[test] fn test_entry_verify() { let zero = Hash::default(); - let one = hash(&zero.as_ref()); + let one = hash(zero.as_ref()); assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case, never used assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step @@ -826,7 +826,7 @@ mod tests { fn test_verify_slice1() { solana_logger::setup(); let zero =
Hash::default(); - let one = hash(&zero.as_ref()); + let one = hash(zero.as_ref()); assert!(vec![][..].verify(&zero)); // base case assert!(vec![Entry::new_tick(0, &zero)][..].verify(&zero)); // singleton case 1 assert!(!vec![Entry::new_tick(0, &zero)][..].verify(&one)); // singleton case 2, bad @@ -841,8 +841,8 @@ mod tests { fn test_verify_slice_with_hashes1() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); assert!(vec![][..].verify(&one)); // base case assert!(vec![Entry::new_tick(1, &two)][..].verify(&one)); // singleton case 1 assert!(!vec![Entry::new_tick(1, &two)][..].verify(&two)); // singleton case 2, bad @@ -861,8 +861,8 @@ mod tests { fn test_verify_slice_with_hashes_and_transactions() { solana_logger::setup(); let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); let alice_keypair = Keypair::new(); let bob_keypair = Keypair::new(); let tx0 = system_transaction::transfer(&alice_keypair, &bob_keypair.pubkey(), 1, one); diff --git a/ledger/src/leader_schedule_utils.rs b/ledger/src/leader_schedule_utils.rs index 47df87242824a4..ea21a79b736e7d 100644 --- a/ledger/src/leader_schedule_utils.rs +++ b/ledger/src/leader_schedule_utils.rs @@ -63,9 +63,9 @@ fn sort_stakes(stakes: &mut Vec<(Pubkey, u64)>) { // Note: Use unstable sort, because we dedup right after to remove the equal elements. stakes.sort_unstable_by(|(l_pubkey, l_stake), (r_pubkey, r_stake)| { if r_stake == l_stake { - r_pubkey.cmp(&l_pubkey) + r_pubkey.cmp(l_pubkey) } else { - r_stake.cmp(&l_stake) + r_stake.cmp(l_stake) } }); diff --git a/ledger/src/poh.rs b/ledger/src/poh.rs index 0ade8d7a75a444..23521c975900c4 100644 --- a/ledger/src/poh.rs +++ b/ledger/src/poh.rs @@ -63,7 +63,7 @@ impl Poh { let num_hashes = std::cmp::min(self.remaining_hashes - 1, max_num_hashes); for _ in 0..num_hashes { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); } self.num_hashes += num_hashes; self.remaining_hashes -= num_hashes; @@ -77,7 +77,7 @@ impl Poh { return None; // Caller needs to `tick()` first } - self.hash = hashv(&[&self.hash.as_ref(), &mixin.as_ref()]); + self.hash = hashv(&[self.hash.as_ref(), mixin.as_ref()]); let num_hashes = self.num_hashes + 1; self.num_hashes = 0; self.remaining_hashes -= 1; @@ -89,7 +89,7 @@ } pub fn tick(&mut self) -> Option<PohEntry> { - self.hash = hash(&self.hash.as_ref()); + self.hash = hash(self.hash.as_ref()); self.num_hashes += 1; self.remaining_hashes -= 1; @@ -115,7 +115,7 @@ pub fn compute_hash_time_ns(hashes_sample_size: u64) -> u64 { let mut v = Hash::default(); let start = Instant::now(); for _ in 0..hashes_sample_size { - v = hash(&v.as_ref()); + v = hash(v.as_ref()); } start.elapsed().as_nanos() as u64 } @@ -139,11 +139,11 @@ mod tests { assert_ne!(entry.num_hashes, 0); for _ in 1..entry.num_hashes { - current_hash = hash(&current_hash.as_ref()); + current_hash = hash(current_hash.as_ref()); } current_hash = match mixin { - Some(mixin) => hashv(&[&current_hash.as_ref(), &mixin.as_ref()]), - None => hash(&current_hash.as_ref()), + Some(mixin) => hashv(&[current_hash.as_ref(), mixin.as_ref()]), + None => hash(current_hash.as_ref()), }; if current_hash != entry.hash { return false; } @@ -192,9 +192,9 @@ mod tests { #[test] fn test_poh_verify() { let zero = Hash::default(); - let one = hash(&zero.as_ref()); - let two = hash(&one.as_ref()); -
let one_with_zero = hashv(&[&zero.as_ref(), &zero.as_ref()]); + let one = hash(zero.as_ref()); + let two = hash(one.as_ref()); + let one_with_zero = hashv(&[zero.as_ref(), zero.as_ref()]); let mut poh = Poh::new(zero, None); assert!(verify( @@ -262,7 +262,7 @@ mod tests { ( PohEntry { num_hashes: 1, - hash: hash(&one_with_zero.as_ref()), + hash: hash(one_with_zero.as_ref()), }, None ) diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index f1d6ff92c9d5e4..8549562ebaed4a 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -840,7 +840,7 @@ impl Shredder { first_index: usize, slot: Slot, ) -> std::result::Result, reed_solomon_erasure::Error> { - Self::verify_consistent_shred_payload_sizes(&"try_recovery()", &shreds)?; + Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?; let mut recovered_data = vec![]; let fec_set_size = num_data + num_coding; @@ -933,7 +933,7 @@ impl Shredder { pub fn deshred(shreds: &[Shred]) -> std::result::Result, reed_solomon_erasure::Error> { use reed_solomon_erasure::Error::TooFewDataShards; const SHRED_DATA_OFFSET: usize = SIZE_OF_COMMON_SHRED_HEADER + SIZE_OF_DATA_SHRED_HEADER; - Self::verify_consistent_shred_payload_sizes(&"deshred()", shreds)?; + Self::verify_consistent_shred_payload_sizes("deshred()", shreds)?; let index = shreds.first().ok_or(TooFewDataShards)?.index(); let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i); let data_complete = { diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 4f6511078a22ad..42ae66d83d97fa 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -312,7 +312,7 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) { ); let signature = keypair.sign_message(&packet.data[msg_start..msg_end]); trace!("signature {:?}", signature); - packet.data[0..sig_end].copy_from_slice(&signature.as_ref()); + packet.data[0..sig_end].copy_from_slice(signature.as_ref()); } pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) { @@ -364,7 +364,7 @@ pub fn sign_shreds_gpu( let mut elems = Vec::new(); let offset: usize = pinned_keypair.len(); - let num_keypair_packets = vec_size_in_packets(&pinned_keypair); + let num_keypair_packets = vec_size_in_packets(pinned_keypair); let mut num_packets = num_keypair_packets; //should be zero diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index dfb95a42a40613..f37759b007f5b4 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -63,10 +63,10 @@ pub fn spend_and_verify_all_nodes( .get_recent_blockhash_with_commitment(CommitmentConfig::confirmed()) .unwrap(); let mut transaction = - system_transaction::transfer(&funding_keypair, &random_keypair.pubkey(), 1, blockhash); + system_transaction::transfer(funding_keypair, &random_keypair.pubkey(), 1, blockhash); let confs = VOTE_THRESHOLD_DEPTH + 1; let sig = client - .retry_transfer_until_confirmed(&funding_keypair, &mut transaction, 10, confs) + .retry_transfer_until_confirmed(funding_keypair, &mut transaction, 10, confs) .unwrap(); for validator in &cluster_nodes { if ignore_nodes.contains(&validator.id) { @@ -114,14 +114,14 @@ pub fn send_many_transactions( let transfer_amount = thread_rng().gen_range(1, max_tokens_per_transfer); let mut transaction = system_transaction::transfer( - &funding_keypair, + funding_keypair, &random_keypair.pubkey(), transfer_amount, blockhash, ); client - .retry_transfer(&funding_keypair, &mut transaction, 5) + 
.retry_transfer(funding_keypair, &mut transaction, 5) .unwrap(); expected_balances.insert(random_keypair.pubkey(), transfer_amount); @@ -236,7 +236,7 @@ pub fn kill_entry_and_spend_and_verify_rest( .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); let mut transaction = system_transaction::transfer( - &funding_keypair, + funding_keypair, &random_keypair.pubkey(), 1, blockhash, @@ -245,7 +245,7 @@ pub fn kill_entry_and_spend_and_verify_rest( let confs = VOTE_THRESHOLD_DEPTH + 1; let sig = { let sig = client.retry_transfer_until_confirmed( - &funding_keypair, + funding_keypair, &mut transaction, 5, confs, @@ -260,7 +260,7 @@ pub fn kill_entry_and_spend_and_verify_rest( } }; info!("poll_all_nodes_for_signature()"); - match poll_all_nodes_for_signature(&entry_point_info, &cluster_nodes, &sig, confs) { + match poll_all_nodes_for_signature(entry_point_info, &cluster_nodes, &sig, confs) { Err(e) => { info!("poll_all_nodes_for_signature() failed {:?}", e); result = Err(e); @@ -377,7 +377,7 @@ fn poll_all_nodes_for_signature( continue; } let client = create_client(validator.client_facing_addr(), VALIDATOR_PORT_RANGE); - client.poll_for_signature_confirmation(&sig, confs)?; + client.poll_for_signature_confirmation(sig, confs)?; } Ok(()) diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index d455daefbbeba8..c842d5bfe1827b 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -449,7 +449,7 @@ impl LocalCluster { .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); let mut tx = - system_transaction::transfer(&source_keypair, dest_pubkey, lamports, blockhash); + system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); info!( "executing transfer of {} from {} to {}", lamports, @@ -457,7 +457,7 @@ impl LocalCluster { *dest_pubkey ); client - .retry_transfer(&source_keypair, &mut tx, 10) + .retry_transfer(source_keypair, &mut tx, 10) .expect("client transfer"); client .wait_for_balance_with_commitment( @@ -512,7 +512,7 @@ impl LocalCluster { .0, ); client - .retry_transfer(&from_account, &mut transaction, 10) + .retry_transfer(from_account, &mut transaction, 10) .expect("fund vote"); client .wait_for_balance_with_commitment( @@ -616,7 +616,7 @@ impl Cluster for LocalCluster { } fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo { - let mut node = self.validators.remove(&pubkey).unwrap(); + let mut node = self.validators.remove(pubkey).unwrap(); // Shut down the validator let mut validator = node.validator.take().expect("Validator must be running"); @@ -631,7 +631,7 @@ impl Cluster for LocalCluster { cluster_validator_info: &mut ClusterValidatorInfo, ) -> (Node, Option) { // Update the stored ContactInfo for this node - let node = Node::new_localhost_with_pubkey(&pubkey); + let node = Node::new_localhost_with_pubkey(pubkey); cluster_validator_info.info.contact_info = node.info.clone(); cluster_validator_info.config.rpc_addrs = Some((node.info.rpc, node.info.rpc_pubsub)); diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 845beb8812dfcd..f5869cb4a8661b 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -425,7 +425,7 @@ fn run_cluster_partition( fn test_cluster_partition_1_2() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + 
cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1, 1]], @@ -445,7 +445,7 @@ fn test_cluster_partition_1_2() { fn test_cluster_partition_1_1() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1]], @@ -465,7 +465,7 @@ fn test_cluster_partition_1_1() { fn test_cluster_partition_1_1_1() { let empty = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &[vec![1], vec![1], vec![1]], @@ -525,7 +525,7 @@ fn test_kill_heaviest_partition() { let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { info!("Killing validator with id: {}", validator_to_kill); cluster.exit_node(&validator_to_kill); - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_cluster_partition( &partitions, @@ -594,7 +594,7 @@ fn run_kill_partition_switch_threshold( .iter() .map(|validator_to_kill| { info!("Killing validator with id: {}", validator_to_kill); - cluster.exit_node(&validator_to_kill) + cluster.exit_node(validator_to_kill) }) .collect(); on_partition_start( @@ -622,7 +622,7 @@ fn find_latest_replayed_slot_from_ledger( mut latest_slot: Slot, ) -> (Slot, HashSet) { loop { - let mut blockstore = open_blockstore(&ledger_path); + let mut blockstore = open_blockstore(ledger_path); // This is kind of a hack because we can't query for new frozen blocks over RPC // since the validator is not voting. 
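All of the `check_for_new_roots(16, "PARTITION_TEST")` hunks above fix the same clippy lint, `needless_borrow`: a string literal is already a `&str`, so `&"PARTITION_TEST"` builds a `&&str` that the compiler quietly deref-coerces back down. A minimal sketch of the before/after shape, using a hypothetical `check_name` helper in place of the real `check_for_new_roots`:

    // `check_name` is an invented stand-in for a helper that takes `&str`.
    fn check_name(test_name: &str) {
        println!("checking roots for {}", test_name);
    }

    fn main() {
        check_name(&"PARTITION_TEST"); // before: a `&&str`, flagged by clippy::needless_borrow
        check_name("PARTITION_TEST"); // after: the literal already is a `&str`
    }

Both calls compile; the fix only removes a borrow the compiler had to undo anyway.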
let new_latest_slots: Vec = blockstore @@ -644,7 +644,7 @@ fn find_latest_replayed_slot_from_ledger( break; } else { sleep(Duration::from_millis(50)); - blockstore = open_blockstore(&ledger_path); + blockstore = open_blockstore(ledger_path); } } // Check the slot has been replayed @@ -666,7 +666,7 @@ fn find_latest_replayed_slot_from_ledger( ); } else { sleep(Duration::from_millis(50)); - blockstore = open_blockstore(&ledger_path); + blockstore = open_blockstore(ledger_path); } } } else { @@ -870,7 +870,7 @@ fn test_switch_threshold_uses_gossip_votes() { 0, crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()), ), - &node_keypair, + node_keypair, )], context .dead_validator_info @@ -962,7 +962,7 @@ fn test_kill_partition_switch_threshold_no_progress() { |_: &mut LocalCluster, _: &[Pubkey], _: Vec, _: &mut ()| {}; let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_no_new_roots(400, &"PARTITION_TEST"); + cluster.check_no_new_roots(400, "PARTITION_TEST"); }; // This kills `max_failures_stake`, so no progress should be made @@ -1015,7 +1015,7 @@ fn test_kill_partition_switch_threshold_progress() { |_: &mut LocalCluster, _: &[Pubkey], _: Vec, _: &mut ()| {}; let on_before_partition_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_kill_partition_switch_threshold( &[&[(failures_stake as usize, 16)]], @@ -1246,7 +1246,7 @@ fn test_fork_choice_refresh_old_votes() { // for lockouts built during partition to resolve and gives validators an opportunity // to try and switch forks) let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut PartitionContext| { - cluster.check_for_new_roots(16, &"PARTITION_TEST"); + cluster.check_for_new_roots(16, "PARTITION_TEST"); }; run_kill_partition_switch_threshold( @@ -1320,7 +1320,7 @@ fn test_forwarding() { .unwrap(); // Confirm that transactions were forwarded to and processed by the leader. 
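The `open_blockstore(ledger_path)` hunks above show the second common shape of the lint: the argument is already a reference. Inside this test helper, `ledger_path` is a `&Path`, so `open_blockstore(&ledger_path)` passed a `&&Path` that only compiled thanks to deref coercion. A self-contained sketch, with `open_blockstore_stub` as an invented stand-in:

    use std::path::Path;

    // The parameter is already `&Path`; no second `&` is needed at call sites.
    fn open_blockstore_stub(ledger_path: &Path) {
        println!("opening blockstore at {}", ledger_path.display());
    }

    fn poll_ledger(ledger_path: &Path) {
        open_blockstore_stub(&ledger_path); // before: `&&Path`, clippy::needless_borrow
        open_blockstore_stub(ledger_path); // after: pass the reference through unchanged
    }

    fn main() {
        poll_ledger(Path::new("/tmp/ledger"));
    }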
- cluster_tests::send_many_transactions(&validator_info, &cluster.funding_keypair, 10, 20); + cluster_tests::send_many_transactions(validator_info, &cluster.funding_keypair, 10, 20); } #[test] @@ -1532,7 +1532,7 @@ fn test_frozen_account_from_snapshot() { trace!("Waiting for snapshot at {:?}", snapshot_package_output_path); let (archive_filename, _archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); trace!("Found snapshot: {:?}", archive_filename); @@ -1668,7 +1668,7 @@ fn test_snapshot_download() { trace!("Waiting for snapshot"); let (archive_filename, archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); trace!("found: {:?}", archive_filename); let validator_archive_path = snapshot_utils::get_snapshot_archive_path( @@ -1743,7 +1743,7 @@ fn test_snapshot_restart_tower() { .snapshot_package_output_path; let (archive_filename, archive_snapshot_hash) = - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); // Copy archive to validator's snapshot output directory let validator_archive_path = snapshot_utils::get_snapshot_archive_path( @@ -1765,7 +1765,7 @@ fn test_snapshot_restart_tower() { // validator's ContactInfo let restarted_node_info = cluster.get_contact_info(&validator_id).unwrap(); cluster_tests::spend_and_verify_all_nodes( - &restarted_node_info, + restarted_node_info, &cluster.funding_keypair, 1, HashSet::new(), @@ -1926,7 +1926,7 @@ fn test_snapshots_restart_validity() { expected_balances.extend(new_balances); - wait_for_next_snapshot(&cluster, &snapshot_package_output_path); + wait_for_next_snapshot(&cluster, snapshot_package_output_path); // Create new account paths since validator exit is not guaranteed to cleanup RPC threads, // which may delete the old accounts on exit at any point @@ -2019,7 +2019,7 @@ fn test_faulty_node(faulty_node_type: BroadcastStageType) { let cluster = LocalCluster::new(&mut cluster_config); // Check for new roots - cluster.check_for_new_roots(16, &"test_faulty_node"); + cluster.check_for_new_roots(16, "test_faulty_node"); } #[test] @@ -2354,7 +2354,7 @@ fn purge_slots(blockstore: &Blockstore, start_slot: Slot, slot_count: Slot) { } fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { - let tower = Tower::restore(&ledger_path, &node_pubkey); + let tower = Tower::restore(ledger_path, node_pubkey); if let Err(tower_err) = tower { if tower_err.is_file_missing() { return None; @@ -2363,7 +2363,7 @@ fn restore_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { } } // actually saved tower must have at least one vote. 
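The surrounding `Tower::restore` hunks are the multi-argument variant: both `ledger_path: &Path` and `node_pubkey: &Pubkey` arrive as references, and `cargo +nightly clippy --fix` strips the redundant `&` from each call site mechanically. A hypothetical mini `Tower` illustrating the shape (with `&str` standing in for the real `&Pubkey`):

    use std::path::Path;

    struct Tower;

    impl Tower {
        fn restore(ledger_path: &Path, node_pubkey: &str) -> Result<Tower, String> {
            Err(format!(
                "no saved tower for {} under {}",
                node_pubkey,
                ledger_path.display()
            ))
        }
    }

    fn restore_tower(ledger_path: &Path, node_pubkey: &str) -> Option<Tower> {
        // Before the fix this read `Tower::restore(&ledger_path, &node_pubkey)`;
        // both extra borrows created `&&T` values that deref coercion unwound.
        Tower::restore(ledger_path, node_pubkey).ok()
    }

    fn main() {
        assert!(restore_tower(Path::new("/tmp/ledger"), "validator-id").is_none());
    }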
- Tower::restore(&ledger_path, &node_pubkey).ok() + Tower::restore(ledger_path, node_pubkey).ok() } fn last_vote_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option<(Slot, Hash)> { @@ -2375,7 +2375,7 @@ fn root_in_tower(ledger_path: &Path, node_pubkey: &Pubkey) -> Option { } fn remove_tower(ledger_path: &Path, node_pubkey: &Pubkey) { - fs::remove_file(Tower::get_filename(&ledger_path, &node_pubkey)).unwrap(); + fs::remove_file(Tower::get_filename(ledger_path, node_pubkey)).unwrap(); } // A bit convoluted test case; but this roughly follows this test theoretical scenario: @@ -2836,7 +2836,7 @@ fn test_hard_fork_invalidates_tower() { cluster .lock() .unwrap() - .check_for_new_roots(16, &"hard fork"); + .check_for_new_roots(16, "hard fork"); } #[test] @@ -2895,7 +2895,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { let on_partition_before_resolved = |_: &mut LocalCluster, _: &mut ()| {}; let on_partition_resolved = |cluster: &mut LocalCluster, _: &mut ()| { - cluster.check_for_new_roots(20, &"run_test_load_program_accounts_partition"); + cluster.check_for_new_roots(20, "run_test_load_program_accounts_partition"); exit.store(true, Ordering::Relaxed); t_update.join().unwrap(); t_scan.join().unwrap(); @@ -3086,7 +3086,7 @@ fn run_test_load_program_accounts(scan_commitment: CommitmentConfig) { scan_client_sender.send(scan_client).unwrap(); // Wait for some roots to pass - cluster.check_for_new_roots(40, &"run_test_load_program_accounts"); + cluster.check_for_new_roots(40, "run_test_load_program_accounts"); // Exit and ensure no violations of consistency were found exit.store(true, Ordering::Relaxed); diff --git a/measure/src/measure.rs b/measure/src/measure.rs index 3b342605235093..26f32b097cd5e4 100644 --- a/measure/src/measure.rs +++ b/measure/src/measure.rs @@ -216,7 +216,7 @@ mod tests { { let some_struct = SomeStruct { x: 42 }; let (result, _measure) = Measure::this( - |(obj, x)| SomeStruct::add_to(&obj, x), + |(obj, x)| SomeStruct::add_to(obj, x), (&some_struct, 4), "test", ); diff --git a/metrics/src/counter.rs b/metrics/src/counter.rs index 1bbc0dd778516e..29b7ecc526d2e9 100644 --- a/metrics/src/counter.rs +++ b/metrics/src/counter.rs @@ -222,7 +222,7 @@ mod tests { INIT_HOOK.call_once(|| { ENV_LOCK = Some(RwLock::new(())); }); - &ENV_LOCK.as_ref().unwrap() + ENV_LOCK.as_ref().unwrap() } } diff --git a/perf/src/packet.rs b/perf/src/packet.rs index bdd2052c7e5e29..e73a5ad6b7af25 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -66,7 +66,7 @@ impl Packets { pub fn set_addr(&mut self, addr: &SocketAddr) { for m in self.packets.iter_mut() { - m.meta.set_addr(&addr); + m.meta.set_addr(addr); } } diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index a9651071920424..2b5975a6bfc05e 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -519,11 +519,11 @@ mod tests { let packet_offsets = sigverify::get_packet_offsets(&packet, 0); assert_eq!( - memfind(&tx_bytes, &tx.signatures[0].as_ref()), + memfind(&tx_bytes, tx.signatures[0].as_ref()), Some(SIG_OFFSET) ); assert_eq!( - memfind(&tx_bytes, &tx.message().account_keys[0].as_ref()), + memfind(&tx_bytes, tx.message().account_keys[0].as_ref()), Some(packet_offsets.pubkey_start as usize) ); assert_eq!( @@ -531,7 +531,7 @@ mod tests { Some(packet_offsets.msg_start as usize) ); assert_eq!( - memfind(&tx_bytes, &tx.signatures[0].as_ref()), + memfind(&tx_bytes, tx.signatures[0].as_ref()), Some(packet_offsets.sig_start as usize) ); assert_eq!(packet_offsets.sig_len, 1); @@ 
-667,7 +667,7 @@ mod tests { let tx_bytes = serialize(&tx0).unwrap(); assert!(tx_bytes.len() <= PACKET_DATA_SIZE); assert_eq!( - memfind(&tx_bytes, &tx0.signatures[0].as_ref()), + memfind(&tx_bytes, tx0.signatures[0].as_ref()), Some(SIG_OFFSET) ); let tx1 = deserialize(&tx_bytes).unwrap(); diff --git a/poh/benches/poh_verify.rs b/poh/benches/poh_verify.rs index e917a5ebeaad16..b0ae0b7aaecfc6 100644 --- a/poh/benches/poh_verify.rs +++ b/poh/benches/poh_verify.rs @@ -18,7 +18,7 @@ const NUM_ENTRIES: usize = 800; fn bench_poh_verify_ticks(bencher: &mut Bencher) { solana_logger::setup(); let zero = Hash::default(); - let start_hash = hash(&zero.as_ref()); + let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; let mut ticks: Vec = Vec::with_capacity(NUM_ENTRIES); @@ -34,7 +34,7 @@ fn bench_poh_verify_ticks(bencher: &mut Bencher) { #[bench] fn bench_poh_verify_transaction_entries(bencher: &mut Bencher) { let zero = Hash::default(); - let start_hash = hash(&zero.as_ref()); + let start_hash = hash(zero.as_ref()); let mut cur_hash = start_hash; let keypair1 = Keypair::new(); diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 2fd475999c9d89..2a17c68d418d27 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -754,11 +754,11 @@ pub fn create_test_recorder( bank.ticks_per_slot(), &Pubkey::default(), blockstore, - &Arc::new(LeaderScheduleCache::new_from_bank(&bank)), + &Arc::new(LeaderScheduleCache::new_from_bank(bank)), &poh_config, exit.clone(), ); - poh_recorder.set_bank(&bank); + poh_recorder.set_bank(bank); let poh_recorder = Arc::new(Mutex::new(poh_recorder)); let poh_service = PohService::new( diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index c9e0328abdfc05..6e7b9e33e7cce5 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -311,7 +311,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { { let mut program_signer = false; for seeds in signers_seeds.iter() { - let signer = Pubkey::create_program_address(&seeds, &caller).unwrap(); + let signer = Pubkey::create_program_address(seeds, &caller).unwrap(); if instruction_account.pubkey == signer { program_signer = true; break; @@ -324,7 +324,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { } } - invoke_context.record_instruction(&instruction); + invoke_context.record_instruction(instruction); solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction( &message, @@ -769,7 +769,7 @@ impl ProgramTest { // Add commonly-used SPL programs as a convenience to the user for (program_id, account) in programs::spl_programs(&Rent::default()).iter() { - bank.store_account(program_id, &account); + bank.store_account(program_id, account); } // User-supplied additional builtins @@ -782,10 +782,10 @@ impl ProgramTest { } for (address, account) in self.accounts.iter() { - if bank.get_account(&address).is_some() { + if bank.get_account(address).is_some() { info!("Overriding account at {}", address); } - bank.store_account(&address, &account); + bank.store_account(address, account); } bank.set_capitalization(); if let Some(max_units) = self.bpf_compute_max_units { diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index e99c953b496d4d..472c7953053aa2 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -107,9 +107,9 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { 
if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -128,9 +128,9 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); diff --git a/programs/bpf_loader/build.rs b/programs/bpf_loader/build.rs new file mode 100644 index 00000000000000..2c665cd93fd96e --- /dev/null +++ b/programs/bpf_loader/build.rs @@ -0,0 +1,34 @@ +use regex::Regex; +use std::{ + fs::File, + io::{prelude::*, BufWriter, Read}, + path::PathBuf, + process::exit, + str, +}; + +/** + * Extract a list of registered syscall names and save it in a file + * for distribution with the SDK. This file is read by cargo-build-bpf + * to verify undefined symbols in a .so module that cargo-build-bpf has built. + */ +fn main() { + let path = PathBuf::from("src/syscalls.rs"); + let mut file = match File::open(&path) { + Ok(x) => x, + _ => exit(1), + }; + let mut text = vec![]; + file.read_to_end(&mut text).unwrap(); + let text = str::from_utf8(&text).unwrap(); + let path = PathBuf::from("../../sdk/bpf/syscalls.txt"); + let file = match File::create(&path) { + Ok(x) => x, + _ => exit(1), + }; + let mut out = BufWriter::new(file); + let sysc_re = Regex::new(r#"register_syscall_by_name\([[:space:]]*b"([^"]+)","#).unwrap(); + for caps in sysc_re.captures_iter(text) { + writeln!(out, "{}", caps[1].to_string()).unwrap(); + } +} diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 630a9a9dfcd0e5..a724fd1b9b52cf 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -129,7 +129,7 @@ fn write_program_data( ); return Err(InstructionError::AccountDataTooSmall); } - data[program_data_offset..program_data_offset + len].copy_from_slice(&bytes); + data[program_data_offset..program_data_offset + len].copy_from_slice(bytes); Ok(()) } @@ -370,7 +370,7 @@ fn process_loader_upgradeable_instruction( // Create ProgramData account let (derived_address, bump_seed) = - Pubkey::find_program_address(&[program.unsigned_key().as_ref()], &program_id); + Pubkey::find_program_address(&[program.unsigned_key().as_ref()], program_id); if derived_address != *programdata.unsigned_key() { ic_logger_msg!(logger, "ProgramData address is not derived"); return Err(InstructionError::InvalidArgument); @@ -760,7 +760,7 @@ impl Executor for BpfExecutor { let mut serialize_time = Measure::start("serialize"); let keyed_accounts = invoke_context.get_keyed_accounts()?; let mut parameter_bytes = - serialize_parameters(loader_id, program_id, keyed_accounts, &instruction_data)?; + serialize_parameters(loader_id, program_id, keyed_accounts, instruction_data)?; serialize_time.stop(); let mut create_vm_time = Measure::start("create_vm"); let mut execute_time; @@ -2229,7 +2229,7 @@ mod tests { .unwrap(); buffer_account.borrow_mut().data_as_mut_slice() [UpgradeableLoaderState::buffer_data_offset().unwrap()..] 
- .copy_from_slice(&elf_new); + .copy_from_slice(elf_new); let programdata_account = AccountSharedData::new_ref( min_programdata_balance, UpgradeableLoaderState::programdata_len(elf_orig.len().max(elf_new.len())).unwrap(), diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index f9529c017d697b..4f842abebb36d7 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -104,7 +104,7 @@ pub fn serialize_parameters_unaligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.write_all(keyed_account.owner()?.as_ref()) .map_err(|_| InstructionError::InvalidArgument)?; @@ -223,7 +223,7 @@ pub fn serialize_parameters_aligned( .map_err(|_| InstructionError::InvalidArgument)?; v.write_u64::(keyed_account.data_len()? as u64) .map_err(|_| InstructionError::InvalidArgument)?; - v.write_all(&keyed_account.try_account_ref()?.data()) + v.write_all(keyed_account.try_account_ref()?.data()) .map_err(|_| InstructionError::InvalidArgument)?; v.resize( MAX_PERMITTED_DATA_INCREASE @@ -382,9 +382,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -439,9 +439,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i <= accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); @@ -487,9 +487,9 @@ mod tests { .enumerate() .map(|(i, (key, account))| { if i < accounts.len() / 2 { - KeyedAccount::new_readonly(&key, false, &account) + KeyedAccount::new_readonly(key, false, account) } else { - KeyedAccount::new(&key, false, &account) + KeyedAccount::new(key, false, account) } }) .collect(); diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index 87e06a0c6c1b5f..394e06aa55caed 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -583,7 +583,7 @@ impl<'a> SyscallObject for SyscallPanic<'a> { memory_mapping, file, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| Err(SyscallError::Panic(string.to_string(), line, column).into()), ); @@ -614,7 +614,7 @@ impl<'a> SyscallObject for SyscallLog<'a> { memory_mapping, addr, len, - &self.loader_id, + self.loader_id, self.enforce_aligned_host_addrs, &mut |string: &str| { stable_log::program_log(&self.logger, string); @@ -1978,7 +1978,7 @@ where let mut accounts = Vec::with_capacity(account_keys.len()); let mut refs = Vec::with_capacity(account_keys.len()); for (i, ref account_key) in account_keys.iter().enumerate() { - let account = invoke_context.get_account(&account_key).ok_or_else(|| { + let account = invoke_context.get_account(account_key).ok_or_else(|| { ic_msg!( invoke_context, "Instruction references an unknown account {}", @@ -2142,7 +2142,7 @@ fn call<'a>( let instruction = syscall.translate_instruction( instruction_addr, - &memory_mapping, + memory_mapping, 
enforce_aligned_host_addrs, )?; let signers = syscall.translate_signers( diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index 09743d38c35357..a7ac27313afd49 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -30,7 +30,7 @@ pub fn process_instruction( return Err(InstructionError::InvalidAccountOwner); } - deserialize(&config_account.data()).map_err(|err| { + deserialize(config_account.data()).map_err(|err| { ic_msg!( invoke_context, "Unable to deserialize config account: {}", @@ -130,7 +130,7 @@ pub fn process_instruction( config_keyed_account .try_account_ref_mut()? .data_as_mut_slice()[..data.len()] - .copy_from_slice(&data); + .copy_from_slice(data); Ok(()) } @@ -216,7 +216,7 @@ mod tests { let (_, config_account) = create_config_account(keys); assert_eq!( Some(MyConfig::default()), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -241,7 +241,7 @@ mod tests { ); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -321,11 +321,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( Some(my_config), - deserialize(get_config_data(&config_account.borrow().data()).unwrap()).ok() + deserialize(get_config_data(config_account.borrow().data()).unwrap()).ok() ); } @@ -454,11 +454,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + MyConfig::deserialize(get_config_data(config_account.borrow().data()).unwrap()) .unwrap() ); @@ -646,11 +646,11 @@ mod tests { ), Ok(()) ); - let meta_data: ConfigKeys = deserialize(&config_account.borrow().data()).unwrap(); + let meta_data: ConfigKeys = deserialize(config_account.borrow().data()).unwrap(); assert_eq!(meta_data.keys, keys); assert_eq!( new_config, - MyConfig::deserialize(get_config_data(&config_account.borrow().data()).unwrap()) + MyConfig::deserialize(get_config_data(config_account.borrow().data()).unwrap()) .unwrap() ); diff --git a/programs/config/src/date_instruction.rs b/programs/config/src/date_instruction.rs index c4f732644a8d41..7bc4ae41450658 100644 --- a/programs/config/src/date_instruction.rs +++ b/programs/config/src/date_instruction.rs @@ -54,5 +54,5 @@ pub fn create_account( /// transaction containing this instruction. 
pub fn store(date_pubkey: &Pubkey, date: Date) -> Instruction { let date_config = DateConfig::new(date); - config_instruction::store(&date_pubkey, true, vec![], &date_config) + config_instruction::store(date_pubkey, true, vec![], &date_config) } diff --git a/programs/exchange/src/exchange_processor.rs b/programs/exchange/src/exchange_processor.rs index f2a36630b57812..e16a06773b98b5 100644 --- a/programs/exchange/src/exchange_processor.rs +++ b/programs/exchange/src/exchange_processor.rs @@ -193,11 +193,11 @@ impl ExchangeProcessor { error!("Not enough accounts"); return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data())?; + Self::is_account_unallocated(keyed_accounts[NEW_ACCOUNT_INDEX].try_account_ref()?.data())?; Self::serialize( &ExchangeState::Account( TokenAccountInfo::default() - .owner(&keyed_accounts[OWNER_INDEX].unsigned_key()) + .owner(keyed_accounts[OWNER_INDEX].unsigned_key()) .tokens(100_000, 100_000, 100_000, 100_000), ), &mut keyed_accounts[NEW_ACCOUNT_INDEX] @@ -221,13 +221,13 @@ impl ExchangeProcessor { } let mut to_account = - Self::deserialize_account(&keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data())?; + Self::deserialize_account(keyed_accounts[TO_ACCOUNT_INDEX].try_account_ref()?.data())?; if &faucet::id() == keyed_accounts[FROM_ACCOUNT_INDEX].unsigned_key() { to_account.tokens[token] += tokens; } else { let state: ExchangeState = - bincode::deserialize(&keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data()) + bincode::deserialize(keyed_accounts[FROM_ACCOUNT_INDEX].try_account_ref()?.data()) .map_err(Self::map_to_invalid_arg)?; match state { ExchangeState::Account(mut from_account) => { @@ -309,10 +309,10 @@ impl ExchangeProcessor { return Err(InstructionError::InvalidArgument); } - Self::is_account_unallocated(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; + Self::is_account_unallocated(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; let mut account = Self::deserialize_account( - &keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), + keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), )?; if &account.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { @@ -368,7 +368,7 @@ impl ExchangeProcessor { } let order = - Self::deserialize_order(&keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; if &order.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own order"); @@ -404,11 +404,11 @@ impl ExchangeProcessor { } let mut to_order = - Self::deserialize_order(&keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[TO_ORDER_INDEX].try_account_ref()?.data())?; let mut from_order = - Self::deserialize_order(&keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data())?; + Self::deserialize_order(keyed_accounts[FROM_ORDER_INDEX].try_account_ref()?.data())?; let mut profit_account = Self::deserialize_account( - &keyed_accounts[PROFIT_ACCOUNT_INDEX] + keyed_accounts[PROFIT_ACCOUNT_INDEX] .try_account_ref()? 
.data(), )?; @@ -639,7 +639,7 @@ mod test { } fn create_token_account(client: &BankClient, owner: &Keypair) -> Pubkey { - let new = create_account(&client, &owner); + let new = create_account(client, owner); let instruction = exchange_instruction::account_request(&owner.pubkey(), &new); client .send_and_confirm_instruction(owner, instruction) @@ -670,9 +670,9 @@ mod test { trade_tokens: u64, price: u64, ) -> (Pubkey, Pubkey) { - let trade = create_account(&client, &owner); - let src = create_token_account(&client, &owner); - transfer(&client, &owner, &src, from_token, src_tokens); + let trade = create_account(client, owner); + let src = create_token_account(client, owner); + transfer(client, owner, &src, from_token, src_tokens); let instruction = exchange_instruction::trade_request( &owner.pubkey(), diff --git a/programs/ownable/src/ownable_instruction.rs b/programs/ownable/src/ownable_instruction.rs index 2e4cb1c5b0c188..d9c7dab4e20940 100644 --- a/programs/ownable/src/ownable_instruction.rs +++ b/programs/ownable/src/ownable_instruction.rs @@ -33,7 +33,7 @@ pub fn create_account( let space = std::mem::size_of::() as u64; vec![ system_instruction::create_account( - &payer_pubkey, + payer_pubkey, account_pubkey, lamports, space, diff --git a/programs/ownable/src/ownable_processor.rs b/programs/ownable/src/ownable_processor.rs index d9532922cada79..b6696614cf24ce 100644 --- a/programs/ownable/src/ownable_processor.rs +++ b/programs/ownable/src/ownable_processor.rs @@ -38,7 +38,7 @@ pub fn process_instruction( let new_owner_pubkey: Pubkey = limited_deserialize(data)?; let account_keyed_account = &mut keyed_account_at_index(keyed_accounts, 0)?; let mut account_owner_pubkey: Pubkey = - limited_deserialize(&account_keyed_account.try_account_ref()?.data())?; + limited_deserialize(account_keyed_account.try_account_ref()?.data())?; if account_owner_pubkey == Pubkey::default() { account_owner_pubkey = new_owner_pubkey; @@ -47,7 +47,7 @@ pub fn process_instruction( set_owner( &mut account_owner_pubkey, new_owner_pubkey, - &owner_keyed_account, + owner_keyed_account, )?; } diff --git a/programs/stake/src/config.rs b/programs/stake/src/config.rs index b6947d410b946f..98d5b9df44cd88 100644 --- a/programs/stake/src/config.rs +++ b/programs/stake/src/config.rs @@ -17,7 +17,7 @@ use solana_sdk::{ pub use solana_sdk::stake::config::*; pub fn from(account: &T) -> Option { - get_config_data(&account.data()) + get_config_data(account.data()) .ok() .and_then(|data| deserialize(data).ok()) } diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 96412c3e93f2c4..7873d059e84c18 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -91,7 +91,7 @@ pub fn process_instruction( keyed_account_at_index(keyed_accounts, 3).map(|ka| ka.unsigned_key()); me.authorize_with_seed( - &authority_base, + authority_base, &args.authority_seed, &args.authority_owner, &args.new_authorized_pubkey, @@ -102,7 +102,7 @@ pub fn process_instruction( ) } else { me.authorize_with_seed( - &authority_base, + authority_base, &args.authority_seed, &args.authority_owner, &args.new_authorized_pubkey, @@ -119,7 +119,7 @@ pub fn process_instruction( let vote = keyed_account_at_index(keyed_accounts, 1)?; me.delegate( - &vote, + vote, &from_keyed_account::(keyed_account_at_index(keyed_accounts, 2)?)?, &from_keyed_account::(keyed_account_at_index(keyed_accounts, 3)?)?, &config::from_keyed_account(keyed_account_at_index(keyed_accounts, 4)?)?, diff --git 
a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 64e98f77cae7cf..1dfba03f83343f 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -513,7 +513,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { } self.authorize( &signers, - &new_authority, + new_authority, stake_authorize, require_custodian_for_locked_stake_authorize, clock, @@ -686,7 +686,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { split.set_state(&StakeState::Initialized(split_meta))?; } StakeState::Uninitialized => { - if !signers.contains(&self.unsigned_key()) { + if !signers.contains(self.unsigned_key()) { return Err(InstructionError::MissingRequiredSignature); } } @@ -810,7 +810,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { (meta.lockup, reserve, false) } StakeState::Uninitialized => { - if !signers.contains(&self.unsigned_key()) { + if !signers.contains(self.unsigned_key()) { return Err(InstructionError::MissingRequiredSignature); } (Lockup::default(), 0, false) // no lockup, no restrictions @@ -821,7 +821,7 @@ impl<'a> StakeAccount for KeyedAccount<'a> { // verify that lockup has expired or that the withdrawal is signed by // the custodian, both epoch and unix_timestamp must have passed let custodian_pubkey = custodian.and_then(|keyed_account| keyed_account.signer_key()); - if lockup.is_in_force(&clock, custodian_pubkey) { + if lockup.is_in_force(clock, custodian_pubkey) { return Err(StakeError::LockupInForce.into()); } @@ -3863,7 +3863,7 @@ mod tests { fn test_authorize_with_seed() { let base_pubkey = solana_sdk::pubkey::new_rand(); let seed = "42"; - let withdrawer_pubkey = Pubkey::create_with_seed(&base_pubkey, &seed, &id()).unwrap(); + let withdrawer_pubkey = Pubkey::create_with_seed(&base_pubkey, seed, &id()).unwrap(); let stake_lamports = 42; let stake_account = AccountSharedData::new_ref_data_with_space( stake_lamports, @@ -3884,7 +3884,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &"", + "", &id(), &new_authority, StakeAuthorize::Staker, @@ -3899,7 +3899,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &stake_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Staker, @@ -3914,7 +3914,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Staker, @@ -3929,7 +3929,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &base_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Withdrawer, @@ -3944,7 +3944,7 @@ mod tests { assert_eq!( stake_keyed_account.authorize_with_seed( &stake_keyed_account, - &seed, + seed, &id(), &new_authority, StakeAuthorize::Withdrawer, diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index bd21e855072d0a..70e59c3d28de7d 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -230,7 +230,7 @@ impl VoteState { // utility function, used by Stakes, tests pub fn from(account: &T) -> Option { - Self::deserialize(&account.data()).ok() + Self::deserialize(account.data()).ok() } // utility function, used by Stakes, tests @@ -239,7 +239,7 @@ impl VoteState { } pub fn deserialize(input: &[u8]) -> Result { - deserialize::(&input) + deserialize::(input) .map(|versioned| versioned.convert_to_current()) .map_err(|_| InstructionError::InvalidAccountData) } @@ -638,7 +638,7 @@ pub fn update_validator_identity( 
verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; // new node must say "yay" - verify_authorized_signer(&node_pubkey, signers)?; + verify_authorized_signer(node_pubkey, signers)?; vote_state.node_pubkey = *node_pubkey; @@ -938,7 +938,7 @@ mod tests { slot_hashes: &[SlotHash], epoch: Epoch, ) -> Result { - let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, vote_account)]; + let keyed_accounts = &[KeyedAccount::new(vote_pubkey, true, vote_account)]; let signers: HashSet<Pubkey> = get_signers(keyed_accounts); process_vote( &keyed_accounts[0], diff --git a/programs/vote/src/vote_transaction.rs b/programs/vote/src/vote_transaction.rs index fdbce1798217ef..f5446564c5126f 100644 --- a/programs/vote/src/vote_transaction.rs +++ b/programs/vote/src/vote_transaction.rs @@ -19,7 +19,7 @@ pub fn parse_vote_transaction(tx: &Transaction) -> Option<(Pubkey, Vote, Option<Hash>)> { let prog_id_idx = first_instruction.program_id_index as usize; match message.account_keys.get(prog_id_idx) { Some(program_id) => { - if !crate::check_id(&program_id) { + if !crate::check_id(program_id) { return None; } } diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs new file mode 100644 index 00000000000000..0a98d596638285 --- /dev/null +++ b/rbpf-cli/src/main.rs @@ -0,0 +1,253 @@ +use clap::{crate_version, App, Arg}; +use serde::{Deserialize, Serialize}; +use serde_json::Result; +use solana_bpf_loader_program::{ + create_vm, serialization::serialize_parameters, syscalls::register_syscalls, BpfError, + ThisInstructionMeter, +}; +use solana_rbpf::{ + assembler::assemble, + static_analysis::Analysis, + verifier::check, + vm::{Config, DynamicAnalysis, Executable}, +}; +use solana_sdk::{ + account::AccountSharedData, + bpf_loader, + keyed_account::KeyedAccount, + process_instruction::{InvokeContext, MockInvokeContext}, + pubkey::Pubkey, +}; +use std::{cell::RefCell, fs::File, io::Read, io::Seek, io::SeekFrom, path::Path}; + +#[derive(Serialize, Deserialize, Debug)] +struct Account { + lamports: u64, + data: Vec<u8>, + owner: Pubkey, +} +#[derive(Serialize, Deserialize)] +struct Input { + accounts: Vec<Account>, + insndata: Vec<u8>, +} +fn load_accounts(path: &Path) -> Result<Input> { + let file = File::open(path).unwrap(); + let input: Input = serde_json::from_reader(file)?; + println!("Program input:"); + println!("accounts {:?}", &input.accounts); + println!("insndata {:?}", &input.insndata); + println!("----------------------------------------"); + Ok(input) +} + +fn main() { + solana_logger::setup(); + let matches = App::new("Solana BPF CLI") + .version(crate_version!()) + .author("Solana Maintainers <maintainers@solana.foundation>") + .about( + r##"CLI to test and analyze eBPF programs. + +The tool executes eBPF programs in a mocked environment. +Some features, such as sysvars syscall and CPI, are not +available for the programs executed by the CLI tool. + +The input data for a program execution has to be in JSON format +and the following fields are required +{ + "accounts": [ + { + "lamports": 1000, + "data": [0, 0, 0, 3], + "owner": [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + } + ], + "insndata": [] +} +"##, + ) + .arg( + Arg::new("PROGRAM") + .about( + "Program file to use.
This is either an ELF shared-object file to be executed, \ + or an assembly file to be assembled and executed.", ) + .required(true) + .index(1) + ) + .arg( + Arg::new("input") + .about( + "Input for the program to run on, where FILE is a name of a JSON file \ +with input data, or BYTES is the number of 0-valued bytes to allocate for program parameters", ) + .short('i') + .long("input") + .value_name("FILE / BYTES") + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::new("memory") + .about("Heap memory for the program to run on") + .short('m') + .long("memory") + .value_name("BYTES") + .takes_value(true) + .default_value("0"), + ) + .arg( + Arg::new("use") + .about( + "Method of execution to use, where 'cfg' generates Control Flow Graph \ +of the program, 'disassembler' dumps disassembled code of the program, 'interpreter' runs \ +the program in the virtual machine's interpreter, and 'jit' precompiles the program to \ +native machine code before executing it in the virtual machine.", ) + .short('u') + .long("use") + .takes_value(true) + .value_name("VALUE") + .possible_values(&["cfg", "disassembler", "interpreter", "jit"]) + .default_value("interpreter"), ) + .arg( + Arg::new("instruction limit") + .about("Limit the number of instructions to execute") + .short('l') + .long("limit") + .takes_value(true) + .value_name("COUNT") + .default_value(&std::i64::MAX.to_string()), ) + .arg( + Arg::new("trace") + .about("Output trace to 'trace.out' file using tracing instrumentation") + .short('t') + .long("trace"), ) + .arg( + Arg::new("profile") + .about("Output profile to 'profile.dot' file using tracing instrumentation") + .short('p') + .long("profile"), ) + .arg( + Arg::new("verify") + .about("Run the verifier before execution or disassembly") + .short('v') + .long("verify"), ) + .get_matches(); + + let config = Config { + enable_instruction_tracing: matches.is_present("trace") || matches.is_present("profile"), + ..Config::default() + }; + let mut accounts = Vec::new(); + let mut account_refcells = Vec::new(); + let default_account = RefCell::new(AccountSharedData::default()); + let key = solana_sdk::pubkey::new_rand(); + let mut mem = match matches.value_of("input").unwrap().parse::<usize>() { + Ok(allocate) => { + accounts.push(KeyedAccount::new(&key, false, &default_account)); + vec![0u8; allocate] + } + Err(_) => { + let input = load_accounts(Path::new(matches.value_of("input").unwrap())).unwrap(); + for acc in input.accounts { + let asd = AccountSharedData::new_ref(acc.lamports, acc.data.len(), &acc.owner); + asd.borrow_mut().set_data(acc.data); + account_refcells.push(asd); + } + for acc in &account_refcells { + accounts.push(KeyedAccount::new(&key, false, acc)); + } + let lid = bpf_loader::id(); + let pid = Pubkey::new(&[0u8; 32]); + let mut bytes = serialize_parameters(&lid, &pid, &accounts, &input.insndata).unwrap(); + Vec::from(bytes.as_slice_mut()) + } + }; + let mut invoke_context = MockInvokeContext::new(accounts); + let logger = invoke_context.logger.clone(); + let compute_meter = invoke_context.get_compute_meter(); + let mut instruction_meter = ThisInstructionMeter { compute_meter }; + + let program = matches.value_of("PROGRAM").unwrap(); + let mut file = File::open(&Path::new(program)).unwrap(); + let mut magic = [0u8; 4]; + file.read_exact(&mut magic).unwrap(); + file.seek(SeekFrom::Start(0)).unwrap(); + let mut contents = Vec::new(); + file.read_to_end(&mut contents).unwrap(); + let mut executable = if magic == [0x7f, 0x45, 0x4c, 0x46] { + <dyn Executable<BpfError, ThisInstructionMeter>
>::from_elf(&contents, None, config) + .map_err(|err| format!("Executable constructor failed: {:?}", err)) + } else { + assemble::( + std::str::from_utf8(contents.as_slice()).unwrap(), + None, + config, + ) + } + .unwrap(); + + if matches.is_present("verify") { + let (_, elf_bytes) = executable.get_text_bytes().unwrap(); + check(elf_bytes).unwrap(); + } + executable.set_syscall_registry(register_syscalls(&mut invoke_context).unwrap()); + executable.jit_compile().unwrap(); + let analysis = Analysis::from_executable(executable.as_ref()); + + match matches.value_of("use") { + Some("cfg") => { + let mut file = File::create("cfg.dot").unwrap(); + analysis.visualize_graphically(&mut file, None).unwrap(); + return; + } + Some("disassembler") => { + let stdout = std::io::stdout(); + analysis.disassemble(&mut stdout.lock()).unwrap(); + return; + } + _ => {} + } + + let id = bpf_loader::id(); + let mut vm = create_vm(&id, executable.as_ref(), &mut mem, &mut invoke_context).unwrap(); + let result = if matches.value_of("use").unwrap() == "interpreter" { + vm.execute_program_interpreted(&mut instruction_meter) + } else { + vm.execute_program_jit(&mut instruction_meter) + }; + if logger.log.borrow().len() > 0 { + println!("Program output:"); + for s in logger.log.borrow_mut().iter() { + println!("{}", s); + } + println!("----------------------------------------"); + } + println!("Result: {:?}", result); + println!("Instruction Count: {}", vm.get_total_instruction_count()); + if matches.is_present("trace") { + println!("Trace is saved in trace.out"); + let mut file = File::create("trace.out").unwrap(); + let analysis = Analysis::from_executable(executable.as_ref()); + vm.get_tracer().write(&mut file, &analysis).unwrap(); + } + if matches.is_present("profile") { + println!("Profile is saved in profile.dot"); + let tracer = &vm.get_tracer(); + let dynamic_analysis = DynamicAnalysis::new(tracer, &analysis); + let mut file = File::create("profile.dot").unwrap(); + analysis + .visualize_graphically(&mut file, Some(&dynamic_analysis)) + .unwrap(); + } +} diff --git a/remote-wallet/src/remote_wallet.rs b/remote-wallet/src/remote_wallet.rs index afa5abc2f53631..af061650180ff5 100644 --- a/remote-wallet/src/remote_wallet.rs +++ b/remote-wallet/src/remote_wallet.rs @@ -114,10 +114,10 @@ impl RemoteWalletManager { is_valid_hid_device(device_info.usage_page(), device_info.interface_number()) && is_valid_ledger(device_info.vendor_id(), device_info.product_id()) }) { - match usb.open_path(&device_info.path()) { + match usb.open_path(device_info.path()) { Ok(device) => { let mut ledger = LedgerWallet::new(device); - let result = ledger.read_device(&device_info); + let result = ledger.read_device(device_info); match result { Ok(info) => { ledger.pretty_path = info.get_pretty_path(); diff --git a/rpc/src/parsed_token_accounts.rs b/rpc/src/parsed_token_accounts.rs index bfcc9a1fa7c61d..1e2c22007bbe04 100644 --- a/rpc/src/parsed_token_accounts.rs +++ b/rpc/src/parsed_token_accounts.rs @@ -20,7 +20,7 @@ pub fn get_parsed_token_account( pubkey: &Pubkey, account: AccountSharedData, ) -> UiAccount { - let additional_data = get_token_account_mint(&account.data()) + let additional_data = get_token_account_mint(account.data()) .and_then(|mint_pubkey| get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()) .map(|(_, decimals)| AccountAdditionalData { spl_token_decimals: Some(decimals), @@ -44,7 +44,7 @@ where { let mut mint_decimals: HashMap = HashMap::new(); keyed_accounts.filter_map(move |(pubkey, account)| { - let 
additional_data = get_token_account_mint(&account.data()).map(|mint_pubkey| { + let additional_data = get_token_account_mint(account.data()).map(|mint_pubkey| { let spl_token_decimals = mint_decimals.get(&mint_pubkey).cloned().or_else(|| { let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint_pubkey).ok()?; mint_decimals.insert(mint_pubkey, decimals); @@ -80,7 +80,7 @@ pub fn get_mint_owner_and_decimals(bank: &Arc, mint: &Pubkey) -> Result<(P let mint_account = bank.get_account(mint).ok_or_else(|| { Error::invalid_params("Invalid param: could not find mint".to_string()) })?; - let decimals = get_mint_decimals(&mint_account.data())?; + let decimals = get_mint_decimals(mint_account.data())?; Ok((*mint_account.owner(), decimals)) } } diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 3643fc7da1b5b4..d2b35c08d1a302 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -1558,12 +1558,12 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token account".to_string(), )); } - let token_account = TokenAccount::unpack(&account.data()).map_err(|_| { + let token_account = TokenAccount::unpack(account.data()).map_err(|_| { Error::invalid_params("Invalid param: not a v2.0 Token account".to_string()) })?; let mint = &Pubkey::from_str(&token_account.mint.to_string()) .expect("Token account mint should be convertible to Pubkey"); - let (_, decimals) = get_mint_owner_and_decimals(&bank, &mint)?; + let (_, decimals) = get_mint_owner_and_decimals(&bank, mint)?; let balance = token_amount_to_ui_amount(token_account.amount, decimals); Ok(new_response(&bank, balance)) } @@ -1582,7 +1582,7 @@ impl JsonRpcRequestProcessor { "Invalid param: not a v2.0 Token mint".to_string(), )); } - let mint = Mint::unpack(&mint_account.data()).map_err(|_| { + let mint = Mint::unpack(mint_account.data()).map_err(|_| { Error::invalid_params("Invalid param: mint could not be unpacked".to_string()) })?; @@ -1603,10 +1603,10 @@ impl JsonRpcRequestProcessor { )); } let mut token_balances: Vec = self - .get_filtered_spl_token_accounts_by_mint(&bank, &mint, vec![])? + .get_filtered_spl_token_accounts_by_mint(&bank, mint, vec![])? 
.into_iter() .map(|(address, account)| { - let amount = TokenAccount::unpack(&account.data()) + let amount = TokenAccount::unpack(account.data()) .map(|account| account.amount) .unwrap_or(0); let amount = token_amount_to_ui_amount(amount, decimals); @@ -1742,7 +1742,7 @@ impl JsonRpcRequestProcessor { let filter_closure = |account: &AccountSharedData| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }; if self @@ -1811,7 +1811,7 @@ impl JsonRpcRequestProcessor { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }, )) @@ -1858,7 +1858,7 @@ impl JsonRpcRequestProcessor { account.owner() == &spl_token_id_v2_0() && filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }), ) @@ -2043,7 +2043,7 @@ fn get_token_program_id_and_mint( ) -> Result<(Pubkey, Option)> { match token_account_filter { TokenAccountsFilter::Mint(mint) => { - let (mint_owner, _) = get_mint_owner_and_decimals(&bank, &mint)?; + let (mint_owner, _) = get_mint_owner_and_decimals(bank, &mint)?; if mint_owner != spl_token_id_v2_0() { return Err(Error::invalid_params( "Invalid param: not a v2.0 Token mint".to_string(), @@ -2951,7 +2951,7 @@ pub mod rpc_full { let durable_nonce_info = solana_sdk::transaction::uses_durable_nonce(&transaction) .and_then(|nonce_ix| { solana_sdk::transaction::get_nonce_pubkey_from_instruction( - &nonce_ix, + nonce_ix, &transaction, ) }) @@ -3077,7 +3077,7 @@ pub mod rpc_full { }; Ok(new_response( - &bank, + bank, RpcSimulateTransactionResult { err: result.err(), logs: Some(logs), @@ -3798,17 +3798,17 @@ pub fn create_test_transactions_and_populate_blockstore( // Generate transactions for processing // Successful transaction let success_tx = - solana_sdk::system_transaction::transfer(&mint_keypair, &keypair1.pubkey(), 2, blockhash); + solana_sdk::system_transaction::transfer(mint_keypair, &keypair1.pubkey(), 2, blockhash); let success_signature = success_tx.signatures[0]; let entry_1 = solana_ledger::entry::next_entry(&blockhash, 1, vec![success_tx]); // Failed transaction, InstructionError let ix_error_tx = - solana_sdk::system_transaction::transfer(&keypair2, &keypair3.pubkey(), 10, blockhash); + solana_sdk::system_transaction::transfer(keypair2, &keypair3.pubkey(), 10, blockhash); let ix_error_signature = ix_error_tx.signatures[0]; let entry_2 = solana_ledger::entry::next_entry(&entry_1.hash, 1, vec![ix_error_tx]); // Failed transaction let fail_tx = solana_sdk::system_transaction::transfer( - &mint_keypair, + mint_keypair, &keypair2.pubkey(), 2, Hash::default(), @@ -4181,7 +4181,7 @@ pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getClusterNodes"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -4203,7 +4203,7 @@ 
pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -4233,7 +4233,7 @@ pub mod tests { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentPerformanceSamples","params":[10000]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -4262,7 +4262,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlotLeader"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = format!(r#"{{"jsonrpc":"2.0","result":"{}","id":1}}"#, leader_pubkey); let expected: Response = serde_json::from_str(&expected).expect("expected response deserialization"); @@ -4292,10 +4292,10 @@ pub mod tests { io.extend_with(rpc_minimal::MinimalImpl.to_delegate()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getTransactionCount"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":4,"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -4307,10 +4307,10 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"minimumLedgerSlot"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = r#"{"jsonrpc":"2.0","result":0,"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -4321,7 +4321,7 @@ pub mod tests { let bob_pubkey = solana_sdk::pubkey::new_rand(); let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSupply"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let supply: RpcSupply = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); @@ -4348,7 +4348,7 @@ pub mod tests { io, meta, alice, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts"}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) @@ -4385,14 +4385,14 @@ pub mod tests { // Test Circulating/NonCirculating Filter let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"circulating"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) .expect("actual response deserialization"); assert_eq!(largest_accounts.len(), 20); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLargestAccounts","params":[{"filter":"nonCirculating"}]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let largest_accounts: Vec = serde_json::from_value(json["result"]["value"].clone()) @@ -4438,7 +4438,7 @@ pub mod tests { let RpcHandler { io, meta, bank, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationGovernor"}"#; - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_governor: RpcInflationGovernor = if let Response::Single(res) = res { @@ -4454,7 +4454,7 @@ pub mod tests { assert_eq!(inflation_governor, expected_inflation_governor); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getInflationRate"}"#; // Queries current epoch - let rep = io.handle_request_sync(&req, meta); + let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); let inflation_rate: RpcInflationRate = if let Response::Single(res) = res { @@ -4484,7 +4484,7 @@ pub mod tests { let RpcHandler { io, meta, bank, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getEpochSchedule"}"#; - let rep = io.handle_request_sync(&req, meta); + let rep = io.handle_request_sync(req, meta); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -4519,7 +4519,7 @@ pub mod tests { ] .iter() { - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -4548,7 +4548,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getLeaderSchedule", "params": [42424242]}"#; - let rep = io.handle_request_sync(&req, meta.clone()); + let rep = io.handle_request_sync(req, meta.clone()); let res: Response = serde_json::from_str(&rep.expect("actual response")) .expect("actual response deserialization"); @@ -5415,7 +5415,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getRecentBlockhash"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5446,7 +5446,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFees"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5522,7 +5522,7 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getFeeRateGovernor"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5796,7 +5796,7 @@ pub mod tests { let RpcHandler { io, meta, .. } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getIdentity"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let expected = json!({ "jsonrpc": "2.0", "result": { @@ -5835,7 +5835,7 @@ pub mod tests { let RpcHandler { io, meta, .. 
} = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVersion"}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let version = solana_version::Version::default(); let expected = json!({ "jsonrpc": "2.0", @@ -5930,7 +5930,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let RpcBlockCommitment { @@ -5956,7 +5956,7 @@ pub mod tests { assert_eq!(total_stake, 10); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockCommitment","params":[2]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let commitment_response: RpcBlockCommitment = @@ -5985,7 +5985,7 @@ pub mod tests { } = start_rpc_handler_with_tx(&bob_pubkey); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option = @@ -6030,7 +6030,7 @@ pub mod tests { } let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0,"binary"]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_block: Option = @@ -6075,7 +6075,7 @@ pub mod tests { // disable rpc-tx-history meta.config.enable_rpc_transaction_history = false; let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlock","params":[0]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); assert_eq!( res, Some( @@ -6152,7 +6152,7 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlockProduction","params":[]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let block_production: RpcBlockProduction = @@ -6228,35 +6228,35 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, roots[1..].to_vec()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[2]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = 
serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,4]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[0,7]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![1, 3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocks","params":[9,11]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); @@ -6305,7 +6305,7 @@ pub mod tests { .set_highest_confirmed_root(8); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,500001]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); assert_eq!( res, Some( @@ -6314,35 +6314,35 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[0,0]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert!(confirmed_blocks.is_empty()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,2]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,3]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[2,500000]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); assert_eq!(confirmed_blocks, vec![3, 4, 8]); let req = 
r#"{"jsonrpc":"2.0","id":1,"method":"getBlocksWithLimit","params":[9,500000]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); let confirmed_blocks: Vec = serde_json::from_value(result["result"].clone()).unwrap(); @@ -6410,7 +6410,7 @@ pub mod tests { let res = io.handle_request_sync(&req, meta); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32004,"message":"Block not available for slot 12345"},"id":1}"#; let expected: Response = - serde_json::from_str(&expected).expect("expected response deserialization"); + serde_json::from_str(expected).expect("expected response deserialization"); let result: Response = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); assert_eq!(expected, result); @@ -6472,7 +6472,7 @@ pub mod tests { // stake but has never voted, and the vote account with no stake should not be present. { let req = r#"{"jsonrpc":"2.0","id":1,"method":"getVoteAccounts"}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let result: Value = serde_json::from_str(&res.expect("actual response")) .expect("actual response deserialization"); @@ -7338,7 +7338,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7353,7 +7353,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7368,7 +7368,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta.clone()); + let res = io.handle_request_sync(req, meta.clone()); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 2); @@ -7384,7 +7384,7 @@ pub mod tests { ); let req = r#"{"jsonrpc":"2.0","id":1,"method":"getSlot","params":[{"commitment": "confirmed"}]}"#; - let res = io.handle_request_sync(&req, meta); + let res = io.handle_request_sync(req, meta); let json: Value = serde_json::from_str(&res.unwrap()).unwrap(); let slot: Slot = serde_json::from_value(json["result"].clone()).unwrap(); assert_eq!(slot, 3); diff --git a/rpc/src/rpc_health.rs b/rpc/src/rpc_health.rs index 32e6c3b336c4ec..4317741b5eb1d6 100644 --- a/rpc/src/rpc_health.rs +++ b/rpc/src/rpc_health.rs @@ -65,7 +65,7 @@ impl RpcHealth { .iter() .filter_map(|trusted_validator| { self.cluster_info - .get_accounts_hash_for_node(&trusted_validator, |hashes| { + .get_accounts_hash_for_node(trusted_validator, |hashes| { hashes .iter() .max_by(|a, b| a.0.cmp(&b.0)) diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 9d9937237ee112..f1cd796468bac3 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -772,19 
+772,19 @@ mod tests { let _res = io.handle_request_sync(&req, session.clone()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[0]}"#; - let res = io.handle_request_sync(&req, session.clone()); + let res = io.handle_request_sync(req, session.clone()); let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"signatureUnsubscribe","params":[1]}"#; - let res = io.handle_request_sync(&req, session); + let res = io.handle_request_sync(req, session); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); @@ -1016,19 +1016,19 @@ mod tests { let _res = io.handle_request_sync(&req, session.clone()); let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[0]}"#; - let res = io.handle_request_sync(&req, session.clone()); + let res = io.handle_request_sync(req, session.clone()); let expected = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); // Test bad parameter let req = r#"{"jsonrpc":"2.0","id":1,"method":"accountUnsubscribe","params":[1]}"#; - let res = io.handle_request_sync(&req, session); + let res = io.handle_request_sync(req, session); let expected = r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#; - let expected: Response = serde_json::from_str(&expected).unwrap(); + let expected: Response = serde_json::from_str(expected).unwrap(); let result: Response = serde_json::from_str(&res.unwrap()).unwrap(); assert_eq!(expected, result); diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 0d8adf819f0385..e3807423dd83df 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -209,7 +209,7 @@ impl RequestMiddleware for RpcRequestMiddleware { .file_name() .unwrap_or_else(|| std::ffi::OsStr::new("")) .to_str() - .unwrap_or(&"") + .unwrap_or("") )) } else { RpcRequestMiddleware::not_found() diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 6ee7146c8e6b28..8652091e95ba2c 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -335,7 +335,7 @@ fn filter_program_results( let keyed_accounts = accounts.into_iter().filter(move |(_, account)| { filters.iter().all(|filter_type| match filter_type { RpcFilterType::DataSize(size) => account.data().len() as u64 == *size, - RpcFilterType::Memcmp(compare) => compare.bytes_match(&account.data()), + RpcFilterType::Memcmp(compare) => compare.bytes_match(account.data()), }) }); let accounts: Box> = if program_id == &spl_token_id_v2_0() @@ -614,7 +614,7 @@ impl RpcSubscriptions { if let Some(subscription_ids) = subscriptions.get_mut(signature) { subscription_ids.retain(|k, _| !notified_ids.contains(k)); if subscription_ids.is_empty() { - subscriptions.remove(&signature); + 
subscriptions.remove(signature); } } notified_ids @@ -1156,7 +1156,7 @@ impl RpcSubscriptions { &subscriptions.gossip_signature_subscriptions, bank_forks, &commitment_slots, - &notifier, + notifier, "gossip", ); } @@ -1182,8 +1182,8 @@ impl RpcSubscriptions { pubkey, bank_forks, account_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1200,8 +1200,8 @@ impl RpcSubscriptions { address, bank_forks, logs_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1218,8 +1218,8 @@ impl RpcSubscriptions { program_id, bank_forks, program_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1236,8 +1236,8 @@ impl RpcSubscriptions { signature, bank_forks, signature_subscriptions.clone(), - &notifier, - &commitment_slots, + notifier, + commitment_slots, ) .len(); } @@ -1304,7 +1304,7 @@ impl RpcSubscriptions { ReceivedSignatureResult::ReceivedSignature, ), }, - &sink, + sink, ); } } diff --git a/rpc/src/send_transaction_service.rs b/rpc/src/send_transaction_service.rs index ed5e24512d95dc..1b68b316150140 100644 --- a/rpc/src/send_transaction_service.rs +++ b/rpc/src/send_transaction_service.rs @@ -269,10 +269,10 @@ impl SendTransactionService { address_list } }) - .unwrap_or_else(|| vec![&tpu_address]); + .unwrap_or_else(|| vec![tpu_address]); for address in addresses { Self::send_transaction( - &send_socket, + send_socket, address, &transaction_info.wire_transaction, ); diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 0eab6e639c69c0..6d6d7c6cdf9314 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -240,7 +240,7 @@ fn store_accounts_with_possible_contention( // Write to a different slot than the one being read from. 
Because // there's a new account pubkey being written to every time, will // compete for the accounts index lock on every store - accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), &account); + accounts.store_slow_uncached(slot + 1, &solana_sdk::pubkey::new_rand(), account); } }) } diff --git a/runtime/benches/bank.rs b/runtime/benches/bank.rs index e739050d0caeb9..22f7b48c067624 100644 --- a/runtime/benches/bank.rs +++ b/runtime/benches/bank.rs @@ -50,7 +50,7 @@ pub fn create_builtin_transactions( // Seed the signer account let rando0 = Keypair::new(); bank_client - .transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey()) + .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); @@ -72,7 +72,7 @@ pub fn create_native_loader_transactions( // Seed the signer account let rando0 = Keypair::new(); bank_client - .transfer_and_confirm(10_000, &mint_keypair, &rando0.pubkey()) + .transfer_and_confirm(10_000, mint_keypair, &rando0.pubkey()) .unwrap_or_else(|_| panic!("{}:{}", line!(), file!())); let instruction = create_invoke_instruction(rando0.pubkey(), program_id, &1u8); @@ -94,7 +94,7 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra } for _ in 0..1_000_000_000_u64 { if bank - .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .is_some() { break; @@ -102,13 +102,13 @@ fn async_bencher(bank: &Arc<Bank>, bank_client: &BankClient, transactions: &[Tra sleep(Duration::from_nanos(1)); } if bank - .get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + .get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .unwrap() .is_err() { error!( "transaction failed: {:?}", - bank.get_signature_status(&transactions.last().unwrap().signatures.get(0).unwrap()) + bank.get_signature_status(transactions.last().unwrap().signatures.get(0).unwrap()) .unwrap() ); panic!(); diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 84f194772fcbe3..d475f29488cd32 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -215,7 +215,7 @@ impl Accounts { let mut account_deps = Vec::with_capacity(message.account_keys.len()); let demote_sysvar_write_locks = feature_set.is_active(&feature_set::demote_sysvar_write_locks::id()); - let mut key_check = MessageProgramIdsCache::new(&message); + let mut key_check = MessageProgramIdsCache::new(message); let mut rent_debits = RentDebits::default(); for (i, key) in message.account_keys.iter().enumerate() { let account = if key_check.is_non_loader_key(key, i) { @@ -237,7 +237,7 @@ impl Accounts { .map(|(mut account, _)| { if message.is_writable(i, demote_sysvar_write_locks) { let rent_due = rent_collector - .collect_from_existing_account(&key, &mut account); + .collect_from_existing_account(key, &mut account); (account, rent_due) } else { (account, 0) @@ -1016,7 +1016,7 @@ impl Accounts { } } if account.rent_epoch() == INITIAL_RENT_EPOCH { - let rent = rent_collector.collect_from_created_account(&key, account); + let rent = rent_collector.collect_from_created_account(key, account); loaded_transaction.rent += rent; loaded_transaction .rent_debits @@ -1093,7 +1093,7 @@ pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) for pubkey in pubkeys { let amount = thread_rng().gen_range(0, 10); let 
account = AccountSharedData::new(amount, 0, AccountSharedData::default().owner()); - accounts.store_slow_uncached(slot, &pubkey, &account); + accounts.store_slow_uncached(slot, pubkey, &account); } } @@ -1127,7 +1127,7 @@ mod tests { error_counters: &mut ErrorCounters, ) -> Vec<TransactionLoadResult> { let mut hash_queue = BlockhashQueue::new(100); - hash_queue.register_hash(&tx.message().recent_blockhash, &fee_calculator); + hash_queue.register_hash(&tx.message().recent_blockhash, fee_calculator); let accounts = Accounts::new_with_config( Vec::new(), &ClusterType::Development, diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index fef9e88166f031..d947d58bf5008e 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -415,7 +415,7 @@ impl AccountsBackgroundService { total_remove_slots_time: &mut u64, ) { let mut remove_slots_time = Measure::start("remove_slots_time"); - *removed_slots_count += request_handler.handle_pruned_banks(&bank, true); + *removed_slots_count += request_handler.handle_pruned_banks(bank, true); remove_slots_time.stop(); *total_remove_slots_time += remove_slots_time.as_us(); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 89fc50db4fd2e0..8418a021afc3e5 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -354,7 +354,7 @@ impl<'a> LoadedAccount<'a> { pub fn owner(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner, - LoadedAccount::Cached((_, cached_account)) => &cached_account.account.owner(), + LoadedAccount::Cached((_, cached_account)) => cached_account.account.owner(), } } @@ -377,7 +377,7 @@ impl<'a> LoadedAccount<'a> { pub fn pubkey(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey, - LoadedAccount::Cached((pubkey, _)) => &pubkey, + LoadedAccount::Cached((pubkey, _)) => pubkey, } } @@ -391,7 +391,7 @@ impl<'a> LoadedAccount<'a> { pub fn compute_hash(&self, slot: Slot, pubkey: &Pubkey) -> Hash { match self { LoadedAccount::Stored(stored_account_meta) => { - AccountsDb::hash_stored_account(slot, &stored_account_meta) + AccountsDb::hash_stored_account(slot, stored_account_meta) } LoadedAccount::Cached((_, cached_account)) => { AccountsDb::hash_account(slot, &cached_account.account, pubkey) @@ -1479,7 +1479,7 @@ impl AccountsDb { let mut reclaims = Vec::new(); for pubkey in pubkeys { self.accounts_index.clean_rooted_entries( - &pubkey, + pubkey, &mut reclaims, max_clean_root, ); @@ -1571,7 +1571,7 @@ impl AccountsDb { let affected_pubkeys = &store_counts.get(&id).unwrap().1; for key in affected_pubkeys { - for (_slot, account_info) in &purges.get(&key).unwrap().0 { + for (_slot, account_info) in &purges.get(key).unwrap().0 { if !already_counted.contains(&account_info.store_id) { pending_store_ids.insert(account_info.store_id); } @@ -1624,7 +1624,7 @@ impl AccountsDb { for (pubkey, slots_set) in pubkey_to_slot_set { let is_empty = self .accounts_index - .purge_exact(&pubkey, slots_set, &mut reclaims); + .purge_exact(pubkey, slots_set, &mut reclaims); if is_empty { dead_keys.push(pubkey); } @@ -1845,12 +1845,12 @@ impl AccountsDb { // Then purge if we can let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new(); for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() { - if purged_account_slots.contains_key(&key) { - *ref_count = self.accounts_index.ref_count_from_storage(&key); + if 
purged_account_slots.contains_key(key) { + *ref_count = self.accounts_index.ref_count_from_storage(key); } account_infos.retain(|(slot, account_info)| { let was_slot_purged = purged_account_slots - .get(&key) + .get(key) .map(|slots_removed| slots_removed.contains(slot)) .unwrap_or(false); if was_slot_purged { @@ -2044,7 +2044,7 @@ impl AccountsDb { return; } let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots"); - self.clean_stored_dead_slots(&dead_slots, purged_account_slots); + self.clean_stored_dead_slots(dead_slots, purged_account_slots); clean_dead_slots.stop(); let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots"); @@ -3261,7 +3261,7 @@ impl AccountsDb { let path_index = thread_rng().gen_range(0, paths.len()); let store = Arc::new(self.new_storage_entry( slot, - &Path::new(&paths[path_index]), + Path::new(&paths[path_index]), Self::page_align(size), )); @@ -3442,7 +3442,7 @@ impl AccountsDb { let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed"); for remove_slot in removed_slots { // Remove the storage entries and collect some metrics - if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(&remove_slot) { + if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(remove_slot) { { let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap(); total_removed_storage_entries += r_slot_removed_storages.len(); @@ -3705,10 +3705,10 @@ impl AccountsDb { Self::hash_account_data( slot, account.lamports(), - &account.owner(), + account.owner(), account.executable(), account.rent_epoch(), - &account.data(), + account.data(), pubkey, ) } @@ -3716,8 +3716,8 @@ impl AccountsDb { fn hash_frozen_account_data(account: &AccountSharedData) -> Hash { let mut hasher = Hasher::default(); - hasher.hash(&account.data()); - hasher.hash(&account.owner().as_ref()); + hasher.hash(account.data()); + hasher.hash(account.owner().as_ref()); if account.executable() { hasher.hash(&[1u8; 1]); @@ -3749,7 +3749,7 @@ impl AccountsDb { hasher.update(&rent_epoch.to_le_bytes()); - hasher.update(&data); + hasher.update(data); if executable { hasher.update(&[1u8; 1]); @@ -3757,8 +3757,8 @@ impl AccountsDb { hasher.update(&[0u8; 1]); } - hasher.update(&owner.as_ref()); - hasher.update(&pubkey.as_ref()); + hasher.update(owner.as_ref()); + hasher.update(pubkey.as_ref()); Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap()) } @@ -4778,7 +4778,7 @@ impl AccountsDb { ); if check_hash { - let computed_hash = loaded_account.compute_hash(slot, &pubkey); + let computed_hash = loaded_account.compute_hash(slot, pubkey); if computed_hash != source_item.hash { info!( "hash mismatch found: computed: {}, loaded: {}, pubkey: {}", @@ -4849,7 +4849,7 @@ impl AccountsDb { }; let result = Self::scan_snapshot_stores_with_cache( - &storages, + storages, &mut stats, PUBKEY_BINS_FOR_CALCULATING_HASHES, &bounds, @@ -5044,8 +5044,8 @@ impl AccountsDb { self.accounts_index.upsert( slot, pubkey, - &pubkey_account.1.owner(), - &pubkey_account.1.data(), + pubkey_account.1.owner(), + pubkey_account.1.data(), &self.account_indexes, info, &mut reclaims, @@ -5297,7 +5297,7 @@ impl AccountsDb { pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) { for account_pubkey in account_pubkeys { - if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, &account_pubkey) + if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, account_pubkey) { 
let frozen_account_info = FrozenAccountInfo { hash: Self::hash_frozen_account_data(&account), @@ -5335,7 +5335,7 @@ impl AccountsDb { ) } - let hash = Self::hash_frozen_account_data(&account); + let hash = Self::hash_frozen_account_data(account); if hash != frozen_account_info.hash { FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed); panic!( @@ -5679,10 +5679,10 @@ impl AccountsDb { if *slot <= snapshot_slot && (self.accounts_index.is_root(*slot) || ancestors - .map(|ancestors| ancestors.contains_key(&slot)) + .map(|ancestors| ancestors.contains_key(slot)) .unwrap_or_default()) { - self.storage.0.get(&slot).map_or_else( + self.storage.0.get(slot).map_or_else( || None, |item| { let storages = item @@ -5836,9 +5836,9 @@ impl AccountsDb { if !self.account_indexes.is_empty() { for (pubkey, (_, _store_id, stored_account)) in accounts_map.iter() { self.accounts_index.update_secondary_indexes( - &pubkey, + pubkey, &stored_account.account_meta.owner, - &stored_account.data, + stored_account.data, &self.account_indexes, ); } @@ -5876,7 +5876,7 @@ impl AccountsDb { for (id, store) in slot_stores.value().read().unwrap().iter() { // Should be default at this point assert_eq!(store.alive_bytes(), 0); - if let Some((stored_size, count)) = stored_sizes_and_counts.get(&id) { + if let Some((stored_size, count)) = stored_sizes_and_counts.get(id) { trace!("id: {} setting count: {} cur: {}", id, count, store.count(),); store.count_and_status.write().unwrap().0 = *count; store.alive_bytes.store(*stored_size, Ordering::SeqCst); @@ -5967,7 +5967,7 @@ impl AccountsDb { pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option { let ancestors = vec![(slot, 1)].into_iter().collect(); - let result = self.accounts_index.get(&pubkey, Some(&ancestors), None); + let result = self.accounts_index.get(pubkey, Some(&ancestors), None); result.map(|(list, index)| list.slot_list()[index].1.store_id) } @@ -7232,7 +7232,7 @@ pub mod tests { for (i, key) in keys.iter().enumerate() { assert_eq!( accounts - .load_without_fixed_root(&ancestors, &key) + .load_without_fixed_root(&ancestors, key) .unwrap() .0 .lamports(), @@ -7398,7 +7398,7 @@ pub mod tests { } fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount { - self.accounts_index.ref_count_from_storage(&pubkey) + self.accounts_index.ref_count_from_storage(pubkey) } } @@ -9314,7 +9314,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9325,7 +9325,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9383,7 +9383,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9394,7 +9394,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, 
&account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9441,7 +9441,7 @@ pub mod tests { let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -9455,7 +9455,7 @@ pub mod tests { let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id2, dummy_size, @@ -9477,7 +9477,7 @@ pub mod tests { let dummy_size = 4 * PAGE_SIZE; let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -9492,7 +9492,7 @@ pub mod tests { let dummy_id2 = 44; let dummy_slot2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot2, dummy_id2, dummy_size, @@ -9536,7 +9536,7 @@ pub mod tests { current_slot += 1; for pubkey in &pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } let shrink_slot = current_slot; accounts.get_accounts_delta_hash(current_slot); @@ -9547,7 +9547,7 @@ pub mod tests { let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink]; for pubkey in updated_pubkeys { - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, &account)]); } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); @@ -9690,7 +9690,7 @@ pub mod tests { info!( "store: {:?} : {:?}", store, - store_counts.get(&store).unwrap() + store_counts.get(store).unwrap() ); } for x in 0..3 { @@ -10950,7 +10950,7 @@ pub mod tests { let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id1, dummy_size, @@ -10958,7 +10958,7 @@ pub mod tests { let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - &dummy_path, + dummy_path, dummy_slot, dummy_id2, dummy_size, @@ -11492,7 +11492,7 @@ pub mod tests { let mut accounts = AccountsDb::new_single(); let dummy_path = Path::new(""); let dummy_size = 2 * PAGE_SIZE; - let entry = Arc::new(AccountStorageEntry::new(&dummy_path, 0, 1, dummy_size)); + let entry = Arc::new(AccountStorageEntry::new(dummy_path, 0, 1, dummy_size)); match accounts.shrink_ratio { AccountShrinkThreshold::TotalSpace { shrink_ratio } => { assert_eq!( diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index 4d402120f18f60..6aad8ea9cff3af 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -255,7 +255,7 @@ impl AccountsHash { let mut hasher = Hasher::default(); for item in hashes.iter().take(end_index).skip(start_index) { - let h = extractor(&item); + let h = extractor(item); hasher.hash(h.as_ref()); } diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index df2a6e517e1ec5..a58ee0d79c85fa 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -855,7 +855,7 @@ impl AccountsIndex { read_lock_timer.stop(); read_lock_elapsed += read_lock_timer.as_us(); let mut latest_slot_timer = Measure::start("latest_slot"); - if let Some(index) = self.latest_slot(Some(ancestors), &list_r, max_root) { + if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) { latest_slot_timer.stop(); latest_slot_elapsed += latest_slot_timer.as_us(); let mut load_account_timer = Measure::start("load_account"); @@ -1076,7 +1076,7 @@ impl AccountsIndex { max: Option, ) -> (SlotList, RefCount) { ( - 
self.get_rooted_entries(&locked_account_entry.slot_list(), max), + self.get_rooted_entries(locked_account_entry.slot_list(), max), locked_account_entry.ref_count().load(Ordering::Relaxed), ) } @@ -1093,7 +1093,7 @@ impl AccountsIndex { if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) { write_account_map_entry.slot_list_mut(|slot_list| { slot_list.retain(|(slot, item)| { - let should_purge = slots_to_purge.contains(&slot); + let should_purge = slots_to_purge.contains(slot); if should_purge { reclaims.push((*slot, item.clone())); false @@ -1147,7 +1147,7 @@ impl AccountsIndex { Some(inner) => inner, None => self.roots_tracker.read().unwrap(), }; - if lock.roots.contains(&slot) { + if lock.roots.contains(slot) { rv = Some(i); current_max = *slot; } @@ -1402,7 +1402,7 @@ impl AccountsIndex { ) { let roots_tracker = &self.roots_tracker.read().unwrap(); let newest_root_in_slot_list = - Self::get_newest_root_in_slot_list(&roots_tracker.roots, &slot_list, max_clean_root); + Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root); let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root); let mut purged_slots: HashSet = HashSet::new(); @@ -1865,7 +1865,7 @@ pub mod tests { fn remove(&mut self, slot: &u64) -> bool { let result = self.bitfield.remove(slot); assert_eq!(result, self.hash_set.remove(slot)); - assert!(!self.bitfield.contains(&slot)); + assert!(!self.bitfield.contains(slot)); self.compare(); result } @@ -2130,7 +2130,7 @@ pub mod tests { compare_internal(hashset, bitfield); let clone = bitfield.clone(); compare_internal(hashset, &clone); - assert!(clone.eq(&bitfield)); + assert!(clone.eq(bitfield)); assert_eq!(clone, *bitfield); } @@ -2181,8 +2181,8 @@ pub mod tests { // remove the rest, including a call that removes slot again for item in all.iter() { - assert!(tester.remove(&item)); - assert!(!tester.remove(&item)); + assert!(tester.remove(item)); + assert!(!tester.remove(item)); } let min = max + ((width * 2) as u64) + 3; @@ -2457,15 +2457,15 @@ pub mod tests { assert!(index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); - assert!(index.get(&pubkey, Some(&ancestors), None).is_none()); - assert!(index.get(&pubkey, None, None).is_none()); + assert!(index.get(pubkey, Some(&ancestors), None).is_none()); + assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index.get(&pubkey, Some(&ancestors), None).is_some()); - assert_eq!(index.ref_count_from_storage(&pubkey), 1); + assert!(index.get(pubkey, Some(&ancestors), None).is_some()); + assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); @@ -2478,15 +2478,15 @@ pub mod tests { assert!(!index.zero_lamport_pubkeys().is_empty()); let mut ancestors = Ancestors::default(); - assert!(index.get(&pubkey, Some(&ancestors), None).is_none()); - assert!(index.get(&pubkey, None, None).is_none()); + assert!(index.get(pubkey, Some(&ancestors), None).is_none()); + assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); - assert!(index.get(&pubkey, Some(&ancestors), None).is_some()); - assert_eq!(index.ref_count_from_storage(&pubkey), 0); // cached, so 0 + assert!(index.get(pubkey, 
Some(&ancestors), None).is_some()); + assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0 index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } @@ -3600,7 +3600,7 @@ pub mod tests { // Both pubkeys will now be present in the index check_secondary_index_mapping_correct( - &secondary_index, + secondary_index, &[secondary_key1, secondary_key2], &account_key, ); diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 048e07cdb416ea..e75ea8d05941b1 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -166,7 +166,7 @@ pub mod tests { let key = item.0; min = std::cmp::min(min, *key); max = std::cmp::max(max, *key); - assert!(ancestors.get(&key)); + assert!(ancestors.get(key)); } for slot in min - 1..max + 2 { assert_eq!(ancestors.get(&slot), hashset.contains(&slot)); diff --git a/runtime/src/append_vec.rs b/runtime/src/append_vec.rs index 2ede4b6aa2fb90..1f8f9744eae8f9 100644 --- a/runtime/src/append_vec.rs +++ b/runtime/src/append_vec.rs @@ -778,7 +778,7 @@ pub mod tests { fn test_new_from_file_crafted_zero_lamport_account() { let file = get_append_vec_path("test_append"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let pubkey = solana_sdk::pubkey::new_rand(); @@ -806,7 +806,7 @@ pub mod tests { fn test_new_from_file_crafted_data_len() { let file = get_append_vec_path("test_new_from_file_crafted_data_len"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let crafted_data_len = 1; @@ -834,7 +834,7 @@ pub mod tests { fn test_new_from_file_too_large_data_len() { let file = get_append_vec_path("test_new_from_file_too_large_data_len"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); let too_large_data_len = u64::max_value(); @@ -860,7 +860,7 @@ pub mod tests { fn test_new_from_file_crafted_executable() { let file = get_append_vec_path("test_new_from_crafted_executable"); let path = &file.path; - let mut av = AppendVec::new(&path, true, 1024 * 1024); + let mut av = AppendVec::new(path, true, 1024 * 1024); av.set_no_remove_on_drop(); av.append_account_test(&create_test_account(10)).unwrap(); { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d21d66b8567592..e3cf34c723c1aa 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -1003,7 +1003,7 @@ impl Default for BlockhashQueue { impl Bank { pub fn new(genesis_config: &GenesisConfig) -> Self { Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1017,7 +1017,7 @@ impl Bank { pub fn new_no_wallclock_throttle(genesis_config: &GenesisConfig) -> Self { let mut bank = Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1040,7 +1040,7 @@ impl Bank { shrink_ratio: AccountShrinkThreshold, ) -> Self { Self::new_with_paths( - &genesis_config, + genesis_config, Vec::new(), &[], None, @@ -1698,7 +1698,7 @@ impl Bank { // if I'm the first Bank in an epoch, ensure stake_history is updated self.update_sysvar_account(&sysvar::stake_history::id(), |account| { create_account::( - &self.stakes.read().unwrap().history(), + self.stakes.read().unwrap().history(), self.inherit_specially_retained_account_fields(account), ) }); @@ -1756,7 +1756,7 @@ impl 
Bank { .feature_set .full_inflation_features_enabled() .iter() - .filter_map(|id| self.feature_set.activated_slot(&id)) + .filter_map(|id| self.feature_set.activated_slot(id)) .collect::>(); slots.sort_unstable(); slots.get(0).cloned().unwrap_or_else(|| { @@ -1903,7 +1903,7 @@ impl Bank { .iter() .for_each(|(stake_pubkey, delegation)| { match ( - self.get_account_with_fixed_root(&stake_pubkey), + self.get_account_with_fixed_root(stake_pubkey), self.get_account_with_fixed_root(&delegation.voter_pubkey), ) { (Some(stake_account), Some(vote_account)) => { @@ -1969,8 +1969,8 @@ impl Bank { }) .map(|(stake_account, vote_account)| { stake_state::calculate_points( - &stake_account, - &vote_account, + stake_account, + vote_account, Some(&stake_history), fix_stake_deactivate, ) @@ -2009,7 +2009,7 @@ impl Bank { fix_stake_deactivate, ); if let Ok((stakers_reward, _voters_reward)) = redeemed { - self.store_account(&stake_pubkey, &stake_account); + self.store_account(stake_pubkey, stake_account); vote_account_changed = true; if stakers_reward > 0 { @@ -2043,7 +2043,7 @@ impl Bank { }, )); } - self.store_account(&vote_pubkey, &vote_account); + self.store_account(vote_pubkey, vote_account); } } self.rewards.write().unwrap().append(&mut rewards); @@ -2286,7 +2286,7 @@ impl Bank { self.fee_calculator = self.fee_rate_governor.create_fee_calculator(); for (pubkey, account) in genesis_config.accounts.iter() { - if self.get_account(&pubkey).is_some() { + if self.get_account(pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } self.store_account(pubkey, &AccountSharedData::from(account.clone())); @@ -2297,7 +2297,7 @@ impl Bank { self.update_fees(); for (pubkey, account) in genesis_config.rewards_pools.iter() { - if self.get_account(&pubkey).is_some() { + if self.get_account(pubkey).is_some() { panic!("{} repeated in genesis config", pubkey); } self.store_account(pubkey, &AccountSharedData::from(account.clone())); @@ -2344,11 +2344,11 @@ impl Bank { // NOTE: must hold idempotent for the same set of arguments pub fn add_native_program(&self, name: &str, program_id: &Pubkey, must_replace: bool) { let existing_genuine_program = - if let Some(mut account) = self.get_account_with_fixed_root(&program_id) { + if let Some(mut account) = self.get_account_with_fixed_root(program_id) { // it's very unlikely to be squatted at program_id as non-system account because of burden to // find victim's pubkey/hash. So, when account.owner is indeed native_loader's, it's // safe to assume it's a genuine program. - if native_loader::check_id(&account.owner()) { + if native_loader::check_id(account.owner()) { Some(account) } else { // malicious account is pre-occupying at program_id @@ -2359,7 +2359,7 @@ impl Bank { // Resetting account balance to 0 is needed to really purge from AccountsDb and // flush the Stakes cache account.set_lamports(0); - self.store_account(&program_id, &account); + self.store_account(program_id, &account); None } } else { @@ -2375,7 +2375,7 @@ impl Bank { name, program_id ), Some(account) => { - if *name == String::from_utf8_lossy(&account.data()) { + if *name == String::from_utf8_lossy(account.data()) { // nop; it seems that already AccountsDb is updated. 
return; } @@ -2415,7 +2415,7 @@ impl Bank { name, self.inherit_specially_retained_account_fields(&existing_genuine_program), ); - self.store_account_and_update_capitalization(&program_id, &account); + self.store_account_and_update_capitalization(program_id, &account); debug!("Added native program {} under {:?}", name, program_id); } @@ -2584,7 +2584,7 @@ impl Bank { hashed_txs.as_transactions_iter(), self.demote_sysvar_write_locks(), ); - TransactionBatch::new(lock_results, &self, Cow::Owned(hashed_txs)) + TransactionBatch::new(lock_results, self, Cow::Owned(hashed_txs)) } pub fn prepare_hashed_batch<'a, 'b>( @@ -2595,7 +2595,7 @@ impl Bank { hashed_txs.as_transactions_iter(), self.demote_sysvar_write_locks(), ); - TransactionBatch::new(lock_results, &self, Cow::Borrowed(hashed_txs)) + TransactionBatch::new(lock_results, self, Cow::Borrowed(hashed_txs)) } pub(crate) fn prepare_simulation_batch<'a, 'b>( @@ -2604,7 +2604,7 @@ impl Bank { ) -> TransactionBatch<'a, 'b> { let mut batch = TransactionBatch::new( vec![tx.sanitize().map_err(|e| e.into())], - &self, + self, Cow::Owned(vec![HashedTransaction::from(tx)]), ); batch.needs_unlock = false; @@ -2618,7 +2618,7 @@ impl Bank { ) -> (Result<()>, TransactionLogMessages, Vec) { assert!(self.is_frozen(), "simulation bank must be frozen"); - let batch = self.prepare_simulation_batch(&transaction); + let batch = self.prepare_simulation_batch(transaction); let mut timings = ExecuteTimings::default(); @@ -2693,7 +2693,7 @@ impl Bank { let hash_age = hash_queue.check_hash_age(&message.recent_blockhash, max_age); if hash_age == Some(true) { (Ok(()), None) - } else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(&tx) { + } else if let Some((pubkey, acc)) = self.check_tx_durable_nonce(tx) { (Ok(()), Some(NonceRollbackPartial::new(pubkey, acc))) } else if hash_age == Some(false) { error_counters.blockhash_too_old += 1; @@ -2784,10 +2784,10 @@ impl Bank { } pub fn check_tx_durable_nonce(&self, tx: &Transaction) -> Option<(Pubkey, AccountSharedData)> { - transaction::uses_durable_nonce(&tx) - .and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(&nonce_ix, &tx)) + transaction::uses_durable_nonce(tx) + .and_then(|nonce_ix| transaction::get_nonce_pubkey_from_instruction(nonce_ix, tx)) .and_then(|nonce_pubkey| { - self.get_account(&nonce_pubkey) + self.get_account(nonce_pubkey) .map(|acc| (*nonce_pubkey, acc)) }) .filter(|(_pubkey, nonce_account)| { @@ -3427,7 +3427,7 @@ impl Bank { hashed_txs.len() ); timings.store_us += write_time.as_us(); - self.update_transaction_statuses(hashed_txs, &executed); + self.update_transaction_statuses(hashed_txs, executed); let fee_collection_results = self.filter_program_errors_and_collect_fee(hashed_txs.as_transactions_iter(), executed); @@ -4185,7 +4185,7 @@ impl Bank { pubkey: &Pubkey, new_account: &AccountSharedData, ) { - if let Some(old_account) = self.get_account_with_fixed_root(&pubkey) { + if let Some(old_account) = self.get_account_with_fixed_root(pubkey) { match new_account.lamports().cmp(&old_account.lamports()) { std::cmp::Ordering::Greater => { self.capitalization @@ -5039,7 +5039,7 @@ impl Bank { pub fn deactivate_feature(&mut self, id: &Pubkey) { let mut feature_set = Arc::make_mut(&mut self.feature_set).clone(); - feature_set.active.remove(&id); + feature_set.active.remove(id); feature_set.inactive.insert(*id); self.feature_set = Arc::new(feature_set); } @@ -5139,8 +5139,8 @@ impl Bank { ) { let feature_builtins = self.feature_builtins.clone(); for (builtin, feature, activation_type) in 
feature_builtins.iter() { - let should_populate = init_or_warp && self.feature_set.is_active(&feature) - || !init_or_warp && new_feature_activations.contains(&feature); + let should_populate = init_or_warp && self.feature_set.is_active(feature) + || !init_or_warp && new_feature_activations.contains(feature); if should_populate { match activation_type { ActivationType::NewProgram => self.add_builtin( @@ -5242,10 +5242,10 @@ impl Bank { if purge_window_epoch { for reward_pubkey in self.rewards_pool_pubkeys.iter() { - if let Some(mut reward_account) = self.get_account_with_fixed_root(&reward_pubkey) { + if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) { if reward_account.lamports() == u64::MAX { reward_account.set_lamports(0); - self.store_account(&reward_pubkey, &reward_account); + self.store_account(reward_pubkey, &reward_account); // Adjust capitalization.... it has been wrapping, reducing the real capitalization by 1-lamport self.capitalization.fetch_add(1, Relaxed); info!( @@ -5290,7 +5290,7 @@ impl Drop for Bank { pub fn goto_end_of_slot(bank: &mut Bank) { let mut tick_hash = bank.last_blockhash(); loop { - tick_hash = hashv(&[&tick_hash.as_ref(), &[42]]); + tick_hash = hashv(&[tick_hash.as_ref(), &[42]]); bank.register_tick(&tick_hash); if tick_hash == bank.last_blockhash() { bank.freeze(); @@ -7067,7 +7067,7 @@ pub(crate) mod tests { .accounts .accounts_db .accounts_index - .get(&pubkey, Some(&ancestors), None) + .get(pubkey, Some(ancestors), None) .unwrap(); locked_entry .slot_list() @@ -7295,7 +7295,7 @@ pub(crate) mod tests { .map(move |(_stake_pubkey, stake_account)| (stake_account, vote_account)) }) .map(|(stake_account, vote_account)| { - stake_state::calculate_points(&stake_account, &vote_account, None, true) + stake_state::calculate_points(stake_account, vote_account, None, true) .unwrap_or(0) }) .sum(); @@ -9432,11 +9432,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data()) ); // Re-adding builtin programs should be no-op @@ -9452,11 +9452,11 @@ pub(crate) mod tests { assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", - String::from_utf8_lossy(&bank.get_account(&vote_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&vote_id).unwrap_or_default().data()) ); assert_eq!( "mock_program2", - String::from_utf8_lossy(&bank.get_account(&stake_id).unwrap_or_default().data()) + String::from_utf8_lossy(bank.get_account(&stake_id).unwrap_or_default().data()) ); } @@ -9593,7 +9593,7 @@ pub(crate) mod tests { } fn get_nonce_account(bank: &Bank, nonce_pubkey: &Pubkey) -> Option { - bank.get_account(&nonce_pubkey).and_then(|acc| { + bank.get_account(nonce_pubkey).and_then(|acc| { let state = StateMut::::state(&acc).map(|v| v.convert_to_current()); match state { @@ -10084,7 +10084,7 @@ pub(crate) mod tests { let pubkey2 = solana_sdk::pubkey::new_rand(); let keypair0_account = AccountSharedData::new(8, 0, &Pubkey::default()); let keypair1_account = AccountSharedData::new(9, 0, &Pubkey::default()); - let account0 = AccountSharedData::new(11, 0, 
&&Pubkey::default()); + let account0 = AccountSharedData::new(11, 0, &Pubkey::default()); bank0.store_account(&keypair0.pubkey(), &keypair0_account); bank0.store_account(&keypair1.pubkey(), &keypair1_account); bank0.store_account(&pubkey0, &account0); @@ -11947,7 +11947,7 @@ pub(crate) mod tests { // Write accounts to the store for key in &all_pubkeys { - bank0.store_account(&key, &starting_account); + bank0.store_account(key, &starting_account); } // Set aside a subset of accounts to modify diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 08ea15147ef751..012f7244fd7ca9 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -516,7 +516,7 @@ mod tests { slot: child.slot(), timestamp: recent_timestamp + additional_timestamp_secs, }, - &child, + child, &voting_keypair.pubkey(), ); } diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 3185d4bdb3086b..2fdc4ba91a9f4c 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -24,7 +24,7 @@ impl EpochStakes { pub fn new(stakes: &Stakes, leader_schedule_epoch: Epoch) -> Self { let epoch_vote_accounts = Stakes::vote_accounts(stakes); let (total_stake, node_id_to_vote_accounts, epoch_authorized_voters) = - Self::parse_epoch_vote_accounts(&epoch_vote_accounts, leader_schedule_epoch); + Self::parse_epoch_vote_accounts(epoch_vote_accounts, leader_schedule_epoch); Self { stakes: Arc::new(stakes.clone()), total_stake, diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 29457764cf3a1c..eea539905cb834 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -191,15 +191,15 @@ pub fn create_genesis_config_with_leader_ex( mut initial_accounts: Vec<(Pubkey, AccountSharedData)>, ) -> GenesisConfig { let validator_vote_account = vote_state::create_account( - &validator_vote_account_pubkey, - &validator_pubkey, + validator_vote_account_pubkey, + validator_pubkey, 0, validator_stake_lamports, ); let validator_stake_account = stake_state::create_account( validator_stake_account_pubkey, - &validator_vote_account_pubkey, + validator_vote_account_pubkey, &validator_vote_account, &rent, validator_stake_lamports, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index 2d71219e2d1955..03f213417e27e2 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -280,7 +280,7 @@ pub fn open_genesis_config( ledger_path: &Path, max_genesis_archive_unpacked_size: u64, ) -> GenesisConfig { - GenesisConfig::load(&ledger_path).unwrap_or_else(|load_err| { + GenesisConfig::load(ledger_path).unwrap_or_else(|load_err| { let genesis_package = ledger_path.join(DEFAULT_GENESIS_ARCHIVE); unpack_genesis_archive( &genesis_package, @@ -296,7 +296,7 @@ pub fn open_genesis_config( }); // loading must succeed at this moment - GenesisConfig::load(&ledger_path).unwrap() + GenesisConfig::load(ledger_path).unwrap() }) } diff --git a/runtime/src/loader_utils.rs b/runtime/src/loader_utils.rs index 9f304df037db6e..7580ef873b75f6 100644 --- a/runtime/src/loader_utils.rs +++ b/runtime/src/loader_utils.rs @@ -70,7 +70,7 @@ pub fn load_buffer_account( bank_client .send_and_confirm_message( - &[from_keypair, &buffer_keypair], + &[from_keypair, buffer_keypair], Message::new( &bpf_loader_upgradeable::create_buffer( &from_keypair.pubkey(), @@ -102,7 +102,7 @@ pub fn load_buffer_account( Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, &buffer_authority_keypair], message) + 
.send_and_confirm_message(&[from_keypair, buffer_authority_keypair], message) .unwrap(); offset += chunk_size as u32; } @@ -121,7 +121,7 @@ pub fn load_upgradeable_program( load_buffer_account( bank_client, - &from_keypair, + from_keypair, buffer_keypair, authority_keypair, &program, ); @@ -147,7 +147,7 @@ ); bank_client .send_and_confirm_message( - &[from_keypair, &executable_keypair, &authority_keypair], + &[from_keypair, executable_keypair, authority_keypair], message, ) .unwrap(); @@ -163,15 +163,15 @@ pub fn upgrade_program( ) { let message = Message::new( &[bpf_loader_upgradeable::upgrade( - &program_pubkey, - &buffer_pubkey, + program_pubkey, + buffer_pubkey, &authority_keypair.pubkey(), - &spill_pubkey, + spill_pubkey, )], Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, &authority_keypair], message) + .send_and_confirm_message(&[from_keypair, authority_keypair], message) .unwrap(); } @@ -191,7 +191,7 @@ pub fn set_upgrade_authority( Some(&from_keypair.pubkey()), ); bank_client - .send_and_confirm_message(&[from_keypair, &current_authority_keypair], message) + .send_and_confirm_message(&[from_keypair, current_authority_keypair], message) .unwrap(); } diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 19f60e214b36cc..80b5cab0b1e98e 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -115,7 +115,7 @@ impl PreAccount { && (!is_writable // line coverage used to get branch coverage || pre.executable() || program_id != pre.owner() - || !Self::is_zeroed(&post.data())) + || !Self::is_zeroed(post.data())) { return Err(InstructionError::ModifiedProgramId); } @@ -447,7 +447,7 @@ impl<'a> InvokeContext for ThisInvokeContext<'a> { self.executors.borrow_mut().insert(*pubkey, executor); } fn get_executor(&self, pubkey: &Pubkey) -> Option> { - self.executors.borrow().get(&pubkey) + self.executors.borrow().get(pubkey) } fn record_instruction(&self, instruction: &Instruction) { if let Some(recorder) = &self.instruction_recorder { @@ -650,7 +650,7 @@ impl MessageProcessor { if id == root_id { invoke_context.remove_first_keyed_account()?; // Call the builtin program - return process_instruction(&program_id, instruction_data, invoke_context); + return process_instruction(program_id, instruction_data, invoke_context); } } // Call the program via the native loader @@ -664,7 +664,7 @@ impl MessageProcessor { for (id, process_instruction) in &self.programs { if id == owner_id { // Call the program via a builtin loader - return process_instruction(&program_id, instruction_data, invoke_context); + return process_instruction(program_id, instruction_data, invoke_context); } } } @@ -775,7 +775,7 @@ impl MessageProcessor { .map(|index| keyed_account_at_index(keyed_accounts, *index)) .collect::, InstructionError>>()?; let (message, callee_program_id, _) = - Self::create_message(&instruction, &keyed_accounts, &signers, &invoke_context)?; + Self::create_message(&instruction, &keyed_accounts, signers, &invoke_context)?; let keyed_accounts = invoke_context.get_keyed_accounts()?; let mut caller_write_privileges = keyed_account_indices .iter() @@ -1029,7 +1029,7 @@ impl MessageProcessor { let account = accounts[account_index].borrow(); pre_accounts[unique_index] .verify( - &program_id, + program_id, message.is_writable(account_index, demote_sysvar_write_locks), rent, &account, @@ -1095,7 +1095,7 @@ impl MessageProcessor { } let account = account.borrow(); pre_account - 
.verify(&program_id, is_writable, &rent, &account, timings, false) + .verify(program_id, is_writable, rent, &account, timings, false) .map_err(|err| { ic_logger_msg!(logger, "failed to verify account {}: {}", key, err); err diff --git a/runtime/src/native_loader.rs b/runtime/src/native_loader.rs index 0e51a0f8ba7893..7a313945d072b0 100644 --- a/runtime/src/native_loader.rs +++ b/runtime/src/native_loader.rs @@ -110,7 +110,7 @@ impl NativeLoader { if let Some(entrypoint) = cache.get(name) { Ok(entrypoint.clone()) } else { - match Self::library_open(&Self::create_path(&name)?) { + match Self::library_open(&Self::create_path(name)?) { Ok(library) => { let result = unsafe { library.get::(name.as_bytes()) }; match result { diff --git a/runtime/src/non_circulating_supply.rs b/runtime/src/non_circulating_supply.rs index 531d5327e85637..03196b67ee974e 100644 --- a/runtime/src/non_circulating_supply.rs +++ b/runtime/src/non_circulating_supply.rs @@ -69,7 +69,7 @@ pub fn calculate_non_circulating_supply(bank: &Arc) -> NonCirculatingSuppl let lamports = non_circulating_accounts_set .iter() - .map(|pubkey| bank.get_balance(&pubkey)) + .map(|pubkey| bank.get_balance(pubkey)) .sum(); NonCirculatingSupply { diff --git a/runtime/src/secondary_index.rs b/runtime/src/secondary_index.rs index c6d45961b49c14..37199f2668517c 100644 --- a/runtime/src/secondary_index.rs +++ b/runtime/src/secondary_index.rs @@ -134,10 +134,10 @@ impl .downgrade() }); - let should_insert = !outer_keys.read().unwrap().contains(&key); + let should_insert = !outer_keys.read().unwrap().contains(key); if should_insert { let mut w_outer_keys = outer_keys.write().unwrap(); - if !w_outer_keys.contains(&key) { + if !w_outer_keys.contains(key) { w_outer_keys.push(*key); } } @@ -175,11 +175,11 @@ impl let is_outer_key_empty = { let inner_key_map = self .index - .get_mut(&outer_key) + .get_mut(outer_key) .expect("If we're removing a key, then it must have an entry in the map"); // If we deleted a pubkey from the reverse_index, then the corresponding entry // better exist in this index as well or the two indexes are out of sync! 
- assert!(inner_key_map.value().remove_inner_key(&removed_inner_key)); + assert!(inner_key_map.value().remove_inner_key(removed_inner_key)); inner_key_map.is_empty() }; diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index b822da5825f72a..1002c1cddb40a2 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -165,7 +165,7 @@ impl<'a> From> for SerializableVersionedB } Self { blockhash_queue: rhs.blockhash_queue, - ancestors: &rhs.ancestors, + ancestors: rhs.ancestors, hash: rhs.hash, parent_hash: rhs.parent_hash, parent_slot: rhs.parent_slot, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 6d8436f313e80c..c7bf8a9bda8d72 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -250,7 +250,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( accountsdb_to_stream( SerdeStyle::Newer, &mut writer, - &accounts, + accounts, slot, &snapshot_storages, ) @@ -261,7 +261,7 @@ pub(crate) fn reconstruct_accounts_db_via_serialization( let copied_accounts = TempDir::new().unwrap(); // Simulate obtaining a copy of the AppendVecs from a tarball - let unpacked_append_vec_map = copy_append_vecs(&accounts, copied_accounts.path()).unwrap(); + let unpacked_append_vec_map = copy_append_vecs(accounts, copied_accounts.path()).unwrap(); let mut accounts_db = accountsdb_from_stream(SerdeStyle::Newer, &mut reader, &[], unpacked_append_vec_map) .unwrap(); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index fb9218ba45d0e5..62f49c27ed398b 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -622,7 +622,7 @@ pub fn bank_from_archive>( let mut untar = Measure::start("untar"); let unpacked_append_vec_map = untar_snapshot_in( &snapshot_tar, - &unpack_dir.as_ref(), + unpack_dir.as_ref(), account_paths, archive_format, )?; @@ -913,7 +913,7 @@ pub fn verify_snapshot_archive( let unpack_dir = temp_dir.path(); untar_snapshot_in( snapshot_archive, - &unpack_dir, + unpack_dir, &[unpack_dir.to_path_buf()], archive_format, ) @@ -953,7 +953,7 @@ pub fn snapshot_bank( ) -> Result<()> { let storages: Vec<_> = root_bank.get_snapshot_storages(); let mut add_snapshot_time = Measure::start("add-snapshot-ms"); - add_snapshot(snapshot_path, &root_bank, &storages, snapshot_version)?; + add_snapshot(snapshot_path, root_bank, &storages, snapshot_version)?; add_snapshot_time.stop(); inc_new_counter_info!("add-snapshot-ms", add_snapshot_time.as_ms() as usize); @@ -964,7 +964,7 @@ pub fn snapshot_bank( .expect("no snapshots found in config snapshot_path"); let package = package_snapshot( - &root_bank, + root_bank, latest_slot_snapshot_paths, snapshot_path, status_cache_slot_deltas, @@ -1003,9 +1003,9 @@ pub fn bank_to_snapshot_archive, Q: AsRef>( let temp_dir = tempfile::tempdir_in(snapshot_path)?; let storages: Vec<_> = bank.get_snapshot_storages(); - let slot_snapshot_paths = add_snapshot(&temp_dir, &bank, &storages, snapshot_version)?; + let slot_snapshot_paths = add_snapshot(&temp_dir, bank, &storages, snapshot_version)?; let package = package_snapshot( - &bank, + bank, &slot_snapshot_paths, &temp_dir, bank.src.slot_deltas(&bank.src.roots()), diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index 7148094b316ac9..67f0fdfab6b768 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -134,7 +134,7 @@ impl Stakes { // when account is removed (lamports == 0 or data uninitialized), don't read so that // 
given `pubkey` can be used for any owner in the future, while not affecting Stakes. if account.lamports() != 0 - && !(check_vote_init && VoteState::is_uninitialized_no_deser(&account.data())) + && !(check_vote_init && VoteState::is_uninitialized_no_deser(account.data())) { let stake = old.as_ref().map_or_else( || { @@ -258,8 +258,8 @@ pub mod tests { stake_pubkey, stake_state::create_account( &stake_pubkey, - &vote_pubkey, - &vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), + vote_pubkey, + &vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), &Rent::free(), stake, ), @@ -290,8 +290,8 @@ pub mod tests { stake_pubkey, stake_state::create_account_with_activation_epoch( &stake_pubkey, - &vote_pubkey, - &vote_state::create_account(&vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), + vote_pubkey, + &vote_state::create_account(vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, 1), &Rent::free(), stake, epoch, diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 96fb54cd8aeb3b..d07ee5356e8765 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -251,7 +251,7 @@ impl StatusCache { .iter() .for_each(|(tx_hash, (key_index, statuses))| { for (key_slice, res) in statuses.iter() { - self.insert_with_slice(&tx_hash, *slot, *key_index, *key_slice, res.clone()) + self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) } }); if *is_root { diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index bab232ca5c6c39..a8402bfc94730b 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -79,7 +79,7 @@ fn allocate( // if it looks like the `to` account is already in use, bail // (note that the id check is also enforced by message_processor) - if !account.data().is_empty() || !system_program::check_id(&account.owner()) { + if !account.data().is_empty() || !system_program::check_id(account.owner()) { ic_msg!( invoke_context, "Allocate: account {:?} already in use", @@ -115,13 +115,13 @@ fn assign( return Ok(()); } - if !address.is_signer(&signers) { + if !address.is_signer(signers) { ic_msg!(invoke_context, "Assign: account {:?} must sign", address); return Err(InstructionError::MissingRequiredSignature); } // guard against sysvars being made - if sysvar::check_id(&owner) { + if sysvar::check_id(owner) { ic_msg!(invoke_context, "Assign: cannot assign to sysvar, {}", owner); return Err(SystemError::InvalidProgramId.into()); } @@ -300,13 +300,13 @@ pub fn process_instruction( let from = keyed_account_at_index(keyed_accounts, 0)?; let to = keyed_account_at_index(keyed_accounts, 1)?; let to_address = Address::create( - &to.unsigned_key(), + to.unsigned_key(), Some((&base, &seed, &owner)), invoke_context, )?; create_account( from, - &to, + to, &to_address, lamports, space, @@ -736,11 +736,11 @@ mod tests { let result = create_account( &KeyedAccount::new(&from, true, &from_account), &KeyedAccount::new(&to, false, &to_account), - &address, + address, 50, MAX_PERMITTED_DATA_LENGTH + 1, &system_program::id(), - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert!(result.is_err()); @@ -753,11 +753,11 @@ mod tests { let result = create_account( &KeyedAccount::new(&from, true, &from_account), &KeyedAccount::new(&to, false, &to_account), - &address, + address, 50, MAX_PERMITTED_DATA_LENGTH, &system_program::id(), - &signers, + signers, &MockInvokeContext::new(vec![]), ); 
assert!(result.is_ok()); @@ -790,7 +790,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -809,7 +809,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -827,7 +827,7 @@ mod tests { 50, 2, &new_owner, - &signers, + signers, &MockInvokeContext::new(vec![]), ); assert_eq!(result, Err(SystemError::AccountAlreadyInUse.into())); @@ -1141,7 +1141,7 @@ mod tests { transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 50, @@ -1158,7 +1158,7 @@ mod tests { let result = transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 100, @@ -1173,7 +1173,7 @@ mod tests { assert!(transfer_with_seed( &from_keyed_account, &from_base_keyed_account, - &from_seed, + from_seed, &from_owner, &to_keyed_account, 0, diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index 977a65a42becc1..7439d0a8477c6b 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -52,7 +52,7 @@ fn test_shrink_and_clean() { for (pubkey, account) in alive_accounts.iter_mut() { account.checked_sub_lamports(1).unwrap(); - accounts.store_uncached(current_slot, &[(&pubkey, &account)]); + accounts.store_uncached(current_slot, &[(pubkey, account)]); } accounts.add_root(current_slot); } @@ -121,9 +121,9 @@ fn test_bad_bank_hash() { for (key, account) in &account_refs { assert_eq!( - db.load_account_hash(&ancestors, &key, None, LoadHint::Unspecified) + db.load_account_hash(&ancestors, key, None, LoadHint::Unspecified) .unwrap(), - AccountsDb::hash_account(some_slot, *account, &key) + AccountsDb::hash_account(some_slot, *account, key) ); } existing.clear(); diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs index 7c3d812641c1d4..487397fb38207c 100644 --- a/runtime/tests/stake.rs +++ b/runtime/tests/stake.rs @@ -28,7 +28,7 @@ fn next_epoch(bank: &Arc) -> Arc { bank.squash(); Arc::new(Bank::new_from_parent( - &bank, + bank, &Pubkey::default(), bank.get_slots_in_epoch(bank.epoch()) + bank.slot(), )) diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index e6e0a0e3ef9abb..746b7bf17f02da 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -112,7 +112,7 @@ fn install_if_missing( url.push_str(version); url.push('/'); url.push_str(file.to_str().unwrap()); - download_file(&url.as_str(), &file, true, &mut None)?; + download_file(url.as_str(), file, true, &mut None)?; fs::create_dir_all(&target_path).map_err(|err| err.to_string())?; let zip = File::open(&file).map_err(|err| err.to_string())?; let tar = BzDecoder::new(BufReader::new(zip)); @@ -423,14 +423,14 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m "solana-bpf-tools-linux.tar.bz2" }; install_if_missing( - &config, + config, "bpf-tools", "v1.8", "https://github.com/solana-labs/bpf-tools/releases/download", &PathBuf::from(bpf_tools_filename), ) .expect("Failed to install bpf-tools"); - link_bpf_toolchain(&config); + link_bpf_toolchain(config); let llvm_bin = config .bpf_sdk @@ -522,7 +522,7 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m postprocess_dump(&program_dump); } - check_undefined_symbols(&config, &program_so); + check_undefined_symbols(config, &program_so); 
println!(); println!("To deploy this program:"); @@ -548,7 +548,7 @@ fn build_bpf(config: Config, manifest_path: Option) { if let Some(root_package) = metadata.root_package() { if !config.workspace { - build_bpf_package(&config, &metadata.target_directory, root_package); + build_bpf_package(&config, metadata.target_directory.as_ref(), root_package); return; } } @@ -569,7 +569,7 @@ fn build_bpf(config: Config, manifest_path: Option) { .collect::>(); for package in all_bpf_packages { - build_bpf_package(&config, &metadata.target_directory, package); + build_bpf_package(&config, metadata.target_directory.as_ref(), package); } } diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index fa0bb3c166ade9..2d30bc4ce2c19d 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -122,7 +122,7 @@ fn test_bpf_package(config: &Config, target_directory: &Path, package: &cargo_me cargo_args.push("test-bpf"); } for extra_cargo_test_arg in &config.extra_cargo_test_args { - cargo_args.push(&extra_cargo_test_arg); + cargo_args.push(extra_cargo_test_arg); } spawn(&config.cargo, &cargo_args); } @@ -143,7 +143,7 @@ fn test_bpf(config: Config, manifest_path: Option) { if let Some(root_package) = metadata.root_package() { if !config.workspace { - test_bpf_package(&config, &metadata.target_directory, root_package); + test_bpf_package(&config, metadata.target_directory.as_ref(), root_package); return; } } @@ -164,7 +164,7 @@ fn test_bpf(config: Config, manifest_path: Option) { .collect::>(); for package in all_bpf_packages { - test_bpf_package(&config, &metadata.target_directory, package); + test_bpf_package(&config, metadata.target_directory.as_ref(), package); } } diff --git a/sdk/program/src/message.rs b/sdk/program/src/message.rs index a8b0aa5fe8b1ba..aed75785a6382b 100644 --- a/sdk/program/src/message.rs +++ b/sdk/program/src/message.rs @@ -309,8 +309,8 @@ impl Message { nonce_authority_pubkey: &Pubkey, ) -> Self { let nonce_ix = system_instruction::advance_nonce_account( - &nonce_account_pubkey, - &nonce_authority_pubkey, + nonce_account_pubkey, + nonce_authority_pubkey, ); instructions.insert(0, nonce_ix); Self::new(&instructions, payer) @@ -482,20 +482,20 @@ impl Message { data: &[u8], ) -> Result { let mut current = 0; - let num_instructions = read_u16(&mut current, &data)?; + let num_instructions = read_u16(&mut current, data)?; if index >= num_instructions as usize { return Err(SanitizeError::IndexOutOfBounds); } // index into the instruction byte-offset table.
current += index * 2; - let start = read_u16(&mut current, &data)?; + let start = read_u16(&mut current, data)?; current = start as usize; - let num_accounts = read_u16(&mut current, &data)?; + let num_accounts = read_u16(&mut current, data)?; let mut accounts = Vec::with_capacity(num_accounts as usize); for _ in 0..num_accounts { - let meta_byte = read_u8(&mut current, &data)?; + let meta_byte = read_u8(&mut current, data)?; let mut is_signer = false; let mut is_writable = false; if meta_byte & (1 << Self::IS_SIGNER_BIT) != 0 { @@ -504,16 +504,16 @@ impl Message { if meta_byte & (1 << Self::IS_WRITABLE_BIT) != 0 { is_writable = true; } - let pubkey = read_pubkey(&mut current, &data)?; + let pubkey = read_pubkey(&mut current, data)?; accounts.push(AccountMeta { pubkey, is_signer, is_writable, }); } - let program_id = read_pubkey(&mut current, &data)?; - let data_len = read_u16(&mut current, &data)?; - let data = read_slice(&mut current, &data, data_len as usize)?; + let program_id = read_pubkey(&mut current, data)?; + let data_len = read_u16(&mut current, data)?; + let data = read_slice(&mut current, data, data_len as usize)?; Ok(Instruction { program_id, accounts, diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index ff1f0b2e1ea7fb..8016505a0a0792 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -17,7 +17,7 @@ pub struct SlotHashes(Vec); impl SlotHashes { pub fn add(&mut self, slot: Slot, hash: Hash) { - match self.binary_search_by(|(probe, _)| slot.cmp(&probe)) { + match self.binary_search_by(|(probe, _)| slot.cmp(probe)) { Ok(index) => (self.0)[index] = (slot, hash), Err(index) => (self.0).insert(index, (slot, hash)), } @@ -25,7 +25,7 @@ impl SlotHashes { } #[allow(clippy::trivially_copy_pass_by_ref)] pub fn get(&self, slot: &Slot) -> Option<&Hash> { - self.binary_search_by(|(probe, _)| slot.cmp(&probe)) + self.binary_search_by(|(probe, _)| slot.cmp(probe)) .ok() .map(|index| &self[index].1) } diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index ed3096d0054120..b85f74bc641e90 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -139,7 +139,7 @@ impl Authorized { } StakeAuthorize::Withdrawer => { if let Some((lockup, clock, custodian)) = lockup_custodian_args { - if lockup.is_in_force(&clock, None) { + if lockup.is_in_force(clock, None) { match custodian { None => { return Err(StakeError::CustodianMissing.into()); @@ -149,7 +149,7 @@ impl Authorized { return Err(StakeError::CustodianSignatureMissing.into()); } - if lockup.is_in_force(&clock, Some(custodian)) { + if lockup.is_in_force(clock, Some(custodian)) { return Err(StakeError::LockupInForce.into()); } } diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 4d6d397e1fc082..a3f467d5a5a810 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -588,7 +588,7 @@ pub mod tests { use super::*; fn make_two_accounts(key: &Pubkey) -> (Account, AccountSharedData) { - let mut account1 = Account::new(1, 2, &key); + let mut account1 = Account::new(1, 2, key); account1.executable = true; account1.rent_epoch = 4; let mut account2 = AccountSharedData::new(1, 2, key); diff --git a/sdk/src/derivation_path.rs b/sdk/src/derivation_path.rs index 11c563177682eb..06feb0cfbd2e5b 100644 --- a/sdk/src/derivation_path.rs +++ b/sdk/src/derivation_path.rs @@ -46,7 +46,7 @@ impl TryFrom<&str> for DerivationPath { impl AsRef<[ChildIndex]> for DerivationPath { fn as_ref(&self) -> &[ChildIndex] { - &self.0.as_ref() + self.0.as_ref() 
} } @@ -88,7 +88,7 @@ impl DerivationPath { } fn _from_absolute_path_insecure_str(path: &str) -> Result { - Ok(Self(DerivationPathInner::from_str(&path).map_err( + Ok(Self(DerivationPathInner::from_str(path).map_err( |err| DerivationPathError::InvalidDerivationPath(err.to_string()), )?)) } diff --git a/sdk/src/genesis_config.rs b/sdk/src/genesis_config.rs index 8cd985b7870b4b..3c7c363f1c4525 100644 --- a/sdk/src/genesis_config.rs +++ b/sdk/src/genesis_config.rs @@ -160,7 +160,7 @@ impl GenesisConfig { } pub fn load(ledger_path: &Path) -> Result { - let filename = Self::genesis_filename(&ledger_path); + let filename = Self::genesis_filename(ledger_path); let file = OpenOptions::new() .read(true) .open(&filename) @@ -198,7 +198,7 @@ impl GenesisConfig { std::fs::create_dir_all(&ledger_path)?; - let mut file = File::create(Self::genesis_filename(&ledger_path))?; + let mut file = File::create(Self::genesis_filename(ledger_path))?; file.write_all(&serialized) } @@ -339,8 +339,8 @@ mod tests { && account.lamports == 10_000)); let path = &make_tmp_path("genesis_config"); - config.write(&path).expect("write"); - let loaded_config = GenesisConfig::load(&path).expect("load"); + config.write(path).expect("write"); + let loaded_config = GenesisConfig::load(path).expect("load"); assert_eq!(config.hash(), loaded_config.hash()); let _ignored = std::fs::remove_file(&path); } diff --git a/sdk/src/nonce_keyed_account.rs b/sdk/src/nonce_keyed_account.rs index 14fc508b54f125..336548fcad818a 100644 --- a/sdk/src/nonce_keyed_account.rs +++ b/sdk/src/nonce_keyed_account.rs @@ -306,7 +306,7 @@ mod test { let authorized = keyed_account.unsigned_key(); keyed_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &rent, &MockInvokeContext::new(vec![]), @@ -367,7 +367,7 @@ mod test { keyed_account .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -597,7 +597,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -639,7 +639,7 @@ mod test { let lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -669,7 +669,7 @@ mod test { let lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -699,7 +699,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -722,7 +722,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -779,7 +779,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -808,7 +808,7 @@ mod test { nonce_keyed .withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -852,7 +852,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports(); let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -888,7 +888,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports() + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, 
&rent, &signers, @@ -924,7 +924,7 @@ mod test { let withdraw_lamports = nonce_keyed.account.borrow().lamports() - min_lamports + 1; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -960,7 +960,7 @@ mod test { let withdraw_lamports = u64::MAX - 54; let result = nonce_keyed.withdraw_nonce_account( withdraw_lamports, - &to_keyed, + to_keyed, &recent_blockhashes, &rent, &signers, @@ -1149,7 +1149,7 @@ mod test { let authorized = &Pubkey::default().clone(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &rent, &MockInvokeContext::new(vec![]), @@ -1176,7 +1176,7 @@ mod test { let authorized = nonce_account.unsigned_key(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &Rent::free(), &MockInvokeContext::new(vec![]), @@ -1211,7 +1211,7 @@ mod test { let authorized = nonce_account.unsigned_key(); nonce_account .initialize_nonce_account( - &authorized, + authorized, &recent_blockhashes, &Rent::free(), &MockInvokeContext::new(vec![]), diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs index 6ad7c7a365e94e..e419afceb229b3 100644 --- a/sdk/src/secp256k1_instruction.rs +++ b/sdk/src/secp256k1_instruction.rs @@ -38,9 +38,9 @@ pub fn new_secp256k1_instruction( hasher.update(&message_arr); let message_hash = hasher.finalize(); let mut message_hash_arr = [0u8; 32]; - message_hash_arr.copy_from_slice(&message_hash.as_slice()); + message_hash_arr.copy_from_slice(message_hash.as_slice()); let message = secp256k1::Message::parse(&message_hash_arr); let (signature, recovery_id) = secp256k1::sign(&message, priv_key); let signature_arr = signature.serialize(); assert_eq!(signature_arr.len(), SIGNATURE_SERIALIZED_SIZE); @@ -140,7 +140,7 @@ pub fn verify_eth_addresses( // Parse out pubkey let eth_address_slice = get_data_slice( - &instruction_datas, + instruction_datas, offsets.eth_address_instruction_index, offsets.eth_address_offset, HASHED_PUBKEY_SERIALIZED_SIZE, @@ -148,7 +148,7 @@ pub fn verify_eth_addresses( // Parse out message let message_slice = get_data_slice( - &instruction_datas, + instruction_datas, offsets.message_instruction_index, offsets.message_data_offset, offsets.message_data_size as usize, diff --git a/sdk/src/signature.rs b/sdk/src/signature.rs index 725e6ffd886316..bf4f5e4322ef84 100644 --- a/sdk/src/signature.rs +++ b/sdk/src/signature.rs @@ -29,7 +29,7 @@ impl crate::sanitize::Sanitize for Signature {} impl Signature { pub fn new(signature_slice: &[u8]) -> Self { - Self(GenericArray::clone_from_slice(&signature_slice)) + Self(GenericArray::clone_from_slice(signature_slice)) } pub(self) fn verify_verbose( @@ -54,7 +54,7 @@ pub trait Signable { } fn verify(&self) -> bool { self.get_signature() - .verify(&self.pubkey().as_ref(), self.signable_data().borrow()) + .verify(self.pubkey().as_ref(), self.signable_data().borrow()) } fn pubkey(&self) -> Pubkey; diff --git a/sdk/src/transaction.rs b/sdk/src/transaction.rs index 077e1b21cd0d77..104de0eb7ab4ae 100644 --- a/sdk/src/transaction.rs +++ b/sdk/src/transaction.rs @@ -459,7 +459,7 @@ pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { .filter(|maybe_ix| { let prog_id_idx = 
maybe_ix.program_id_index as usize; match message.account_keys.get(prog_id_idx) { - Some(program_id) => system_program::check_id(&program_id), + Some(program_id) => system_program::check_id(program_id), _ => false, } } && matches!(limited_deserialize(&maybe_ix.data), Ok(SystemInstruction::AdvanceNonceAccount)) @@ -968,7 +968,7 @@ mod tests { let (_, nonce_pubkey, tx) = nonced_transfer_tx(); let nonce_ix = uses_durable_nonce(&tx).unwrap(); assert_eq!( - get_nonce_pubkey_from_instruction(&nonce_ix, &tx), + get_nonce_pubkey_from_instruction(nonce_ix, &tx), Some(&nonce_pubkey), ); } diff --git a/stake-accounts/src/args.rs b/stake-accounts/src/args.rs index 025b07adaebcbd..56a0a3a6fe5bed 100644 --- a/stake-accounts/src/args.rs +++ b/stake-accounts/src/args.rs @@ -272,15 +272,15 @@ pub(crate) fn resolve_command( Ok(Command::Balance(resolved_args)) } Command::Authorize(args) => { - let resolved_args = resolve_authorize_args(&mut wallet_manager, &args)?; + let resolved_args = resolve_authorize_args(&mut wallet_manager, args)?; Ok(Command::Authorize(resolved_args)) } Command::SetLockup(args) => { - let resolved_args = resolve_set_lockup_args(&mut wallet_manager, &args)?; + let resolved_args = resolve_set_lockup_args(&mut wallet_manager, args)?; Ok(Command::SetLockup(resolved_args)) } Command::Rebase(args) => { - let resolved_args = resolve_rebase_args(&mut wallet_manager, &args)?; + let resolved_args = resolve_rebase_args(&mut wallet_manager, args)?; Ok(Command::Rebase(resolved_args)) } Command::Move(args) => { diff --git a/stake-accounts/src/main.rs b/stake-accounts/src/main.rs index 2b478785ffdc99..94cca8701d4ff3 100644 --- a/stake-accounts/src/main.rs +++ b/stake-accounts/src/main.rs @@ -114,7 +114,7 @@ fn process_lockup_stake_accounts( ) -> Result<(), ClientError> { let addresses = stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts); - let existing_lockups = get_lockups(&client, addresses)?; + let existing_lockups = get_lockups(client, addresses)?; let lockup = LockupArgs { epoch: args.lockup_epoch, @@ -143,7 +143,7 @@ fn process_rebase_stake_accounts( ) -> Result<(), ClientError> { let addresses = stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts); - let balances = get_balances(&client, addresses)?; + let balances = get_balances(client, addresses)?; let messages = stake_accounts::rebase_stake_accounts( &args.fee_payer.pubkey(), @@ -172,7 +172,7 @@ fn process_move_stake_accounts( let args = &move_args.rebase_args; let addresses = stake_accounts::derive_stake_account_addresses(&args.base_pubkey, args.num_accounts); - let balances = get_balances(&client, addresses)?; + let balances = get_balances(client, addresses)?; let messages = stake_accounts::move_stake_accounts( &args.fee_payer.pubkey(), diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index 37b4837408fa4c..ef183c338b34d4 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -49,7 +49,7 @@ pub(crate) fn new_stake_account( let instructions = stake_instruction::create_account_with_seed( funding_pubkey, &stake_account_address, - &base_pubkey, + base_pubkey, &index.to_string(), &authorized, &lockup, @@ -66,14 +66,14 @@ fn authorize_stake_accounts_instructions( new_withdraw_authority_pubkey: &Pubkey, ) -> Vec { let instruction0 = stake_instruction::authorize( - &stake_account_address, + stake_account_address, stake_authority_pubkey, new_stake_authority_pubkey, StakeAuthorize::Staker, None, ); let 
instruction1 = stake_instruction::authorize( - &stake_account_address, + stake_account_address, withdraw_authority_pubkey, new_withdraw_authority_pubkey, StakeAuthorize::Withdrawer, @@ -102,7 +102,7 @@ fn rebase_stake_account( new_base_pubkey, &i.to_string(), ); - let message = Message::new(&instructions, Some(&fee_payer_pubkey)); + let message = Message::new(&instructions, Some(fee_payer_pubkey)); Some(message) } @@ -139,7 +139,7 @@ fn move_stake_account( ); instructions.extend(authorize_instructions.into_iter()); - let message = Message::new(&instructions, Some(&fee_payer_pubkey)); + let message = Message::new(&instructions, Some(fee_payer_pubkey)); Some(message) } @@ -163,7 +163,7 @@ pub(crate) fn authorize_stake_accounts( new_stake_authority_pubkey, new_withdraw_authority_pubkey, ); - Message::new(&instructions, Some(&fee_payer_pubkey)) + Message::new(&instructions, Some(fee_payer_pubkey)) }) .collect::>() } @@ -223,7 +223,7 @@ pub(crate) fn lockup_stake_accounts( return None; } let instruction = stake_instruction::set_lockup(address, &lockup, custodian_pubkey); - let message = Message::new(&[instruction], Some(&fee_payer_pubkey)); + let message = Message::new(&[instruction], Some(fee_payer_pubkey)); Some(message) }) .collect() @@ -306,7 +306,7 @@ mod tests { ) -> Keypair { let fee_payer_keypair = Keypair::new(); client - .transfer_and_confirm(lamports, &funding_keypair, &fee_payer_keypair.pubkey()) + .transfer_and_confirm(lamports, funding_keypair, &fee_payer_keypair.pubkey()) .unwrap(); fee_payer_keypair } @@ -316,7 +316,7 @@ mod tests { base_pubkey: &Pubkey, i: usize, ) -> AccountSharedData { - let account_address = derive_stake_account_address(&base_pubkey, i); + let account_address = derive_stake_account_address(base_pubkey, i); AccountSharedData::from(client.get_account(&account_address).unwrap().unwrap()) } @@ -327,7 +327,7 @@ mod tests { ) -> Vec<(Pubkey, u64)> { (0..num_accounts) .map(|i| { - let address = derive_stake_account_address(&base_pubkey, i); + let address = derive_stake_account_address(base_pubkey, i); (address, client.get_balance(&address).unwrap()) }) .collect() @@ -340,7 +340,7 @@ mod tests { ) -> Vec<(Pubkey, Lockup)> { (0..num_accounts) .map(|i| { - let address = derive_stake_account_address(&base_pubkey, i); + let address = derive_stake_account_address(base_pubkey, i); let account = AccountSharedData::from(client.get_account(&address).unwrap().unwrap()); (address, stake_state::lockup_from(&account).unwrap()) diff --git a/storage-bigtable/src/bigtable.rs b/storage-bigtable/src/bigtable.rs index 964b4f60a07b16..383e90bc6ff10c 100644 --- a/storage-bigtable/src/bigtable.rs +++ b/storage-bigtable/src/bigtable.rs @@ -628,7 +628,7 @@ where .ok_or_else(|| Error::ObjectNotFound(format!("{}/{}", table, key)))? .1; - let data = decompress(&value)?; + let data = decompress(value)?; T::decode(&data[..]).map_err(|err| { warn!("Failed to deserialize {}/{}: {}", table, key, err); Error::ObjectCorrupt(format!("{}/{}", table, key)) @@ -649,7 +649,7 @@ where .ok_or_else(|| Error::ObjectNotFound(format!("{}/{}", table, key)))? 
.1; - let data = decompress(&value)?; + let data = decompress(value)?; bincode::deserialize(&data).map_err(|err| { warn!("Failed to deserialize {}/{}: {}", table, key, err); Error::ObjectCorrupt(format!("{}/{}", table, key)) diff --git a/storage-bigtable/src/lib.rs b/storage-bigtable/src/lib.rs index b78b145a2aadc4..254c9406f37592 100644 --- a/storage-bigtable/src/lib.rs +++ b/storage-bigtable/src/lib.rs @@ -559,7 +559,7 @@ impl LedgerStorage { let signature = transaction.signatures[0]; for address in &transaction.message.account_keys { - if !is_sysvar_id(&address) { + if !is_sysvar_id(address) { by_addr .entry(address) .or_default() diff --git a/tokens/src/commands.rs b/tokens/src/commands.rs index fdd35727ee759d..2d8204cc9b5ffe 100644 --- a/tokens/src/commands.rs +++ b/tokens/src/commands.rs @@ -139,7 +139,7 @@ fn apply_previous_transactions( for transaction_info in transaction_infos { let mut amount = transaction_info.amount; for allocation in allocations.iter_mut() { - if !has_same_recipient(&allocation, &transaction_info) { + if !has_same_recipient(allocation, transaction_info) { continue; } if allocation.amount >= amount { @@ -161,7 +161,7 @@ fn transfer( to_pubkey: &Pubkey, ) -> ClientResult { let create_instruction = - system_instruction::transfer(&sender_keypair.pubkey(), &to_pubkey, lamports); + system_instruction::transfer(&sender_keypair.pubkey(), to_pubkey, lamports); let message = Message::new(&[create_instruction], Some(&sender_keypair.pubkey())); let (recent_blockhash, _fees) = client.get_recent_blockhash()?; Ok(Transaction::new( @@ -215,7 +215,7 @@ fn distribution_instructions( } stake_instruction::create_account( &sender_pubkey, - &new_stake_account_address, + new_stake_account_address, &authorized, &lockup, allocation.amount - unlocked_sol, @@ -231,12 +231,12 @@ fn distribution_instructions( &sender_stake_args.stake_account_address, &stake_authority, allocation.amount - unlocked_sol, - &new_stake_account_address, + new_stake_account_address, ); // Make the recipient the new stake authority instructions.push(stake_instruction::authorize( - &new_stake_account_address, + new_stake_account_address, &stake_authority, &recipient, StakeAuthorize::Staker, @@ -245,7 +245,7 @@ fn distribution_instructions( // Make the recipient the new withdraw authority instructions.push(stake_instruction::authorize( - &new_stake_account_address, + new_stake_account_address, &withdraw_authority, &recipient, StakeAuthorize::Withdrawer, @@ -260,7 +260,7 @@ fn distribution_instructions( custodian: None, }; instructions.push(stake_instruction::set_lockup( - &new_stake_account_address, + new_stake_account_address, &lockup, &stake_args.lockup_authority.unwrap(), )); @@ -673,7 +673,7 @@ fn update_finalized_transactions( { statuses.extend( client - .get_signature_statuses(&unconfirmed_signatures_chunk)? + .get_signature_statuses(unconfirmed_signatures_chunk)? 
.value .into_iter(), ); diff --git a/transaction-status/src/token_balances.rs b/transaction-status/src/token_balances.rs index ab3bfa87ed9017..07d0fb2c9a3ac8 100644 --- a/transaction-status/src/token_balances.rs +++ b/transaction-status/src/token_balances.rs @@ -40,7 +40,7 @@ fn get_mint_decimals(bank: &Bank, mint: &Pubkey) -> Option { } else { let mint_account = bank.get_account(mint)?; - let decimals = Mint::unpack(&mint_account.data()) + let decimals = Mint::unpack(mint_account.data()) .map(|mint| mint.decimals) .ok()?; @@ -69,7 +69,7 @@ pub fn collect_token_balances( } if let Some((mint, ui_token_amount)) = - collect_token_balance_from_account(&bank, account_id, &mut mint_decimals) + collect_token_balance_from_account(bank, account_id, &mut mint_decimals) { transaction_balances.push(TransactionTokenBalance { account_index: index as u8, @@ -91,12 +91,12 @@ pub fn collect_token_balance_from_account( ) -> Option<(String, UiTokenAmount)> { let account = bank.get_account(account_id)?; - let token_account = TokenAccount::unpack(&account.data()).ok()?; + let token_account = TokenAccount::unpack(account.data()).ok()?; let mint_string = &token_account.mint.to_string(); - let mint = &Pubkey::from_str(&mint_string).unwrap_or_default(); + let mint = &Pubkey::from_str(mint_string).unwrap_or_default(); - let decimals = mint_decimals.get(&mint).cloned().or_else(|| { - let decimals = get_mint_decimals(bank, &mint)?; + let decimals = mint_decimals.get(mint).cloned().or_else(|| { + let decimals = get_mint_decimals(bank, mint)?; mint_decimals.insert(*mint, decimals); Some(decimals) })?; diff --git a/upload-perf/src/upload-perf.rs b/upload-perf/src/upload-perf.rs index 34436be80f602a..fbe0132a406fdb 100644 --- a/upload-perf/src/upload-perf.rs +++ b/upload-perf/src/upload-perf.rs @@ -57,7 +57,7 @@ fn main() { let name = v["name"].as_str().unwrap().trim_matches('\"').to_string(); if last_commit.is_none() { - last_commit = get_last_metrics(&"commit".to_string(), &db, &name, &branch).ok(); + last_commit = get_last_metrics(&"commit".to_string(), &db, &name, branch).ok(); } let median: i64 = v["median"].to_string().parse().unwrap(); @@ -76,10 +76,10 @@ fn main() { */ } - let last_median = get_last_metrics(&"median".to_string(), &db, &name, &branch) + let last_median = get_last_metrics(&"median".to_string(), &db, &name, branch) .unwrap_or_default(); let last_deviation = - get_last_metrics(&"deviation".to_string(), &db, &name, &branch) + get_last_metrics(&"deviation".to_string(), &db, &name, branch) .unwrap_or_default(); results.insert(name, (median, deviation, last_median, last_deviation)); diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 85a7620099b299..8e1a8fe74ca6c0 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -68,7 +68,7 @@ fn main() { .takes_value(true) .help("Configuration file to use"); if let Some(ref config_file) = *solana_cli_config::CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } diff --git a/validator/src/dashboard.rs b/validator/src/dashboard.rs index 7042f5bdea4884..882aaabd5b409f 100644 --- a/validator/src/dashboard.rs +++ b/validator/src/dashboard.rs @@ -195,7 +195,7 @@ async fn wait_for_validator_startup( } if admin_client.is_none() { - match admin_rpc_service::connect(&ledger_path).await { + match admin_rpc_service::connect(ledger_path).await { Ok(new_admin_client) => admin_client = Some(new_admin_client), Err(err) => { 
progress_bar.set_message(&format!("Unable to connect to validator: {}", err)); diff --git a/validator/src/main.rs b/validator/src/main.rs index 9a97a54714ca55..cd172042274464 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -112,7 +112,7 @@ fn wait_for_restart_window( let min_idle_slots = (min_idle_time_in_minutes as f64 * 60. / DEFAULT_S_PER_SLOT) as Slot; - let admin_client = admin_rpc_service::connect(&ledger_path); + let admin_client = admin_rpc_service::connect(ledger_path); let rpc_addr = admin_rpc_service::runtime() .block_on(async move { admin_client.await?.rpc_addr().await }) .map_err(|err| format!("Unable to get validator RPC address: {}", err))?; @@ -474,7 +474,7 @@ fn get_rpc_node( rpc_peers } else { let trusted_snapshot_hashes = - get_trusted_snapshot_hashes(&cluster_info, &validator_config.trusted_validators); + get_trusted_snapshot_hashes(cluster_info, &validator_config.trusted_validators); let mut eligible_rpc_peers = vec![]; @@ -598,7 +598,7 @@ fn check_vote_account( } for (_, vote_account_authorized_voter_pubkey) in vote_state.authorized_voters().iter() { - if !authorized_voter_pubkeys.contains(&vote_account_authorized_voter_pubkey) { + if !authorized_voter_pubkeys.contains(vote_account_authorized_voter_pubkey) { return Err(format!( "authorized voter {} not available", vote_account_authorized_voter_pubkey @@ -686,7 +686,7 @@ fn verify_reachable_ports( ("RPC", rpc_addr, &node.info.rpc), ("RPC pubsub", rpc_pubsub_addr, &node.info.rpc_pubsub), ] { - if ContactInfo::is_valid_address(&public_addr) { + if ContactInfo::is_valid_address(public_addr) { tcp_listeners.push(( bind_addr.port(), TcpListener::bind(bind_addr).unwrap_or_else(|err| { @@ -757,7 +757,7 @@ fn rpc_bootstrap( order.shuffle(&mut thread_rng()); if order .into_iter() - .all(|i| !verify_reachable_ports(&node, &cluster_entrypoints[i], &validator_config)) + .all(|i| !verify_reachable_ports(node, &cluster_entrypoints[i], validator_config)) { exit(1); } @@ -775,8 +775,8 @@ fn rpc_bootstrap( *start_progress.write().unwrap() = ValidatorStartProgress::SearchingForRpcService; gossip = Some(start_gossip_node( - &identity_keypair, - &cluster_entrypoints, + identity_keypair, + cluster_entrypoints, ledger_path, &node.info.gossip, node.sockets.gossip.try_clone().unwrap(), @@ -788,8 +788,8 @@ fn rpc_bootstrap( let rpc_node_details = get_rpc_node( &gossip.as_ref().unwrap().0, - &cluster_entrypoints, - &validator_config, + cluster_entrypoints, + validator_config, &mut blacklisted_rpc_nodes, bootstrap_config.no_snapshot_fetch, bootstrap_config.no_untrusted_rpc, @@ -816,7 +816,7 @@ fn rpc_bootstrap( .and_then(|_| { let genesis_config = download_then_check_genesis_hash( &rpc_contact_info.rpc, - &ledger_path, + ledger_path, validator_config.expected_genesis_hash, bootstrap_config.max_genesis_archive_unpacked_size, bootstrap_config.no_genesis_fetch, @@ -897,7 +897,7 @@ fn rpc_bootstrap( }; let ret = download_snapshot( &rpc_contact_info.rpc, - &snapshot_output_dir, + snapshot_output_dir, snapshot_hash, use_progress_bar, maximum_snapshots_to_retain, @@ -946,7 +946,7 @@ fn rpc_bootstrap( check_vote_account( &rpc_client, &identity_keypair.pubkey(), - &vote_account, + vote_account, &authorized_voter_keypairs .read() .unwrap() @@ -1680,7 +1680,7 @@ pub fn main() { .long("max-genesis-archive-unpacked-size") .value_name("NUMBER") .takes_value(true) - .default_value(&default_genesis_archive_unpacked_size) + .default_value(default_genesis_archive_unpacked_size) .help( "maximum total uncompressed file size of downloaded genesis 
archive", ), @@ -2141,10 +2141,10 @@ pub fn main() { cuda: matches.is_present("cuda"), expected_genesis_hash: matches .value_of("expected_genesis_hash") - .map(|s| Hash::from_str(&s).unwrap()), + .map(|s| Hash::from_str(s).unwrap()), expected_bank_hash: matches .value_of("expected_bank_hash") - .map(|s| Hash::from_str(&s).unwrap()), + .map(|s| Hash::from_str(s).unwrap()), expected_shred_version: value_t!(matches, "expected_shred_version", u16).ok(), new_hard_forks: hardforks_of(&matches, "hard_forks"), rpc_config: JsonRpcConfig { diff --git a/watchtower/src/main.rs b/watchtower/src/main.rs index d9b26c8d978019..3dd06dd8aaa51f 100644 --- a/watchtower/src/main.rs +++ b/watchtower/src/main.rs @@ -66,7 +66,7 @@ fn get_config() -> Config { .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *solana_cli_config::CONFIG_FILE { - arg.default_value(&config_file) + arg.default_value(config_file) } else { arg } @@ -190,7 +190,7 @@ fn get_cluster_info( for validator_identity in &config.validator_identity_pubkeys { validator_balances.insert( *validator_identity, - rpc_client.get_balance(&validator_identity)?, + rpc_client.get_balance(validator_identity)?, ); } @@ -299,7 +299,7 @@ fn main() -> Result<(), Box> { validator_errors.push(format!("{} missing", formatted_validator_identity)); } - if let Some(balance) = validator_balances.get(&validator_identity) { + if let Some(balance) = validator_balances.get(validator_identity) { if *balance < config.minimum_validator_identity_balance { failures.push(( "balance", From f1ae698774cc6c2adf59d5f836432cce647ce081 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 18 Jun 2021 15:34:58 +0200 Subject: [PATCH 2/4] chore: cargo fmt (cherry picked from commit 789f33e8db8c6e62b5867446c95528d8e47bf5eb) --- cli/src/cli.rs | 8 +----- core/src/consensus.rs | 8 +----- core/src/replay_stage.rs | 19 ++---------- gossip/src/crds_gossip_push.rs | 8 +----- local-cluster/src/local_cluster.rs | 3 +- local-cluster/tests/local_cluster.rs | 5 +--- programs/exchange/src/exchange_processor.rs | 8 ++---- runtime/src/accounts_db.rs | 32 +++++---------------- runtime/src/bank.rs | 3 +- sdk/program/src/message.rs | 6 ++-- upload-perf/src/upload-perf.rs | 7 ++--- 11 files changed, 24 insertions(+), 83 deletions(-) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index f4f4772fde8a9d..2f6ddf0a1c861e 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1889,13 +1889,7 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { pubkey, output_file, use_lamports_unit, - } => process_show_account( - &rpc_client, - config, - pubkey, - output_file, - *use_lamports_unit, - ), + } => process_show_account(&rpc_client, config, pubkey, output_file, *use_lamports_unit), CliCommand::Transfer { amount, to, diff --git a/core/src/consensus.rs b/core/src/consensus.rs index 7740bf983bbd6b..b184664a28d836 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -218,13 +218,7 @@ impl Tower { ) .clone(); - Self::new( - my_pubkey, - vote_account, - root, - &heaviest_bank, - ledger_path, - ) + Self::new(my_pubkey, vote_account, root, &heaviest_bank, ledger_path) } pub(crate) fn collect_vote_lockouts( diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 172de63343ee67..6f64c2c02aaf1b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -751,12 +751,7 @@ impl ReplayStage { ) }; - Self::initialize_progress_and_fork_choice( - &root_bank, - frozen_banks, - my_pubkey, - vote_account, - ) + 
Self::initialize_progress_and_fork_choice(&root_bank, frozen_banks, my_pubkey, vote_account) } pub(crate) fn initialize_progress_and_fork_choice( @@ -774,14 +769,7 @@ impl ReplayStage { let prev_leader_slot = progress.get_bank_prev_leader_slot(bank); progress.insert( bank.slot(), - ForkProgress::new_from_bank( - bank, - my_pubkey, - vote_account, - prev_leader_slot, - 0, - 0, - ), + ForkProgress::new_from_bank(bank, my_pubkey, vote_account, prev_leader_slot, 0, 0), ); } let root = root_bank.slot(); @@ -3115,8 +3103,7 @@ mod tests { let mut entries = entry::create_ticks(bank.ticks_per_slot(), hashes_per_tick, blockhash); let last_entry_hash = entries.last().unwrap().hash; - let tx = - system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash); + let tx = system_transaction::transfer(genesis_keypair, &keypair.pubkey(), 2, blockhash); let trailing_entry = entry::next_entry(&last_entry_hash, 1, vec![tx]); entries.push(trailing_entry); entries_to_test_shreds(entries, slot, slot.saturating_sub(1), true, 0) diff --git a/gossip/src/crds_gossip_push.rs b/gossip/src/crds_gossip_push.rs index 38319995d16acb..6e7319a747fb84 100644 --- a/gossip/src/crds_gossip_push.rs +++ b/gossip/src/crds_gossip_push.rs @@ -275,13 +275,7 @@ impl CrdsGossipPush { let need = Self::compute_need(self.num_active, self.active_set.len(), ratio); let mut new_items = HashMap::new(); let (weights, peers): (Vec<_>, Vec<_>) = self - .push_options( - crds, - self_id, - self_shred_version, - stakes, - gossip_validators, - ) + .push_options(crds, self_id, self_shred_version, stakes, gossip_validators) .into_iter() .unzip(); if peers.is_empty() { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index c842d5bfe1827b..db496e3e771fb1 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -448,8 +448,7 @@ impl LocalCluster { let (blockhash, _fee_calculator, _last_valid_slot) = client .get_recent_blockhash_with_commitment(CommitmentConfig::processed()) .unwrap(); - let mut tx = - system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); + let mut tx = system_transaction::transfer(source_keypair, dest_pubkey, lamports, blockhash); info!( "executing transfer of {} from {} to {}", lamports, diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index f5869cb4a8661b..7d32cb97704598 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2833,10 +2833,7 @@ fn test_hard_fork_invalidates_tower() { thread.join().unwrap(); // new slots should be rooted after hard-fork cluster relaunch - cluster - .lock() - .unwrap() - .check_for_new_roots(16, "hard fork"); + cluster.lock().unwrap().check_for_new_roots(16, "hard fork"); } #[test] diff --git a/programs/exchange/src/exchange_processor.rs b/programs/exchange/src/exchange_processor.rs index e16a06773b98b5..ab63cf7ec6fd6d 100644 --- a/programs/exchange/src/exchange_processor.rs +++ b/programs/exchange/src/exchange_processor.rs @@ -311,9 +311,8 @@ impl ExchangeProcessor { Self::is_account_unallocated(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; - let mut account = Self::deserialize_account( - keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data(), - )?; + let mut account = + Self::deserialize_account(keyed_accounts[ACCOUNT_INDEX].try_account_ref_mut()?.data())?; if &account.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own account"); @@ -367,8 +366,7 @@ impl 
ExchangeProcessor { return Err(InstructionError::InvalidArgument); } - let order = - Self::deserialize_order(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; + let order = Self::deserialize_order(keyed_accounts[ORDER_INDEX].try_account_ref()?.data())?; if &order.owner != keyed_accounts[OWNER_INDEX].unsigned_key() { error!("Signer does not own order"); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 8418a021afc3e5..84b0ce7959a7d5 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1478,11 +1478,8 @@ impl AccountsDb { .map(|pubkeys: &[Pubkey]| { let mut reclaims = Vec::new(); for pubkey in pubkeys { - self.accounts_index.clean_rooted_entries( - pubkey, - &mut reclaims, - max_clean_root, - ); + self.accounts_index + .clean_rooted_entries(pubkey, &mut reclaims, max_clean_root); } reclaims }); @@ -9441,10 +9438,7 @@ pub mod tests { let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - dummy_path, - dummy_slot, - dummy_id1, - dummy_size, + dummy_path, dummy_slot, dummy_id1, dummy_size, )); entry1.alive_bytes.store(8000, Ordering::Relaxed); @@ -9455,10 +9449,7 @@ pub mod tests { let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - dummy_path, - dummy_slot, - dummy_id2, - dummy_size, + dummy_path, dummy_slot, dummy_id2, dummy_size, )); entry2.alive_bytes.store(3000, Ordering::Relaxed); candidates @@ -9477,10 +9468,7 @@ pub mod tests { let dummy_size = 4 * PAGE_SIZE; let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - dummy_path, - dummy_slot, - dummy_id1, - dummy_size, + dummy_path, dummy_slot, dummy_id1, dummy_size, )); entry1.alive_bytes.store(3500, Ordering::Relaxed); @@ -10950,18 +10938,12 @@ pub mod tests { let dummy_id1 = 22; let entry1 = Arc::new(AccountStorageEntry::new( - dummy_path, - dummy_slot, - dummy_id1, - dummy_size, + dummy_path, dummy_slot, dummy_id1, dummy_size, )); let dummy_id2 = 44; let entry2 = Arc::new(AccountStorageEntry::new( - dummy_path, - dummy_slot, - dummy_id2, - dummy_size, + dummy_path, dummy_slot, dummy_id2, dummy_size, )); let mut recycle_stores = RecycleStores::default(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e3cf34c723c1aa..b49d4122080b53 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7295,8 +7295,7 @@ pub(crate) mod tests { .map(move |(_stake_pubkey, stake_account)| (stake_account, vote_account)) }) .map(|(stake_account, vote_account)| { - stake_state::calculate_points(stake_account, vote_account, None, true) - .unwrap_or(0) + stake_state::calculate_points(stake_account, vote_account, None, true).unwrap_or(0) }) .sum(); diff --git a/sdk/program/src/message.rs b/sdk/program/src/message.rs index aed75785a6382b..3b212461127dc5 100644 --- a/sdk/program/src/message.rs +++ b/sdk/program/src/message.rs @@ -308,10 +308,8 @@ impl Message { nonce_account_pubkey: &Pubkey, nonce_authority_pubkey: &Pubkey, ) -> Self { - let nonce_ix = system_instruction::advance_nonce_account( - nonce_account_pubkey, - nonce_authority_pubkey, - ); + let nonce_ix = + system_instruction::advance_nonce_account(nonce_account_pubkey, nonce_authority_pubkey); instructions.insert(0, nonce_ix); Self::new(&instructions, payer) } diff --git a/upload-perf/src/upload-perf.rs b/upload-perf/src/upload-perf.rs index fbe0132a406fdb..d9835968844a3c 100644 --- a/upload-perf/src/upload-perf.rs +++ b/upload-perf/src/upload-perf.rs @@ -76,11 +76,10 @@ fn main() { */ } - let last_median = get_last_metrics(&"median".to_string(), &db, &name, branch) + let 
last_median = + get_last_metrics(&"median".to_string(), &db, &name, branch).unwrap_or_default(); + let last_deviation = get_last_metrics(&"deviation".to_string(), &db, &name, branch) .unwrap_or_default(); - let last_deviation = - get_last_metrics(&"deviation".to_string(), &db, &name, branch) - .unwrap_or_default(); results.insert(name, (median, deviation, last_median, last_deviation)); } From 599792579b53edc65f51902778a3bcb56e66bf7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 18 Jun 2021 17:39:44 +0200 Subject: [PATCH 3/4] Updates BPF program assert_instruction_count tests. (cherry picked from commit c1e03f34105dc15f66199efbf2aaa300451b6890) # Conflicts: # programs/bpf/tests/programs.rs --- programs/bpf/tests/programs.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index c0193afd865aa3..470b44ee6d2c67 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -1296,6 +1296,7 @@ fn assert_instruction_count() { ("solana_bpf_rust_dep_crate", 2), ("solana_bpf_rust_external_spend", 521), ("solana_bpf_rust_iter", 724), +<<<<<<< HEAD ("solana_bpf_rust_many_args", 237), ("solana_bpf_rust_mem", 3166), ("solana_bpf_rust_membuiltins", 4069), @@ -1304,6 +1305,16 @@ fn assert_instruction_count() { ("solana_bpf_rust_rand", 498), ("solana_bpf_rust_sanity", 917), ("solana_bpf_rust_sha", 29099), +======= + ("solana_bpf_rust_many_args", 233), + ("solana_bpf_rust_mem", 3119), + ("solana_bpf_rust_membuiltins", 4065), + ("solana_bpf_rust_noop", 478), + ("solana_bpf_rust_param_passing", 46), + ("solana_bpf_rust_rand", 481), + ("solana_bpf_rust_sanity", 873), + ("solana_bpf_rust_sha", 32295), +>>>>>>> c1e03f341 (Updates BPF program assert_instruction_count tests.) 
]);
}

From 60cb79702d83681ba479dd93e5eca8a7cffea511 Mon Sep 17 00:00:00 2001
From: Michael Vines
Date: Fri, 18 Jun 2021 10:58:54 -0700
Subject: [PATCH 4/4] Resolve conflicts

---
 core/src/banking_stage.rs        |  28 --
 core/src/cost_model.rs           | 513 -------------------------------
 core/src/cost_tracker.rs         | 356 ---------------------
 core/src/execute_cost_table.rs   | 277 -----------------
 core/src/replay_stage.rs         |  37 ---
 core/src/tvu.rs                  |   4 -
 ledger-tool/src/main.rs          |  55 ----
 programs/bpf/tests/programs.rs   |  11 -
 programs/bpf_loader/build.rs     |  34 --
 sdk/cargo-build-bpf/src/main.rs  |   8 -
 sdk/cargo-test-bpf/src/main.rs   |   8 -
 sdk/src/secp256k1_instruction.rs |   6 -
 12 files changed, 1337 deletions(-)
 delete mode 100644 core/src/cost_model.rs
 delete mode 100644 core/src/cost_tracker.rs
 delete mode 100644 core/src/execute_cost_table.rs
 delete mode 100644 programs/bpf_loader/build.rs

diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index aa35614a9e0edb..f71770396ab565 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -231,11 +231,6 @@ impl BankingStage {
             Self::num_threads(),
             transaction_status_sender,
             gossip_vote_sender,
-<<<<<<< HEAD
-=======
-            cost_model,
-            &cost_tracker,
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)
         )
     }

@@ -1077,19 +1072,7 @@ impl BankingStage {
         );
         process_tx_time.stop();

-<<<<<<< HEAD
         let unprocessed_tx_count = unprocessed_tx_indexes.len();
-=======
-        // applying cost of processed transactions to shared cost_tracker
-        transactions.iter().enumerate().for_each(|(index, tx)| {
-            if !unprocessed_tx_indexes.iter().any(|&i| i == index) {
-                let tx_cost = cost_model.read().unwrap().calculate_cost(tx.transaction());
-                let mut guard = cost_tracker.lock().unwrap();
-                let _result = guard.try_add(tx_cost);
-                drop(guard);
-            }
-        });
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)

         let mut filter_pending_packets_time = Measure::start("filter_pending_packets_time");
         let filtered_unprocessed_packet_indexes = Self::filter_pending_packets_from_pending_txs(
@@ -1134,22 +1117,11 @@ impl BankingStage {
             }
         }

-<<<<<<< HEAD
         let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets(
             msgs,
             &transaction_indexes,
             bank.secp256k1_program_enabled(),
         );
-=======
-        let (transactions, transaction_to_packet_indexes, retry_packet_indexes) =
-            Self::transactions_from_packets(
-                msgs,
-                transaction_indexes,
-                bank.secp256k1_program_enabled(),
-                cost_model,
-                cost_tracker,
-            );
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)

         let tx_count = transaction_to_packet_indexes.len();

diff --git a/core/src/cost_model.rs b/core/src/cost_model.rs
deleted file mode 100644
index 6a12005cb003e0..00000000000000
--- a/core/src/cost_model.rs
+++ /dev/null
@@ -1,513 +0,0 @@
-//! `cost_model` provides a service to estimate a transaction's cost.
-//! It does so by analyzing the accounts the transaction touches and the
-//! instructions it includes. Using historical data as a guideline, it
-//! estimates the cost of reading/writing an account; the sum of these makes
-//! up the "account access cost". Instructions take time to execute; both
-//! historical and runtime data are used to determine each instruction's
-//! execution time, and the sum of those is the transaction's "execution cost".
-//! The main function is `calculate_cost`, which returns a TransactionCost struct.
-//!
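A worked illustration of the costing scheme described in the header comment above; this is a sketch, not part of the patch, and assumes only the constants and the TransactionCost shape defined in this deleted file. A simple transfer touches one signed writable account (the fee payer), one non-signed writable account (the recipient), and the read-only system program:

fn transfer_account_access_cost() -> u64 {
    // The constants below mirror those defined in cost_model.rs.
    const SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 1 + 25;
    const NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 25;
    const NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST: u64 = 7;
    // fee payer (signs and is written) + recipient (written) + program (read)
    SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
        + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
        + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST // 26 + 25 + 7 = 58
}

The same sum appears as expected_account_cost in test_cost_model_calculate_cost further down; calculate_cost then adds the execution cost, looked up per program id and falling back to the cost table's mode for unknown programs.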
-use crate::execute_cost_table::ExecuteCostTable;
-use log::*;
-use solana_sdk::{message::Message, pubkey::Pubkey, transaction::Transaction};
-use std::collections::HashMap;
-
-// Guesstimated from mainnet-beta data: sigverify averages 1us, a read averages 7us and a write averages 25us
-const SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 1 + 25;
-const SIGNED_READONLY_ACCOUNT_ACCESS_COST: u64 = 1 + 7;
-const NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST: u64 = 25;
-const NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST: u64 = 7;
-
-// Sampled from mainnet-beta, the instruction execution timing stats are (in us):
-// min=194, max=62164, avg=8214.49, med=2243
-pub const ACCOUNT_MAX_COST: u64 = 100_000_000;
-pub const BLOCK_MAX_COST: u64 = 2_500_000_000;
-
-// The cost of a transaction is made up of account_access_cost and instruction
-// execution_cost, where
-// account_access_cost is the sum of read/write/sign costs over all accounts
-// included in the transaction (a read is cheaper than a write), and
-// execution_cost is the sum of all instructions' execution costs, which are
-// observed during runtime and fed back by Replay
-#[derive(Default, Debug)]
-pub struct TransactionCost {
-    pub writable_accounts: Vec<Pubkey>,
-    pub account_access_cost: u64,
-    pub execution_cost: u64,
-}
-
-#[derive(Debug)]
-pub struct CostModel {
-    account_cost_limit: u64,
-    block_cost_limit: u64,
-    instruction_execution_cost_table: ExecuteCostTable,
-}
-
-impl Default for CostModel {
-    fn default() -> Self {
-        CostModel::new(ACCOUNT_MAX_COST, BLOCK_MAX_COST)
-    }
-}
-
-impl CostModel {
-    pub fn new(chain_max: u64, block_max: u64) -> Self {
-        Self {
-            account_cost_limit: chain_max,
-            block_cost_limit: block_max,
-            instruction_execution_cost_table: ExecuteCostTable::default(),
-        }
-    }
-
-    pub fn get_account_cost_limit(&self) -> u64 {
-        self.account_cost_limit
-    }
-
-    pub fn get_block_cost_limit(&self) -> u64 {
-        self.block_cost_limit
-    }
-
-    pub fn calculate_cost(&self, transaction: &Transaction) -> TransactionCost {
-        let (
-            signed_writable_accounts,
-            signed_readonly_accounts,
-            non_signed_writable_accounts,
-            non_signed_readonly_accounts,
-        ) = CostModel::sort_accounts_by_type(transaction.message());
-
-        let mut cost = TransactionCost {
-            writable_accounts: vec![],
-            account_access_cost: CostModel::find_account_access_cost(
-                &signed_writable_accounts,
-                &signed_readonly_accounts,
-                &non_signed_writable_accounts,
-                &non_signed_readonly_accounts,
-            ),
-            execution_cost: self.find_transaction_cost(transaction),
-        };
-        cost.writable_accounts.extend(&signed_writable_accounts);
-        cost.writable_accounts.extend(&non_signed_writable_accounts);
-        debug!("transaction {:?} has cost {:?}", transaction, cost);
-        cost
-    }
-
-    // To update or insert an instruction cost to the table.
-    pub fn upsert_instruction_cost(
-        &mut self,
-        program_key: &Pubkey,
-        cost: &u64,
-    ) -> Result<u64, &'static str> {
-        self.instruction_execution_cost_table
-            .upsert(program_key, cost);
-        match self.instruction_execution_cost_table.get_cost(program_key) {
-            Some(cost) => Ok(*cost),
-            None => Err("failed to upsert to ExecuteCostTable"),
-        }
-    }
-
-    pub fn get_instruction_cost_table(&self) -> &HashMap<Pubkey, u64> {
-        self.instruction_execution_cost_table.get_cost_table()
-    }
-
-    fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 {
-        match self.instruction_execution_cost_table.get_cost(program_key) {
-            Some(cost) => *cost,
-            None => {
-                let default_value = self.instruction_execution_cost_table.get_mode();
-                debug!(
-                    "Program key {:?} does not have assigned cost, using mode {}",
-                    program_key, default_value
-                );
-                default_value
-            }
-        }
-    }
-
-    fn find_transaction_cost(&self, transaction: &Transaction) -> u64 {
-        let mut cost: u64 = 0;
-
-        for instruction in &transaction.message().instructions {
-            let program_id =
-                transaction.message().account_keys[instruction.program_id_index as usize];
-            let instruction_cost = self.find_instruction_cost(&program_id);
-            trace!(
-                "instruction {:?} has cost of {}",
-                instruction,
-                instruction_cost
-            );
-            cost += instruction_cost;
-        }
-        cost
-    }
-
-    fn find_account_access_cost(
-        signed_writable_accounts: &[Pubkey],
-        signed_readonly_accounts: &[Pubkey],
-        non_signed_writable_accounts: &[Pubkey],
-        non_signed_readonly_accounts: &[Pubkey],
-    ) -> u64 {
-        let mut cost = 0;
-        cost += signed_writable_accounts.len() as u64 * SIGNED_WRITABLE_ACCOUNT_ACCESS_COST;
-        cost += signed_readonly_accounts.len() as u64 * SIGNED_READONLY_ACCOUNT_ACCESS_COST;
-        cost += non_signed_writable_accounts.len() as u64 * NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST;
-        cost += non_signed_readonly_accounts.len() as u64 * NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
-        cost
-    }
-
-    fn sort_accounts_by_type(
-        message: &Message,
-    ) -> (Vec<Pubkey>, Vec<Pubkey>, Vec<Pubkey>, Vec<Pubkey>) {
-        let demote_sysvar_write_locks = true;
-        let mut signer_writable: Vec<Pubkey> = vec![];
-        let mut signer_readonly: Vec<Pubkey> = vec![];
-        let mut non_signer_writable: Vec<Pubkey> = vec![];
-        let mut non_signer_readonly: Vec<Pubkey> = vec![];
-        message.account_keys.iter().enumerate().for_each(|(i, k)| {
-            let is_signer = message.is_signer(i);
-            let is_writable = message.is_writable(i, demote_sysvar_write_locks);
-
-            if is_signer && is_writable {
-                signer_writable.push(*k);
-            } else if is_signer && !is_writable {
-                signer_readonly.push(*k);
-            } else if !is_signer && is_writable {
-                non_signer_writable.push(*k);
-            } else {
-                non_signer_readonly.push(*k);
-            }
-        });
-        (
-            signer_writable,
-            signer_readonly,
-            non_signer_writable,
-            non_signer_readonly,
-        )
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use solana_runtime::{
-        bank::Bank,
-        genesis_utils::{create_genesis_config, GenesisConfigInfo},
-    };
-    use solana_sdk::{
-        bpf_loader,
-        hash::Hash,
-        instruction::CompiledInstruction,
-        message::Message,
-        signature::{Keypair, Signer},
-        system_instruction::{self},
-        system_program, system_transaction,
-    };
-    use std::{
-        str::FromStr,
-        sync::{Arc, RwLock},
-        thread::{self, JoinHandle},
-    };
-
-    fn test_setup() -> (Keypair, Hash) {
-        solana_logger::setup();
-        let GenesisConfigInfo {
-            genesis_config,
-            mint_keypair,
-            ..
-        } = create_genesis_config(10);
-        let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
-        let start_hash = bank.last_blockhash();
-        (mint_keypair, start_hash)
-    }
-
-    #[test]
-    fn test_cost_model_instruction_cost() {
-        let mut testee = CostModel::default();
-
-        let known_key = Pubkey::from_str("known11111111111111111111111111111111111111").unwrap();
-        testee.upsert_instruction_cost(&known_key, &100).unwrap();
-        // find cost for known programs
-        assert_eq!(100, testee.find_instruction_cost(&known_key));
-
-        testee
-            .upsert_instruction_cost(&bpf_loader::id(), &1999)
-            .unwrap();
-        assert_eq!(1999, testee.find_instruction_cost(&bpf_loader::id()));
-
-        // an unknown program is assigned the default cost
-        assert_eq!(
-            testee.instruction_execution_cost_table.get_mode(),
-            testee.find_instruction_cost(
-                &Pubkey::from_str("unknown111111111111111111111111111111111111").unwrap()
-            )
-        );
-    }
-
-    #[test]
-    fn test_cost_model_simple_transaction() {
-        let (mint_keypair, start_hash) = test_setup();
-
-        let keypair = Keypair::new();
-        let simple_transaction =
-            system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 2, start_hash);
-        debug!(
-            "system_transaction simple_transaction {:?}",
-            simple_transaction
-        );
-
-        // expected cost for one system transfer instruction
-        let expected_cost = 8;
-
-        let mut testee = CostModel::default();
-        testee
-            .upsert_instruction_cost(&system_program::id(), &expected_cost)
-            .unwrap();
-        assert_eq!(
-            expected_cost,
-            testee.find_transaction_cost(&simple_transaction)
-        );
-    }
-
-    #[test]
-    fn test_cost_model_transaction_many_transfer_instructions() {
-        let (mint_keypair, start_hash) = test_setup();
-
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let instructions =
-            system_instruction::transfer_many(&mint_keypair.pubkey(), &[(key1, 1), (key2, 1)]);
-        let message = Message::new(&instructions, Some(&mint_keypair.pubkey()));
-        let tx = Transaction::new(&[&mint_keypair], message, start_hash);
-        debug!("many transfer transaction {:?}", tx);
-
-        // expected cost for two system transfer instructions
-        let program_cost = 8;
-        let expected_cost = program_cost * 2;
-
-        let mut testee = CostModel::default();
-        testee
-            .upsert_instruction_cost(&system_program::id(), &program_cost)
-            .unwrap();
-        assert_eq!(expected_cost, testee.find_transaction_cost(&tx));
-    }
-
-    #[test]
-    fn test_cost_model_message_many_different_instructions() {
-        let (mint_keypair, start_hash) = test_setup();
-
-        // construct a transaction with multiple random instructions
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let prog1 = solana_sdk::pubkey::new_rand();
-        let prog2 = solana_sdk::pubkey::new_rand();
-        let instructions = vec![
-            CompiledInstruction::new(3, &(), vec![0, 1]),
-            CompiledInstruction::new(4, &(), vec![0, 2]),
-        ];
-        let tx = Transaction::new_with_compiled_instructions(
-            &[&mint_keypair],
-            &[key1, key2],
-            start_hash,
-            vec![prog1, prog2],
-            instructions,
-        );
-        debug!("many random transaction {:?}", tx);
-
-        let testee = CostModel::default();
-        let result = testee.find_transaction_cost(&tx);
-
-        // expected cost for two random/unknown programs
-        let expected_cost = testee.instruction_execution_cost_table.get_mode() * 2;
-        assert_eq!(expected_cost, result);
-    }
-
-    #[test]
-    fn test_cost_model_sort_message_accounts_by_type() {
-        // construct a transaction with two random instructions with same signer
-        let signer1 = Keypair::new();
-        let signer2 = Keypair::new();
-        let key1 = Pubkey::new_unique();
-        let key2 = Pubkey::new_unique();
-        let prog1 = Pubkey::new_unique();
-        let prog2 = Pubkey::new_unique();
-        let instructions = vec![
-            CompiledInstruction::new(4, &(), vec![0, 2]),
-            CompiledInstruction::new(5, &(), vec![1, 3]),
-        ];
-        let tx = Transaction::new_with_compiled_instructions(
-            &[&signer1, &signer2],
-            &[key1, key2],
-            Hash::new_unique(),
-            vec![prog1, prog2],
-            instructions,
-        );
-        debug!("many random transaction {:?}", tx);
-
-        let (
-            signed_writable_accounts,
-            signed_readonly_accounts,
-            non_signed_writable_accounts,
-            non_signed_readonly_accounts,
-        ) = CostModel::sort_accounts_by_type(tx.message());
-
-        assert_eq!(2, signed_writable_accounts.len());
-        assert_eq!(signer1.pubkey(), signed_writable_accounts[0]);
-        assert_eq!(signer2.pubkey(), signed_writable_accounts[1]);
-        assert_eq!(0, signed_readonly_accounts.len());
-        assert_eq!(2, non_signed_writable_accounts.len());
-        assert_eq!(key1, non_signed_writable_accounts[0]);
-        assert_eq!(key2, non_signed_writable_accounts[1]);
-        assert_eq!(2, non_signed_readonly_accounts.len());
-        assert_eq!(prog1, non_signed_readonly_accounts[0]);
-        assert_eq!(prog2, non_signed_readonly_accounts[1]);
-    }
-
-    #[test]
-    fn test_cost_model_insert_instruction_cost() {
-        let key1 = Pubkey::new_unique();
-        let cost1 = 100;
-
-        let mut cost_model = CostModel::default();
-        // Using default cost for unknown instruction
-        assert_eq!(
-            cost_model.instruction_execution_cost_table.get_mode(),
-            cost_model.find_instruction_cost(&key1)
-        );
-
-        // insert instruction cost to table
-        assert!(cost_model.upsert_instruction_cost(&key1, &cost1).is_ok());
-
-        // now it is a known instruction with a known cost
-        assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
-    }
-
-    #[test]
-    fn test_cost_model_calculate_cost() {
-        let (mint_keypair, start_hash) = test_setup();
-        let tx =
-            system_transaction::transfer(&mint_keypair, &Keypair::new().pubkey(), 2, start_hash);
-
-        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
-            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
-            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
-        let expected_execution_cost = 8;
-
-        let mut cost_model = CostModel::default();
-        cost_model
-            .upsert_instruction_cost(&system_program::id(), &expected_execution_cost)
-            .unwrap();
-        let tx_cost = cost_model.calculate_cost(&tx);
-        assert_eq!(expected_account_cost, tx_cost.account_access_cost);
-        assert_eq!(expected_execution_cost, tx_cost.execution_cost);
-        assert_eq!(2, tx_cost.writable_accounts.len());
-    }
-
-    #[test]
-    fn test_cost_model_update_instruction_cost() {
-        let key1 = Pubkey::new_unique();
-        let cost1 = 100;
-        let cost2 = 200;
-        let updated_cost = (cost1 + cost2) / 2;
-
-        let mut cost_model = CostModel::default();
-
-        // insert instruction cost to table
-        assert!(cost_model.upsert_instruction_cost(&key1, &cost1).is_ok());
-        assert_eq!(cost1, cost_model.find_instruction_cost(&key1));
-
-        // update instruction cost
-        assert!(cost_model.upsert_instruction_cost(&key1, &cost2).is_ok());
-        assert_eq!(updated_cost, cost_model.find_instruction_cost(&key1));
-    }
-
-    #[test]
-    fn test_cost_model_can_be_shared_concurrently_as_immutable() {
-        let (mint_keypair, start_hash) = test_setup();
-        let number_threads = 10;
-        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
-            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
-            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST;
-
-        let cost_model = Arc::new(CostModel::default());
-
-        let thread_handlers: Vec<JoinHandle<()>> = (0..number_threads)
-            .map(|_| {
-                // each thread creates its own simple transaction
-                let simple_transaction = system_transaction::transfer(
-                    &mint_keypair,
-                    &Keypair::new().pubkey(),
-                    2,
-                    start_hash,
-                );
-                let cost_model = cost_model.clone();
-                thread::spawn(move || {
-                    let tx_cost = cost_model.calculate_cost(&simple_transaction);
-                    assert_eq!(2, tx_cost.writable_accounts.len());
-                    assert_eq!(expected_account_cost, tx_cost.account_access_cost);
-                    assert_eq!(
-                        cost_model.instruction_execution_cost_table.get_mode(),
-                        tx_cost.execution_cost
-                    );
-                })
-            })
-            .collect();

-        for th in thread_handlers {
-            th.join().unwrap();
-        }
-    }
-
-    #[test]
-    fn test_cost_model_can_be_shared_concurrently_with_rwlock() {
-        let (mint_keypair, start_hash) = test_setup();
-        // construct a transaction with multiple random instructions
-        let key1 = solana_sdk::pubkey::new_rand();
-        let key2 = solana_sdk::pubkey::new_rand();
-        let prog1 = solana_sdk::pubkey::new_rand();
-        let prog2 = solana_sdk::pubkey::new_rand();
-        let instructions = vec![
-            CompiledInstruction::new(3, &(), vec![0, 1]),
-            CompiledInstruction::new(4, &(), vec![0, 2]),
-        ];
-        let tx = Arc::new(Transaction::new_with_compiled_instructions(
-            &[&mint_keypair],
-            &[key1, key2],
-            start_hash,
-            vec![prog1, prog2],
-            instructions,
-        ));
-
-        let number_threads = 10;
-        let expected_account_cost = SIGNED_WRITABLE_ACCOUNT_ACCESS_COST
-            + NON_SIGNED_WRITABLE_ACCOUNT_ACCESS_COST * 2
-            + NON_SIGNED_READONLY_ACCOUNT_ACCESS_COST * 2;
-        let cost1 = 100;
-        let cost2 = 200;
-        // execution cost can be either 2 * Default (before write) or cost1+cost2 (after write)
-
-        let cost_model: Arc<RwLock<CostModel>> = Arc::new(RwLock::new(CostModel::default()));
-
-        let thread_handlers: Vec<JoinHandle<()>> = (0..number_threads)
-            .map(|i| {
-                let cost_model = cost_model.clone();
-                let tx = tx.clone();
-
-                if i == 5 {
-                    thread::spawn(move || {
-                        let mut cost_model = cost_model.write().unwrap();
-                        assert!(cost_model.upsert_instruction_cost(&prog1, &cost1).is_ok());
-                        assert!(cost_model.upsert_instruction_cost(&prog2, &cost2).is_ok());
-                    })
-                } else {
-                    thread::spawn(move || {
-                        let tx_cost = cost_model.read().unwrap().calculate_cost(&tx);
-                        assert_eq!(3, tx_cost.writable_accounts.len());
-                        assert_eq!(expected_account_cost, tx_cost.account_access_cost);
-                    })
-                }
-            })
-            .collect();
-
-        for th in thread_handlers {
-            th.join().unwrap();
-        }
-    }
-}

diff --git a/core/src/cost_tracker.rs b/core/src/cost_tracker.rs
deleted file mode 100644
index df544ba702950f..00000000000000
--- a/core/src/cost_tracker.rs
+++ /dev/null
@@ -1,356 +0,0 @@
-//! `cost_tracker` keeps track of transaction cost per chained account as well as for the entire block.
-//! The main entry function is `try_add`; on success, it returns the new block cost.
-//!
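A short usage sketch of the try_add flow just described; it assumes the CostTracker and TransactionCost types from these deleted files. Admission is all-or-nothing: would_fit checks the block limit and each writable account's chained limit first, and only if every check passes is the cost recorded.

fn admit(tracker: &mut CostTracker, tx_cost: TransactionCost) -> bool {
    match tracker.try_add(tx_cost) {
        Ok(new_block_cost) => {
            // accepted: the block total now includes this transaction
            println!("accepted, block cost is now {}", new_block_cost);
            true
        }
        Err(reason) => {
            // rejected: neither the block total nor any per-account total moved
            println!("rejected: {}", reason);
            false
        }
    }
}

This mirrors what test_cost_tracker_try_add_is_atomic asserts below: a failing try_add leaves every tracked stat unchanged.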
-use crate::cost_model::TransactionCost;
-use solana_sdk::{clock::Slot, pubkey::Pubkey};
-use std::collections::HashMap;
-
-#[derive(Debug, Clone)]
-pub struct CostTracker {
-    account_cost_limit: u64,
-    block_cost_limit: u64,
-    current_bank_slot: Slot,
-    cost_by_writable_accounts: HashMap<Pubkey, u64>,
-    block_cost: u64,
-}
-
-impl CostTracker {
-    pub fn new(chain_max: u64, package_max: u64) -> Self {
-        assert!(chain_max <= package_max);
-        Self {
-            account_cost_limit: chain_max,
-            block_cost_limit: package_max,
-            current_bank_slot: 0,
-            cost_by_writable_accounts: HashMap::new(),
-            block_cost: 0,
-        }
-    }
-
-    pub fn reset_if_new_bank(&mut self, slot: Slot) {
-        if slot != self.current_bank_slot {
-            self.current_bank_slot = slot;
-            self.cost_by_writable_accounts.clear();
-            self.block_cost = 0;
-        }
-    }
-
-    pub fn try_add(&mut self, transaction_cost: TransactionCost) -> Result<u64, &'static str> {
-        let cost = transaction_cost.account_access_cost + transaction_cost.execution_cost;
-        self.would_fit(&transaction_cost.writable_accounts, &cost)?;
-
-        self.add_transaction(&transaction_cost.writable_accounts, &cost);
-        Ok(self.block_cost)
-    }
-
-    fn would_fit(&self, keys: &[Pubkey], cost: &u64) -> Result<(), &'static str> {
-        // check against the total package cost
-        if self.block_cost + cost > self.block_cost_limit {
-            return Err("would exceed block cost limit");
-        }
-
-        // check if the transaction itself is more costly than the account_cost_limit
-        if *cost > self.account_cost_limit {
-            return Err("Transaction is too expensive, exceeds account cost limit");
-        }
-
-        // check each account against account_cost_limit
-        for account_key in keys.iter() {
-            match self.cost_by_writable_accounts.get(account_key) {
-                Some(chained_cost) => {
-                    if chained_cost + cost > self.account_cost_limit {
-                        return Err("would exceed account cost limit");
-                    } else {
-                        continue;
-                    }
-                }
-                None => continue,
-            }
-        }
-
-        Ok(())
-    }
-
-    fn add_transaction(&mut self, keys: &[Pubkey], cost: &u64) {
-        for account_key in keys.iter() {
-            *self
-                .cost_by_writable_accounts
-                .entry(*account_key)
-                .or_insert(0) += cost;
-        }
-        self.block_cost += cost;
-    }
-}
-
-// CostStats can be collected by a util such as ledger_tool
-#[derive(Default, Debug)]
-pub struct CostStats {
-    pub total_cost: u64,
-    pub number_of_accounts: usize,
-    pub costliest_account: Pubkey,
-    pub costliest_account_cost: u64,
-}
-
-impl CostTracker {
-    pub fn get_stats(&self) -> CostStats {
-        let mut stats = CostStats {
-            total_cost: self.block_cost,
-            number_of_accounts: self.cost_by_writable_accounts.len(),
-            costliest_account: Pubkey::default(),
-            costliest_account_cost: 0,
-        };
-
-        for (key, cost) in self.cost_by_writable_accounts.iter() {
-            if cost > &stats.costliest_account_cost {
-                stats.costliest_account = *key;
-                stats.costliest_account_cost = *cost;
-            }
-        }
-
-        stats
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use solana_runtime::{
-        bank::Bank,
-        genesis_utils::{create_genesis_config, GenesisConfigInfo},
-    };
-    use solana_sdk::{
-        hash::Hash,
-        signature::{Keypair, Signer},
-        system_transaction,
-        transaction::Transaction,
-    };
-    use std::{cmp, sync::Arc};
-
-    fn test_setup() -> (Keypair, Hash) {
-        solana_logger::setup();
-        let GenesisConfigInfo {
-            genesis_config,
-            mint_keypair,
-            ..
-        } = create_genesis_config(10);
-        let bank = Arc::new(Bank::new_no_wallclock_throttle(&genesis_config));
-        let start_hash = bank.last_blockhash();
-        (mint_keypair, start_hash)
-    }
-
-    fn build_simple_transaction(
-        mint_keypair: &Keypair,
-        start_hash: &Hash,
-    ) -> (Transaction, Vec<Pubkey>, u64) {
-        let keypair = Keypair::new();
-        let simple_transaction =
-            system_transaction::transfer(mint_keypair, &keypair.pubkey(), 2, *start_hash);
-
-        (simple_transaction, vec![mint_keypair.pubkey()], 5)
-    }
-
-    #[test]
-    fn test_cost_tracker_initialization() {
-        let testee = CostTracker::new(10, 11);
-        assert_eq!(10, testee.account_cost_limit);
-        assert_eq!(11, testee.block_cost_limit);
-        assert_eq!(0, testee.cost_by_writable_accounts.len());
-        assert_eq!(0, testee.block_cost);
-    }
-
-    #[test]
-    fn test_cost_tracker_ok_add_one() {
-        let (mint_keypair, start_hash) = test_setup();
-        let (_tx, keys, cost) = build_simple_transaction(&mint_keypair, &start_hash);
-
-        // build testee to have capacity for one simple transaction
-        let mut testee = CostTracker::new(cost, cost);
-        assert!(testee.would_fit(&keys, &cost).is_ok());
-        testee.add_transaction(&keys, &cost);
-        assert_eq!(cost, testee.block_cost);
-    }
-
-    #[test]
-    fn test_cost_tracker_ok_add_two_same_accounts() {
-        let (mint_keypair, start_hash) = test_setup();
-        // build two transactions with same signed account
-        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
-        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
-
-        // build testee to have capacity for two simple transactions, with same accounts
-        let mut testee = CostTracker::new(cost1 + cost2, cost1 + cost2);
-        {
-            assert!(testee.would_fit(&keys1, &cost1).is_ok());
-            testee.add_transaction(&keys1, &cost1);
-        }
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_ok());
-            testee.add_transaction(&keys2, &cost2);
-        }
-        assert_eq!(cost1 + cost2, testee.block_cost);
-        assert_eq!(1, testee.cost_by_writable_accounts.len());
-    }
-
-    #[test]
-    fn test_cost_tracker_ok_add_two_diff_accounts() {
-        let (mint_keypair, start_hash) = test_setup();
-        // build two transactions with diff accounts
-        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
-        let second_account = Keypair::new();
-        let (_tx2, keys2, cost2) = build_simple_transaction(&second_account, &start_hash);
-
-        // build testee to have capacity for two simple transactions, with diff accounts
-        let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2);
-        {
-            assert!(testee.would_fit(&keys1, &cost1).is_ok());
-            testee.add_transaction(&keys1, &cost1);
-        }
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_ok());
-            testee.add_transaction(&keys2, &cost2);
-        }
-        assert_eq!(cost1 + cost2, testee.block_cost);
-        assert_eq!(2, testee.cost_by_writable_accounts.len());
-    }
-
-    #[test]
-    fn test_cost_tracker_chain_reach_limit() {
-        let (mint_keypair, start_hash) = test_setup();
-        // build two transactions with same signed account
-        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
-        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
-
-        // build testee to have capacity for two simple transactions, but not for same accounts
-        let mut testee = CostTracker::new(cmp::min(cost1, cost2), cost1 + cost2);
-        // should have room for first transaction
-        {
-            assert!(testee.would_fit(&keys1, &cost1).is_ok());
-            testee.add_transaction(&keys1, &cost1);
-        }
-        // but no more space on the same chain (same signer account)
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_err());
-        }
-    }
-
-    #[test]
-    fn test_cost_tracker_reach_limit() {
-        let (mint_keypair, start_hash) = test_setup();
-        // build two transactions with diff accounts
-        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
-        let second_account = Keypair::new();
-        let (_tx2, keys2, cost2) = build_simple_transaction(&second_account, &start_hash);
-
-        // build testee to have capacity for each chain, but not enough room for both transactions
-        let mut testee = CostTracker::new(cmp::max(cost1, cost2), cost1 + cost2 - 1);
-        // should have room for first transaction
-        {
-            assert!(testee.would_fit(&keys1, &cost1).is_ok());
-            testee.add_transaction(&keys1, &cost1);
-        }
-        // but no more room for the package as a whole
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_err());
-        }
-    }
-
-    #[test]
-    fn test_cost_tracker_reset() {
-        let (mint_keypair, start_hash) = test_setup();
-        // build two transactions with same signed account
-        let (_tx1, keys1, cost1) = build_simple_transaction(&mint_keypair, &start_hash);
-        let (_tx2, keys2, cost2) = build_simple_transaction(&mint_keypair, &start_hash);
-
-        // build testee to have capacity for two simple transactions, but not for same accounts
-        let mut testee = CostTracker::new(cmp::min(cost1, cost2), cost1 + cost2);
-        // should have room for first transaction
-        {
-            assert!(testee.would_fit(&keys1, &cost1).is_ok());
-            testee.add_transaction(&keys1, &cost1);
-            assert_eq!(1, testee.cost_by_writable_accounts.len());
-            assert_eq!(cost1, testee.block_cost);
-        }
-        // but no more space on the same chain (same signer account)
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_err());
-        }
-        // reset the tracker
-        {
-            testee.reset_if_new_bank(100);
-            assert_eq!(0, testee.cost_by_writable_accounts.len());
-            assert_eq!(0, testee.block_cost);
-        }
-        // now the second transaction can be added
-        {
-            assert!(testee.would_fit(&keys2, &cost2).is_ok());
-        }
-    }
-
-    #[test]
-    fn test_cost_tracker_try_add_is_atomic() {
-        let acct1 = Pubkey::new_unique();
-        let acct2 = Pubkey::new_unique();
-        let acct3 = Pubkey::new_unique();
-        let cost = 100;
-        let account_max = cost * 2;
-        let block_max = account_max * 3; // for three accts
-
-        let mut testee = CostTracker::new(account_max, block_max);
-
-        // case 1: a tx writes to 3 accounts, should succeed, we will have:
-        // | acct1 | $cost |
-        // | acct2 | $cost |
-        // | acct3 | $cost |
-        // and block_cost = $cost
-        {
-            let tx_cost = TransactionCost {
-                writable_accounts: vec![acct1, acct2, acct3],
-                account_access_cost: 0,
-                execution_cost: cost,
-            };
-            assert!(testee.try_add(tx_cost).is_ok());
-            let stat = testee.get_stats();
-            assert_eq!(cost, stat.total_cost);
-            assert_eq!(3, stat.number_of_accounts);
-            assert_eq!(cost, stat.costliest_account_cost);
-        }
-
-        // case 2: add tx that writes to acct2 with $cost, should succeed, resulting in
-        // | acct1 | $cost |
-        // | acct2 | $cost * 2 |
-        // | acct3 | $cost |
-        // and block_cost = $cost * 2
-        {
-            let tx_cost = TransactionCost {
-                writable_accounts: vec![acct2],
-                account_access_cost: 0,
-                execution_cost: cost,
-            };
-            assert!(testee.try_add(tx_cost).is_ok());
-            let stat = testee.get_stats();
-            assert_eq!(cost * 2, stat.total_cost);
-            assert_eq!(3, stat.number_of_accounts);
-            assert_eq!(cost * 2, stat.costliest_account_cost);
-            assert_eq!(acct2, stat.costliest_account);
-        }
-
-        // case 3: add tx that writes to [acct1, acct2]; acct2 exceeds its limit,
-        // so it should fail atomically. we should still have:
-        // | acct1 | $cost |
-        // | acct2 | $cost * 2 |
-        // | acct3 | $cost |
-        // and block_cost = $cost * 2
-        {
-            let tx_cost = TransactionCost {
-                writable_accounts: vec![acct1, acct2],
-                account_access_cost: 0,
-                execution_cost: cost,
-            };
-            assert!(testee.try_add(tx_cost).is_err());
-            let stat = testee.get_stats();
-            assert_eq!(cost * 2, stat.total_cost);
-            assert_eq!(3, stat.number_of_accounts);
-            assert_eq!(cost * 2, stat.costliest_account_cost);
-            assert_eq!(acct2, stat.costliest_account);
-        }
-    }
-}

diff --git a/core/src/execute_cost_table.rs b/core/src/execute_cost_table.rs
deleted file mode 100644
index 47cb1c81dc7f88..00000000000000
--- a/core/src/execute_cost_table.rs
+++ /dev/null
@@ -1,277 +0,0 @@
-/// ExecuteCostTable is aggregated by the Cost Model; it keeps each program's
-/// average cost in its HashMap, with a fixed capacity to keep it from growing
-/// unchecked.
-/// When its capacity limit is reached, it prunes old and less-used programs
-/// to make room for new ones.
-use log::*;
-use solana_sdk::pubkey::Pubkey;
-use std::{collections::HashMap, time::SystemTime};
-
-// Pruning is a rather expensive op; freeing up bulk space in each operation
-// is more efficient. PRUNE_RATIO defines the post-prune table
-// size as original_size * PRUNE_RATIO.
-const PRUNE_RATIO: f64 = 0.75;
-// with 50_000 TPS as the norm, weight occurrences at '100' per microsec
-const OCCURRENCES_WEIGHT: i64 = 100;
-
-const DEFAULT_CAPACITY: usize = 1024;
-
-#[derive(Debug)]
-pub struct ExecuteCostTable {
-    capacity: usize,
-    table: HashMap<Pubkey, u64>,
-    occurrences: HashMap<Pubkey, (usize, SystemTime)>,
-}
-
-impl Default for ExecuteCostTable {
-    fn default() -> Self {
-        ExecuteCostTable::new(DEFAULT_CAPACITY)
-    }
-}
-
-impl ExecuteCostTable {
-    pub fn new(cap: usize) -> Self {
-        Self {
-            capacity: cap,
-            table: HashMap::new(),
-            occurrences: HashMap::new(),
-        }
-    }
-
-    pub fn get_cost_table(&self) -> &HashMap<Pubkey, u64> {
-        &self.table
-    }
-
-    pub fn get_count(&self) -> usize {
-        self.table.len()
-    }
-
-    // instead of assigning an unknown program a configured/hard-coded cost,
-    // use the average or mode function to make an educated guess.
-    pub fn get_average(&self) -> u64 {
-        if self.table.is_empty() {
-            0
-        } else {
-            self.table.iter().map(|(_, value)| value).sum::<u64>() / self.get_count() as u64
-        }
-    }
-
-    pub fn get_mode(&self) -> u64 {
-        if self.occurrences.is_empty() {
-            0
-        } else {
-            let key = self
-                .occurrences
-                .iter()
-                .max_by_key(|&(_, count)| count)
-                .map(|(key, _)| key)
-                .expect("cannot find mode from cost table");
-
-            *self.table.get(key).unwrap()
-        }
-    }
-
-    // returns None if the program doesn't exist in the table. In this case,
-    // the client is advised to call `get_average()` or `get_mode()` to
-    // assign a 'default' value for a new program.
-    pub fn get_cost(&self, key: &Pubkey) -> Option<&u64> {
-        self.table.get(key)
-    }
-
-    pub fn upsert(&mut self, key: &Pubkey, value: &u64) {
-        let need_to_add = self.table.get(key).is_none();
-        let current_size = self.get_count();
-        if current_size == self.capacity && need_to_add {
-            self.prune_to(&((current_size as f64 * PRUNE_RATIO) as usize));
-        }
-
-        let program_cost = self.table.entry(*key).or_insert(*value);
-        *program_cost = (*program_cost + *value) / 2;
-
-        let (count, timestamp) = self
-            .occurrences
-            .entry(*key)
-            .or_insert((0, SystemTime::now()));
-        *count += 1;
-        *timestamp = SystemTime::now();
-    }
-
-    // prune old programs so the table contains `new_size` records,
-    // where 'old' is defined by weighted age: negatively correlated
-    // with the program's age and positively correlated with how
-    // frequently the program is executed (i.e. its occurrence count).
-    fn prune_to(&mut self, new_size: &usize) {
-        debug!(
-            "prune cost table, current size {}, new size {}",
-            self.get_count(),
-            new_size
-        );
-
-        if *new_size == self.get_count() {
-            return;
-        }
-
-        if *new_size == 0 {
-            self.table.clear();
-            self.occurrences.clear();
-            return;
-        }
-
-        let now = SystemTime::now();
-        let mut sorted_by_weighted_age: Vec<_> = self
-            .occurrences
-            .iter()
-            .map(|(key, (count, timestamp))| {
-                let age = now.duration_since(*timestamp).unwrap().as_micros();
-                let weighted_age = *count as i64 * OCCURRENCES_WEIGHT + -(age as i64);
-                (weighted_age, *key)
-            })
-            .collect();
-        sorted_by_weighted_age.sort_by(|x, y| x.0.partial_cmp(&y.0).unwrap());
-
-        for i in sorted_by_weighted_age.iter() {
-            self.table.remove(&i.1);
-            self.occurrences.remove(&i.1);
-            if *new_size == self.get_count() {
-                break;
-            }
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_execute_cost_table_prune_simple_table() {
-        solana_logger::setup();
-        let capacity: usize = 3;
-        let mut testee = ExecuteCostTable::new(capacity);
-
-        let key1 = Pubkey::new_unique();
-        let key2 = Pubkey::new_unique();
-        let key3 = Pubkey::new_unique();
-
-        testee.upsert(&key1, &1);
-        testee.upsert(&key2, &2);
-        testee.upsert(&key3, &3);
-
-        testee.prune_to(&(capacity - 1));
-
-        // the oldest, key1, should be pruned
-        assert!(testee.get_cost(&key1).is_none());
-        assert!(testee.get_cost(&key2).is_some());
-        assert!(testee.get_cost(&key3).is_some());
-    }
-
-    #[test]
-    fn test_execute_cost_table_prune_weighted_table() {
-        solana_logger::setup();
-        let capacity: usize = 3;
-        let mut testee = ExecuteCostTable::new(capacity);
-
-        let key1 = Pubkey::new_unique();
-        let key2 = Pubkey::new_unique();
-        let key3 = Pubkey::new_unique();
-
-        testee.upsert(&key1, &1);
-        testee.upsert(&key1, &1);
-        testee.upsert(&key2, &2);
-        testee.upsert(&key3, &3);
-
-        testee.prune_to(&(capacity - 1));
-
-        // the oldest, key1, has 2 counts; the 2nd oldest, key2, has 1 count;
-        // expect key2 to be pruned.
- assert!(testee.get_cost(&key1).is_some()); - assert!(testee.get_cost(&key2).is_none()); - assert!(testee.get_cost(&key3).is_some()); - } - - #[test] - fn test_execute_cost_table_upsert_within_capacity() { - solana_logger::setup(); - let mut testee = ExecuteCostTable::default(); - - let key1 = Pubkey::new_unique(); - let key2 = Pubkey::new_unique(); - let cost1: u64 = 100; - let cost2: u64 = 110; - - // query empty table - assert!(testee.get_cost(&key1).is_none()); - - // insert one record - testee.upsert(&key1, &cost1); - assert_eq!(1, testee.get_count()); - assert_eq!(cost1, testee.get_average()); - assert_eq!(cost1, testee.get_mode()); - assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); - - // insert 2nd record - testee.upsert(&key2, &cost2); - assert_eq!(2, testee.get_count()); - assert_eq!((cost1 + cost2) / 2_u64, testee.get_average()); - assert_eq!(cost2, testee.get_mode()); - assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); - assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); - - // update 1st record - testee.upsert(&key1, &cost2); - assert_eq!(2, testee.get_count()); - assert_eq!(((cost1 + cost2) / 2 + cost2) / 2, testee.get_average()); - assert_eq!((cost1 + cost2) / 2, testee.get_mode()); - assert_eq!(&((cost1 + cost2) / 2), testee.get_cost(&key1).unwrap()); - assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); - } - - #[test] - fn test_execute_cost_table_upsert_exceeds_capacity() { - solana_logger::setup(); - let capacity: usize = 2; - let mut testee = ExecuteCostTable::new(capacity); - - let key1 = Pubkey::new_unique(); - let key2 = Pubkey::new_unique(); - let key3 = Pubkey::new_unique(); - let key4 = Pubkey::new_unique(); - let cost1: u64 = 100; - let cost2: u64 = 110; - let cost3: u64 = 120; - let cost4: u64 = 130; - - // insert one record - testee.upsert(&key1, &cost1); - assert_eq!(1, testee.get_count()); - assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); - - // insert 2nd record - testee.upsert(&key2, &cost2); - assert_eq!(2, testee.get_count()); - assert_eq!(&cost1, testee.get_cost(&key1).unwrap()); - assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); - - // insert 3rd record, pushes out the oldest (eg 1st) record - testee.upsert(&key3, &cost3); - assert_eq!(2, testee.get_count()); - assert_eq!((cost2 + cost3) / 2_u64, testee.get_average()); - assert_eq!(cost3, testee.get_mode()); - assert!(testee.get_cost(&key1).is_none()); - assert_eq!(&cost2, testee.get_cost(&key2).unwrap()); - assert_eq!(&cost3, testee.get_cost(&key3).unwrap()); - - // update 2nd record, so the 3rd becomes the oldest - // add 4th record, pushes out 3rd key - testee.upsert(&key2, &cost1); - testee.upsert(&key4, &cost4); - assert_eq!(((cost1 + cost2) / 2 + cost4) / 2_u64, testee.get_average()); - assert_eq!((cost1 + cost2) / 2, testee.get_mode()); - assert_eq!(2, testee.get_count()); - assert!(testee.get_cost(&key1).is_none()); - assert_eq!(&((cost1 + cost2) / 2), testee.get_cost(&key2).unwrap()); - assert!(testee.get_cost(&key3).is_none()); - assert_eq!(&cost4, testee.get_cost(&key4).unwrap()); - } -} diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 6f64c2c02aaf1b..fb4b929c3b167d 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1690,14 +1690,6 @@ impl ReplayStage { replay_vote_sender, verify_recyclers, ); -<<<<<<< HEAD -======= - Self::update_cost_model(cost_model, &bank_progress.replay_stats.execute_timings); - debug!( - "after replayed into bank, updated cost model instruction cost table, current values: {:?}", - 
cost_model.read().unwrap().get_instruction_cost_table() - ); ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) match replay_result { Ok(replay_tx_count) => tx_count += replay_tx_count, Err(err) => { @@ -1888,35 +1880,6 @@ impl ReplayStage { new_stats } -<<<<<<< HEAD -======= - fn update_cost_model(cost_model: &RwLock, execute_timings: &ExecuteTimings) { - let mut cost_model_mutable = cost_model.write().unwrap(); - for (program_id, stats) in &execute_timings.details.per_program_timings { - let cost = stats.0 / stats.1 as u64; - match cost_model_mutable.upsert_instruction_cost(program_id, &cost) { - Ok(c) => { - debug!( - "after replayed into bank, instruction {:?} has averaged cost {}", - program_id, c - ); - } - Err(err) => { - debug!( - "after replayed into bank, instruction {:?} failed to update cost, err: {}", - program_id, err - ); - } - } - } - drop(cost_model_mutable); - debug!( - "after replayed into bank, updated cost model instruction cost table, current values: {:?}", - cost_model.read().unwrap().get_instruction_cost_table() - ); - } - ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) fn update_propagation_status( progress: &mut ProgressMap, slot: Slot, diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 5ae14866aff519..7b058edcc1d0b0 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -176,12 +176,8 @@ impl Tvu { Arc::new(retransmit_sockets), repair_socket, verified_receiver, -<<<<<<< HEAD &exit, completed_slots_receiver, -======= - exit, ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) cluster_slots_update_receiver, *bank_forks.read().unwrap().working_bank().epoch_schedule(), cfg, diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index 1caf97ade7a5d3..12735f21607a10 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -723,61 +723,6 @@ fn load_bank_forks( ) } -<<<<<<< HEAD -======= -fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> { - if blockstore.is_dead(slot) { - return Err("Dead slot".to_string()); - } - - let (entries, _num_shreds, _is_full) = blockstore - .get_slot_entries_with_shred_info(slot, 0, false) - .map_err(|err| format!(" Slot: {}, Failed to load entries, err {:?}", slot, err))?; - - let mut transactions = 0; - let mut programs = 0; - let mut program_ids = HashMap::new(); - let cost_model = CostModel::new(ACCOUNT_MAX_COST, BLOCK_MAX_COST); - let mut cost_tracker = CostTracker::new( - cost_model.get_account_cost_limit(), - cost_model.get_block_cost_limit(), - ); - - for entry in &entries { - transactions += entry.transactions.len(); - for transaction in &entry.transactions { - programs += transaction.message().instructions.len(); - let tx_cost = cost_model.calculate_cost(transaction); - if cost_tracker.try_add(tx_cost).is_err() { - println!( - "Slot: {}, CostModel rejected transaction {:?}, stats {:?}!", - slot, - transaction, - cost_tracker.get_stats() - ); - } - for instruction in &transaction.message().instructions { - let program_id = - transaction.message().account_keys[instruction.program_id_index as usize]; - *program_ids.entry(program_id).or_insert(0) += 1; - } - } - } - - println!( - "Slot: {}, Entries: {}, Transactions: {}, Programs {}, {:?}", - slot, - entries.len(), - transactions, - programs, - cost_tracker.get_stats() - ); - println!(" Programs: {:?}", program_ids); - - Ok(()) -} - ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) fn open_genesis_config_by(ledger_path: &Path, matches: 
&ArgMatches<'_>) -> GenesisConfig { let max_genesis_archive_unpacked_size = value_t_or_exit!(matches, "max_genesis_archive_unpacked_size", u64); diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 470b44ee6d2c67..c0193afd865aa3 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -1296,7 +1296,6 @@ fn assert_instruction_count() { ("solana_bpf_rust_dep_crate", 2), ("solana_bpf_rust_external_spend", 521), ("solana_bpf_rust_iter", 724), -<<<<<<< HEAD ("solana_bpf_rust_many_args", 237), ("solana_bpf_rust_mem", 3166), ("solana_bpf_rust_membuiltins", 4069), @@ -1305,16 +1304,6 @@ fn assert_instruction_count() { ("solana_bpf_rust_rand", 498), ("solana_bpf_rust_sanity", 917), ("solana_bpf_rust_sha", 29099), -======= - ("solana_bpf_rust_many_args", 233), - ("solana_bpf_rust_mem", 3119), - ("solana_bpf_rust_membuiltins", 4065), - ("solana_bpf_rust_noop", 478), - ("solana_bpf_rust_param_passing", 46), - ("solana_bpf_rust_rand", 481), - ("solana_bpf_rust_sanity", 873), - ("solana_bpf_rust_sha", 32295), ->>>>>>> c1e03f341 (Updates BPF program assert_instruction_count tests.) ]); } diff --git a/programs/bpf_loader/build.rs b/programs/bpf_loader/build.rs deleted file mode 100644 index 2c665cd93fd96e..00000000000000 --- a/programs/bpf_loader/build.rs +++ /dev/null @@ -1,34 +0,0 @@ -use regex::Regex; -use std::{ - fs::File, - io::{prelude::*, BufWriter, Read}, - path::PathBuf, - process::exit, - str, -}; - -/** - * Extract a list of registered syscall names and save it in a file - * for distribution with the SDK. This file is read by cargo-build-bpf - * to verify undefined symbols in a .so module that cargo-build-bpf has built. - */ -fn main() { - let path = PathBuf::from("src/syscalls.rs"); - let mut file = match File::open(&path) { - Ok(x) => x, - _ => exit(1), - }; - let mut text = vec![]; - file.read_to_end(&mut text).unwrap(); - let text = str::from_utf8(&text).unwrap(); - let path = PathBuf::from("../../sdk/bpf/syscalls.txt"); - let file = match File::create(&path) { - Ok(x) => x, - _ => exit(1), - }; - let mut out = BufWriter::new(file); - let sysc_re = Regex::new(r#"register_syscall_by_name\([[:space:]]*b"([^"]+)","#).unwrap(); - for caps in sysc_re.captures_iter(text) { - writeln!(out, "{}", caps[1].to_string()).unwrap(); - } -} diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 746b7bf17f02da..da67a577d7bdfb 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -548,11 +548,7 @@ fn build_bpf(config: Config, manifest_path: Option) { if let Some(root_package) = metadata.root_package() { if !config.workspace { -<<<<<<< HEAD build_bpf_package(&config, &metadata.target_directory, root_package); -======= - build_bpf_package(&config, metadata.target_directory.as_ref(), root_package); ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) return; } } @@ -573,11 +569,7 @@ fn build_bpf(config: Config, manifest_path: Option) { .collect::>(); for package in all_bpf_packages { -<<<<<<< HEAD build_bpf_package(&config, &metadata.target_directory, package); -======= - build_bpf_package(&config, metadata.target_directory.as_ref(), package); ->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options) } } diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index 2d30bc4ce2c19d..92392d4f8c6543 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -143,11 +143,7 @@ fn test_bpf(config: Config, 
manifest_path: Option<PathBuf>) {
     if let Some(root_package) = metadata.root_package() {
         if !config.workspace {
-<<<<<<< HEAD
             test_bpf_package(&config, &metadata.target_directory, root_package);
-=======
-            test_bpf_package(&config, metadata.target_directory.as_ref(), root_package);
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)
             return;
         }
     }
@@ -168,11 +164,7 @@ fn test_bpf(config: Config, manifest_path: Option<PathBuf>) {
         .collect::<Vec<_>>();

     for package in all_bpf_packages {
-<<<<<<< HEAD
         test_bpf_package(&config, &metadata.target_directory, package);
-=======
-        test_bpf_package(&config, metadata.target_directory.as_ref(), package);
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)
     }
 }

diff --git a/sdk/src/secp256k1_instruction.rs b/sdk/src/secp256k1_instruction.rs
index e419afceb229b3..312a426e393fa8 100644
--- a/sdk/src/secp256k1_instruction.rs
+++ b/sdk/src/secp256k1_instruction.rs
@@ -38,15 +38,9 @@ pub fn new_secp256k1_instruction(
     hasher.update(&message_arr);
     let message_hash = hasher.finalize();
     let mut message_hash_arr = [0u8; 32];
-<<<<<<< HEAD
     message_hash_arr.copy_from_slice(&message_hash.as_slice());
     let message = secp256k1::Message::parse(&message_hash_arr);
     let (signature, recovery_id) = secp256k1::sign(&message, priv_key);
-=======
-    message_hash_arr.copy_from_slice(message_hash.as_slice());
-    let message = libsecp256k1::Message::parse(&message_hash_arr);
-    let (signature, recovery_id) = libsecp256k1::sign(&message, priv_key);
->>>>>>> 6514096a6 (chore: cargo +nightly clippy --fix -Z unstable-options)
     let signature_arr = signature.serialize();
     assert_eq!(signature_arr.len(), SIGNATURE_SERIALIZED_SIZE);
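The hunk above is typical of what cargo +nightly clippy --fix changed throughout this series: message_hash.as_slice() already yields a &[u8], so taking a further reference is a clippy::needless_borrow. A standalone sketch of the pattern, independent of the Solana code:

fn main() {
    let hash = vec![0u8; 32];
    let mut arr = [0u8; 32];
    // Before the fix: `&hash.as_slice()` is a `&&[u8]` that only compiles via
    // deref coercion; clippy::needless_borrow flags the extra borrow.
    arr.copy_from_slice(&hash.as_slice());
    // After the fix: pass the slice directly.
    arr.copy_from_slice(hash.as_slice());
    assert_eq!(arr.len(), 32);
}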