From 01304b91c56c60e6855ec8cb7e1c4ef5ab3a7657 Mon Sep 17 00:00:00 2001
From: haoran
Date: Fri, 19 Aug 2022 10:39:15 -0500
Subject: [PATCH 01/67] refactor: extract store_stake_accounts fn

---
 runtime/src/bank.rs | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 32ece32f8892d1..6d8ab2443b4166 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3182,14 +3182,7 @@ impl Bank {
         m.stop();
         metrics.redeem_rewards_us += m.as_us();
 
-        // store stake account even if stakers_reward is 0
-        // because credits observed has changed
-        let mut m = Measure::start("store_stake_account");
-        self.store_accounts((self.slot(), &stake_rewards[..]));
-        m.stop();
-        metrics
-            .store_stake_accounts_us
-            .fetch_add(m.as_us(), Relaxed);
+        self.store_stake_accounts(&stake_rewards, metrics);
 
         let mut m = Measure::start("store_vote_accounts");
         let mut vote_rewards = vote_account_rewards
@@ -3239,6 +3232,17 @@ impl Bank {
         point_value.rewards as f64 / point_value.points as f64
     }
 
+    fn store_stake_accounts(&self, stake_rewards: &Vec<StakeReward>, metrics: &mut RewardsMetrics) {
+        // store stake account even if stakers_reward is 0
+        // because credits observed has changed
+        let mut m = Measure::start("store_stake_account");
+        self.store_accounts((self.slot(), &stake_rewards[..]));
+        m.stop();
+        metrics
+            .store_stake_accounts_us
+            .fetch_add(m.as_us(), Relaxed);
+    }
+
     fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
         #[allow(deprecated)]
         self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {

From 9eb7ae4c2fbb9630c0be15428b8d5e45f21ad0eb Mon Sep 17 00:00:00 2001
From: haoran
Date: Fri, 19 Aug 2022 10:47:37 -0500
Subject: [PATCH 02/67] refactor: extract store_vote_account fn

---
 runtime/src/bank.rs | 58 ++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 25 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 6d8ab2443b4166..8594d4cc091d7b 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3183,9 +3183,40 @@ impl Bank {
         metrics.redeem_rewards_us += m.as_us();
 
         self.store_stake_accounts(&stake_rewards, metrics);
+        let mut vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
+
+        let additional_reserve = stake_rewards.len() + vote_rewards.len();
+        {
+            let mut rewards = self.rewards.write().unwrap();
+            rewards.reserve(additional_reserve);
+            rewards.append(&mut vote_rewards);
+            stake_rewards
+                .into_iter()
+                .filter(|x| x.get_stake_reward() > 0)
+                .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
+        }
+
+        point_value.rewards as f64 / point_value.points as f64
+    }
+
+    fn store_stake_accounts(&self, stake_rewards: &Vec<StakeReward>, metrics: &mut RewardsMetrics) {
+        // store stake account even if stakers_reward is 0
+        // because credits observed has changed
+        let mut m = Measure::start("store_stake_account");
+        self.store_accounts((self.slot(), &stake_rewards[..]));
+        m.stop();
+        metrics
+            .store_stake_accounts_us
+            .fetch_add(m.as_us(), Relaxed);
+    }
+
+    fn store_vote_accounts(
+        &self,
+        vote_account_rewards: DashMap<Pubkey, (AccountSharedData, u8, u64, bool)>,
+        metrics: &mut RewardsMetrics,
+    ) -> Vec<(Pubkey, RewardInfo)> {
         let mut m = Measure::start("store_vote_accounts");
-        let mut vote_rewards = vote_account_rewards
+        let vote_rewards = vote_account_rewards
             .into_iter()
             .filter_map(
                 |(vote_pubkey, (mut vote_account, commission, vote_rewards, vote_needs_store))| {
@@ -3217,30 +3248,7 @@ impl Bank {
         m.stop();
         metrics.store_vote_accounts_us.fetch_add(m.as_us(), Relaxed);
-
-        let additional_reserve = stake_rewards.len() + vote_rewards.len();
-        {
-            let mut rewards = self.rewards.write().unwrap();
-            rewards.reserve(additional_reserve);
-            rewards.append(&mut vote_rewards);
-            stake_rewards
-                .into_iter()
-                .filter(|x| x.get_stake_reward() > 0)
-                .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
-        }
-
-        point_value.rewards as f64 / point_value.points as f64
-    }
-
-    fn store_stake_accounts(&self, stake_rewards: &Vec<StakeReward>, metrics: &mut RewardsMetrics) {
-        // store stake account even if stakers_reward is 0
-        // because credits observed has changed
-        let mut m = Measure::start("store_stake_account");
-        self.store_accounts((self.slot(), &stake_rewards[..]));
-        m.stop();
-        metrics
-            .store_stake_accounts_us
-            .fetch_add(m.as_us(), Relaxed);
+        vote_rewards
     }
 
     fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {

From 2f14a677dc8112fcc564a4e2ce174dbbf08eb87c Mon Sep 17 00:00:00 2001
From: haoran
Date: Fri, 19 Aug 2022 10:55:21 -0500
Subject: [PATCH 03/67] refactor: extract reward history update fn

---
 runtime/src/bank.rs | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 8594d4cc091d7b..14a2a572603702 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3183,18 +3183,8 @@ impl Bank {
         metrics.redeem_rewards_us += m.as_us();
 
         self.store_stake_accounts(&stake_rewards, metrics);
-        let mut vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
-
-        let additional_reserve = stake_rewards.len() + vote_rewards.len();
-        {
-            let mut rewards = self.rewards.write().unwrap();
-            rewards.reserve(additional_reserve);
-            rewards.append(&mut vote_rewards);
-            stake_rewards
-                .into_iter()
-                .filter(|x| x.get_stake_reward() > 0)
-                .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
-        }
+        let vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
+        self.update_reward_history(stake_rewards, vote_rewards);
 
         point_value.rewards as f64 / point_value.points as f64
     }
@@ -3251,6 +3241,21 @@ impl Bank {
         vote_rewards
     }
 
+    fn update_reward_history(
+        &self,
+        stake_rewards: Vec<StakeReward>,
+        mut vote_rewards: Vec<(Pubkey, RewardInfo)>,
+    ) {
+        let additional_reserve = stake_rewards.len() + vote_rewards.len();
+        let mut rewards = self.rewards.write().unwrap();
+        rewards.reserve(additional_reserve);
+        rewards.append(&mut vote_rewards);
+        stake_rewards
+            .into_iter()
+            .filter(|x| x.get_stake_reward() > 0)
+            .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info)));
+    }
+
     fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) {
         #[allow(deprecated)]
         self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| {

From 7bd7dd3b1f70b4839623a96de272846322b212c2 Mon Sep 17 00:00:00 2001
From: haoran
Date: Fri, 19 Aug 2022 11:03:52 -0500
Subject: [PATCH 04/67] remove avg point value from pay_validator fn. not used

---
 runtime/src/bank.rs | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 14a2a572603702..08a6f969cf544d 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3032,7 +3032,7 @@ impl Bank {
         thread_pool: &ThreadPool,
         metrics: &mut RewardsMetrics,
         update_rewards_from_cached_accounts: bool,
-    ) -> f64 {
+    ) {
         let stake_history = self.stakes_cache.stakes().history().clone();
         let vote_with_stake_delegations_map = {
             let mut m = Measure::start("load_vote_and_stake_accounts_us");
@@ -3098,7 +3098,7 @@ impl Bank {
         metrics.calculate_points_us.fetch_add(m.as_us(), Relaxed);
 
         if points == 0 {
-            return 0.0;
+            return;
         }
 
         // pay according to point value
@@ -3185,8 +3185,6 @@ impl Bank {
         self.store_stake_accounts(&stake_rewards, metrics);
         let vote_rewards = self.store_vote_accounts(vote_account_rewards, metrics);
         self.update_reward_history(stake_rewards, vote_rewards);
-
-        point_value.rewards as f64 / point_value.points as f64
     }

From 4cbf6439d8ff9f9838afc9bed28a055b451e5ebc Mon Sep 17 00:00:00 2001
From: haoran
Date: Fri, 19 Aug 2022 11:35:15 -0500
Subject: [PATCH 05/67] clippy: slice

---
 runtime/src/bank.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 6d8ab2443b4166..56e67e0848804b 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3232,11 +3232,11 @@ impl Bank {
         point_value.rewards as f64 / point_value.points as f64
     }
 
-    fn store_stake_accounts(&self, stake_rewards: &Vec<StakeReward>, metrics: &mut RewardsMetrics) {
+    fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) {
         // store stake account even if stakers_reward is 0
         // because credits observed has changed
         let mut m = Measure::start("store_stake_account");
-        self.store_accounts((self.slot(), &stake_rewards[..]));
+        self.store_accounts((self.slot(), stake_rewards));
         m.stop();
         metrics
             .store_stake_accounts_us

From 9508b23e9c9ef85c216c53bce83acdf9dc253619 Mon Sep 17 00:00:00 2001
From: Haoran Yi
Date: Fri, 19 Aug 2022 15:42:48 -0500
Subject: [PATCH 06/67] clippy: slice

---
 runtime/src/bank.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index 8594d4cc091d7b..3862382bb56bb0 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3199,11 +3199,11 @@ impl Bank {
         point_value.rewards as f64 / point_value.points as f64
     }
 
-    fn store_stake_accounts(&self, stake_rewards: &Vec<StakeReward>, metrics: &mut RewardsMetrics) {
+    fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) {
         // store stake account even if stakers_reward is 0
         // because credits observed has changed
         let mut m = Measure::start("store_stake_account");
-        self.store_accounts((self.slot(), &stake_rewards[..]));
+        self.store_accounts((self.slot(), stake_rewards));
         m.stop();
         metrics
             .store_stake_accounts_us

From f83622d0428149772fc081a55004327ff30b7b1c Mon Sep 17 00:00:00 2001
From: Jeff Biseda
Date: Tue, 16 Aug 2022 09:34:10 -0700
Subject: [PATCH 07/67] remove abort() from test-validator (#27124)

---
 test-validator/src/lib.rs                  | 64 +++++++++-------------
 validator/src/bin/solana-test-validator.rs | 22 ++++++--
 2 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs
index 12f4ff6775b0c6..f2fb1f37a3ab12 100644
--- a/test-validator/src/lib.rs
+++ b/test-validator/src/lib.rs
@@ -284,7 +284,7 @@ impl TestValidatorGenesis {
         addresses: T,
         rpc_client: &RpcClient,
         skip_missing: bool,
-    ) -> &mut Self
+    ) -> Result<&mut Self, String>
     where
        T: IntoIterator<Item = Pubkey>,
     {
@@ -296,20 +296,21 @@ impl TestValidatorGenesis {
             } else if skip_missing {
                 warn!("Could not find {}, skipping.", address);
             } else {
-                error!("Failed to fetch {}: {}", address, res.unwrap_err());
-                Self::abort();
+                return Err(format!("Failed to fetch {}: {}", address, res.unwrap_err()));
             }
         }
-        self
+        Ok(self)
     }
 
-    pub fn add_accounts_from_json_files(&mut self, accounts: &[AccountInfo]) -> &mut Self {
+    pub fn add_accounts_from_json_files(
+        &mut self,
+        accounts: &[AccountInfo],
+    ) -> Result<&mut Self, String> {
         for account in accounts {
-            let account_path =
-                solana_program_test::find_file(account.filename).unwrap_or_else(|| {
-                    error!("Unable to locate {}", account.filename);
-                    Self::abort();
-                });
+            let account_path = match solana_program_test::find_file(account.filename) {
+                Some(path) => path,
+                None => return Err(format!("Unable to locate {}", account.filename)),
+            };
             let mut file = File::open(&account_path).unwrap();
             let mut account_info_raw = String::new();
             file.read_to_string(&mut account_info_raw).unwrap();
@@ -317,12 +318,11 @@ impl TestValidatorGenesis {
             let result: serde_json::Result<CliAccount> = serde_json::from_str(&account_info_raw);
             let account_info = match result {
                 Err(err) => {
-                    error!(
+                    return Err(format!(
                         "Unable to deserialize {}: {}",
                         account_path.to_str().unwrap(),
                         err
-                    );
-                    Self::abort();
+                    ));
                 }
                 Ok(deserialized) => deserialized,
             };
@@ -338,25 +338,24 @@ impl TestValidatorGenesis {
             self.add_account(address, account);
         }
 
-        self
+        Ok(self)
     }
 
-    pub fn add_accounts_from_directories<T, P>(&mut self, dirs: T) -> &mut Self
+    pub fn add_accounts_from_directories<T, P>(&mut self, dirs: T) -> Result<&mut Self, String>
     where
        T: IntoIterator<Item = P>,
        P: AsRef<Path> + Display,
     {
        let mut json_files: HashSet<String> = HashSet::new();
        for dir in dirs {
-            let matched_files = fs::read_dir(&dir)
-                .unwrap_or_else(|err| {
-                    error!("Cannot read directory {}: {}", dir, err);
-                    Self::abort();
-                })
-                .flatten()
-                .map(|entry| entry.path())
-                .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json")))
-                .map(|path| String::from(path.to_string_lossy()));
+            let matched_files = match fs::read_dir(&dir) {
+                Ok(dir) => dir,
+                Err(e) => return Err(format!("Cannot read directory {}: {}", &dir, e)),
+            }
+            .flatten()
+            .map(|entry| entry.path())
+            .filter(|path| path.is_file() && path.extension() == Some(OsStr::new("json")))
+            .map(|path| String::from(path.to_string_lossy()));
            json_files.extend(matched_files);
        }
@@ -371,9 +370,9 @@ impl TestValidatorGenesis {
             })
             .collect();
 
-        self.add_accounts_from_json_files(&accounts);
+        self.add_accounts_from_json_files(&accounts)?;
 
-        self
+        Ok(self)
     }
 
     /// Add an account to the test environment with the account data in the provided `filename`
@@ -512,19 +511,6 @@ impl TestValidatorGenesis {
             Err(err) => panic!("Test validator failed to start: {}", err),
         }
     }
-
-    fn abort() -> ! {
-        #[cfg(not(test))]
-        {
-            // standard error is usually redirected to a log file, cry for help on standard output as
-            // well
-            println!("Validator process aborted. 
The validator log may contain further details"); - std::process::exit(1); - } - - #[cfg(test)] - panic!("process::exit(1) is intercepted for friendly test failure..."); - } } pub struct TestValidator { diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 8884ad36b2e245..46092b651caa18 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -767,27 +767,41 @@ fn main() { .rpc_port(rpc_port) .add_programs_with_path(&programs_to_load) .add_accounts_from_json_files(&accounts_to_load) + .unwrap_or_else(|e| { + println!("Error: add_accounts_from_json_files failed: {}", e); + exit(1); + }) .add_accounts_from_directories(&accounts_from_dirs) + .unwrap_or_else(|e| { + println!("Error: add_accounts_from_directories failed: {}", e); + exit(1); + }) .deactivate_features(&features_to_deactivate); if !accounts_to_clone.is_empty() { - genesis.clone_accounts( + if let Err(e) = genesis.clone_accounts( accounts_to_clone, cluster_rpc_client .as_ref() .expect("bug: --url argument missing?"), false, - ); + ) { + println!("Error: clone_accounts failed: {}", e); + exit(1); + } } if !accounts_to_maybe_clone.is_empty() { - genesis.clone_accounts( + if let Err(e) = genesis.clone_accounts( accounts_to_maybe_clone, cluster_rpc_client .as_ref() .expect("bug: --url argument missing?"), true, - ); + ) { + println!("Error: clone_accounts failed: {}", e); + exit(1); + } } if let Some(warp_slot) = warp_slot { From a39daaac192f73ae9dc323de5d816d3d8ae08bdc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 11:04:10 -0600 Subject: [PATCH 08/67] chore: bump bytes from 1.1.0 to 1.2.1 (#27172) * chore: bump bytes from 1.1.0 to 1.2.1 Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.1.0 to 1.2.1. - [Release notes](https://github.com/tokio-rs/bytes/releases) - [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md) - [Commits](https://github.com/tokio-rs/bytes/compare/v1.1.0...v1.2.1) --- updated-dependencies: - dependency-name: bytes dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- client/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- storage-bigtable/Cargo.toml | 2 +- storage-bigtable/build-proto/Cargo.lock | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5efcd56579db48..ff736feafb8a1f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -625,9 +625,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bytesize" diff --git a/client/Cargo.toml b/client/Cargo.toml index 85a0b0fd218599..a17593a085ebee 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -15,7 +15,7 @@ async-trait = "0.1.57" base64 = "0.13.0" bincode = "1.3.3" bs58 = "0.4.0" -bytes = "1.1.0" +bytes = "1.2.1" clap = "2.33.0" crossbeam-channel = "0.5" enum_dispatch = "0.3.8" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 0226a144638870..273a42ed6daa07 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -584,9 +584,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "bzip2" diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index fc579eb884dd00..6bcedb5f200236 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] backoff = { version = "0.4.0", features = ["tokio"] } bincode = "1.3.3" -bytes = "1.0" +bytes = "1.2" bzip2 = "0.4.3" enum-iterator = "0.8.1" flate2 = "1.0.24" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index e595a4568b32f6..0b599060ce6997 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -31,9 +31,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "bytes" -version = "1.0.1" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" From 463fc2233ee3f9786d0bfd7c89bc78f75bd4f7dd Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Tue, 16 Aug 2022 12:06:52 -0500 Subject: [PATCH 09/67] Share Ancestors API get with contains_key (#27161) consolidate similar fns --- runtime/src/ancestors.rs | 8 ++------ runtime/src/status_cache.rs | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/runtime/src/ancestors.rs b/runtime/src/ancestors.rs index 42730efd98615c..9712f1fdbbda0a 100644 --- a/runtime/src/ancestors.rs +++ b/runtime/src/ancestors.rs @@ -65,10 +65,6 @@ impl Ancestors { self.ancestors.get_all() } - pub fn get(&self, slot: &Slot) -> bool { - 
self.ancestors.contains(slot) - } - pub fn remove(&mut self, slot: &Slot) { self.ancestors.remove(slot); } @@ -182,10 +178,10 @@ pub mod tests { let key = item.0; min = std::cmp::min(min, *key); max = std::cmp::max(max, *key); - assert!(ancestors.get(key)); + assert!(ancestors.contains_key(key)); } for slot in min - 1..max + 2 { - assert_eq!(ancestors.get(&slot), hashset.contains(&slot)); + assert_eq!(ancestors.contains_key(&slot), hashset.contains(&slot)); } } diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index 130810a5f87d78..c5d8379ce06443 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -137,7 +137,7 @@ impl StatusCache { if let Some(stored_forks) = keymap.get(key_slice) { let res = stored_forks .iter() - .find(|(f, _)| ancestors.get(f) || self.roots.get(f).is_some()) + .find(|(f, _)| ancestors.contains_key(f) || self.roots.get(f).is_some()) .cloned(); if res.is_some() { return res; From 08bf8d3757339862ab3e9e2d88caf9da3bc5d5c8 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Tue, 16 Aug 2022 13:57:24 -0400 Subject: [PATCH 10/67] Rename to `MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA` (#27175) --- runtime/src/block_cost_limits.rs | 5 +++-- runtime/src/cost_tracker.rs | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index a1f1db85dc69cb..31964f88cdaee3 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -63,5 +63,6 @@ pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_U /// sets at ~75% of MAX_BLOCK_UNITS to leave room for non-vote transactions pub const MAX_VOTE_UNITS: u64 = (MAX_BLOCK_UNITS as f64 * 0.75_f64) as u64; -/// max length of account data in a block (bytes) -pub const MAX_ACCOUNT_DATA_BLOCK_LEN: u64 = 100_000_000; +/// The maximum allowed size, in bytes, that accounts data can grow, per block. +/// This can also be thought of as the maximum size of new allocations per block. +pub const MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA: u64 = 100_000_000; diff --git a/runtime/src/cost_tracker.rs b/runtime/src/cost_tracker.rs index a1d779a8a581b3..6e848b63d24c57 100644 --- a/runtime/src/cost_tracker.rs +++ b/runtime/src/cost_tracker.rs @@ -218,7 +218,7 @@ impl CostTracker { } } - if account_data_size > MAX_ACCOUNT_DATA_BLOCK_LEN { + if account_data_size > MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA { return Err(CostTrackerError::WouldExceedAccountDataBlockLimit); } @@ -618,8 +618,8 @@ mod tests { let second_account = Keypair::new(); let (_tx1, mut tx_cost1) = build_simple_transaction(&mint_keypair, &start_hash); let (_tx2, mut tx_cost2) = build_simple_transaction(&second_account, &start_hash); - tx_cost1.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN; - tx_cost2.account_data_size = MAX_ACCOUNT_DATA_BLOCK_LEN + 1; + tx_cost1.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA; + tx_cost2.account_data_size = MAX_BLOCK_ACCOUNTS_DATA_SIZE_DELTA + 1; let cost1 = tx_cost1.sum(); let cost2 = tx_cost2.sum(); From 5c796ddfd1f53913f60c46a575038a140be4ac28 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 13:01:37 -0600 Subject: [PATCH 11/67] chore: bump libc from 0.2.129 to 0.2.131 (#27162) * chore: bump libc from 0.2.129 to 0.2.131 Bumps [libc](https://github.com/rust-lang/libc) from 0.2.129 to 0.2.131. 
- [Release notes](https://github.com/rust-lang/libc/releases) - [Commits](https://github.com/rust-lang/libc/compare/0.2.129...0.2.131) --- updated-dependencies: - dependency-name: libc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 4 ++-- ledger/Cargo.toml | 2 +- perf/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 4 ++-- rpc/Cargo.toml | 2 +- storage-bigtable/build-proto/Cargo.lock | 4 ++-- streamer/Cargo.toml | 2 +- sys-tuner/Cargo.toml | 2 +- validator/Cargo.toml | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ff736feafb8a1f..b5cbea2552af2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2423,9 +2423,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "libloading" diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index dae528e9999cef..f5fc208efc815e 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -21,7 +21,7 @@ fs_extra = "1.2.0" futures = "0.3.21" itertools = "0.10.3" lazy_static = "1.4.0" -libc = "0.2.129" +libc = "0.2.131" log = { version = "0.4.17" } lru = "0.7.7" num_cpus = "1.13.1" diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 8fd427b6365243..202bf36c687a43 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -29,7 +29,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" -libc = "0.2.129" +libc = "0.2.131" nix = "0.24.2" [lib] diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 273a42ed6daa07..e3605f8eca2b35 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2169,9 +2169,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "libloading" diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 00a059d2dccce1..531218daed646f 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -21,7 +21,7 @@ jsonrpc-core-client = { version = "18.0.0" } jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" jsonrpc-pubsub = "18.0.0" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" rayon = "1.5.3" regex = "1.5.6" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index 0b599060ce6997..110356f435c2a5 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -116,9 +116,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.129" +version = "0.2.131" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64de3cc433455c14174d42e554d4027ee631c4d046d43e3ecc6efc4636cdc7a7" +checksum = 
"04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" [[package]] name = "log" diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index 8bc2909620316a..e72816d03099b8 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -15,7 +15,7 @@ futures-util = "0.3.21" histogram = "0.6.9" indexmap = "1.9.1" itertools = "0.10.3" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" nix = "0.24.2" pem = "1.0.2" diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index 4b60a190d295ad..a98a719300d4b1 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -12,7 +12,7 @@ publish = true [dependencies] clap = "2.33.1" -libc = "0.2.129" +libc = "0.2.131" log = "0.4.17" solana-logger = { path = "../logger", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 106bc27d6f26df..3d1b79478d7394 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -60,7 +60,7 @@ symlink = "0.1.0" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"] } [target."cfg(unix)".dependencies] -libc = "0.2.129" +libc = "0.2.131" signal-hook = "0.3.14" [package.metadata.docs.rs] From 27ed497b2a0975afb14cc65422e729c9b2c741ab Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Tue, 16 Aug 2022 19:40:06 +0000 Subject: [PATCH 12/67] reverts wide fanout in broadcast when the root node is down (#26359) A change included in https://github.com/solana-labs/solana/pull/20480 was that when the root node in turbine broadcast tree is down, the leader will broadcast the shred to all nodes in the first layer. The intention was to mitigate the impact of dead nodes on shreds propagation, because if the root node is down, then the entire cluster will miss out the shred. On the other hand, if x% of stake is down, this will cause 200*x% + 1 packets/shreds ratio at the broadcast stage which might contribute to line-rate saturation and packet drop. To avoid this bandwidth saturation issue, this commit reverts that logic and always broadcasts shreds from the leader only to the root node. As before we rely on erasure codes to recover shreds lost due to staked nodes being offline. --- core/src/broadcast_stage.rs | 20 +++---- .../broadcast_duplicates_run.rs | 21 +------ core/src/cluster_nodes.rs | 59 ++----------------- 3 files changed, 16 insertions(+), 84 deletions(-) diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 18ab25a0b914c9..ba4c33fa38cc46 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -14,7 +14,10 @@ use { }, crossbeam_channel::{unbounded, Receiver, RecvError, RecvTimeoutError, Sender}, itertools::Itertools, - solana_gossip::cluster_info::{ClusterInfo, ClusterInfoError, DATA_PLANE_FANOUT}, + solana_gossip::{ + cluster_info::{ClusterInfo, ClusterInfoError}, + contact_info::ContactInfo, + }, solana_ledger::{blockstore::Blockstore, shred::Shred}, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_error, inc_new_counter_info}, @@ -32,7 +35,6 @@ use { }, std::{ collections::{HashMap, HashSet}, - iter::repeat, net::UdpSocket, sync::{ atomic::{AtomicBool, Ordering}, @@ -390,8 +392,8 @@ fn update_peer_stats( } } -/// broadcast messages from the leader to layer 1 nodes -/// # Remarks +/// Broadcasts shreds from the leader (i.e. this node) to the root of the +/// turbine retransmit tree for each shred. 
 pub fn broadcast_shreds(
     s: &UdpSocket,
     shreds: &[Shred],
@@ -416,14 +418,10 @@ pub fn broadcast_shreds(
         let cluster_nodes = cluster_nodes_cache.get(slot, &root_bank, &working_bank, cluster_info);
         update_peer_stats(&cluster_nodes, last_datapoint_submit);
-        let root_bank = root_bank.clone();
         shreds.flat_map(move |shred| {
-            repeat(shred.payload()).zip(cluster_nodes.get_broadcast_addrs(
-                &shred.id(),
-                &root_bank,
-                DATA_PLANE_FANOUT,
-                socket_addr_space,
-            ))
+            let node = cluster_nodes.get_broadcast_peer(&shred.id())?;
+            ContactInfo::is_valid_address(&node.tvu, socket_addr_space)
+                .then(|| (shred.payload(), node.tvu))
         })
     })
     .collect();
diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs
index 741be826c44982..9e60d6c8196cfe 100644
--- a/core/src/broadcast_stage/broadcast_duplicates_run.rs
+++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -3,7 +3,7 @@ use {
     crate::cluster_nodes::ClusterNodesCache,
     itertools::Itertools,
     solana_entry::entry::Entry,
-    solana_gossip::cluster_info::DATA_PLANE_FANOUT,
+    solana_gossip::contact_info::ContactInfo,
     solana_ledger::shred::{ProcessShredsStats, Shredder},
     solana_sdk::{
         hash::Hash,
@@ -270,12 +270,6 @@ impl BroadcastRun for BroadcastDuplicatesRun {
             (bank_forks.root_bank(), bank_forks.working_bank())
         };
         let self_pubkey = cluster_info.id();
-        let nodes: Vec<_> = cluster_info
-            .all_peers()
-            .into_iter()
-            .map(|(node, _)| node)
-            .collect();
-
         // Create cluster partition.
         let cluster_partition: HashSet<Pubkey> = {
             let mut cumilative_stake = 0;
@@ -302,17 +296,8 @@ impl BroadcastRun for BroadcastDuplicatesRun {
         let packets: Vec<_> = shreds
             .iter()
             .filter_map(|shred| {
-                let addr = cluster_nodes
-                    .get_broadcast_addrs(
-                        &shred.id(),
-                        &root_bank,
-                        DATA_PLANE_FANOUT,
-                        socket_addr_space,
-                    )
-                    .first()
-                    .copied()?;
-                let node = nodes.iter().find(|node| node.tvu == addr)?;
-                if !socket_addr_space.check(&node.tvu) {
+                let node = cluster_nodes.get_broadcast_peer(&shred.id())?;
+                if ContactInfo::is_valid_address(&node.tvu, socket_addr_space) {
                     return None;
                 }
                 if self
diff --git a/core/src/cluster_nodes.rs b/core/src/cluster_nodes.rs
index f83175a9946f8d..22fcc882c07186 100644
--- a/core/src/cluster_nodes.rs
+++ b/core/src/cluster_nodes.rs
@@ -26,7 +26,7 @@ use {
         any::TypeId,
         cmp::Reverse,
         collections::HashMap,
-        iter::{once, repeat_with},
+        iter::repeat_with,
         marker::PhantomData,
         net::SocketAddr,
         ops::Deref,
@@ -114,62 +114,11 @@ impl ClusterNodes<BroadcastStage> {
         new_cluster_nodes(cluster_info, stakes)
     }
 
-    pub(crate) fn get_broadcast_addrs(
-        &self,
-        shred: &ShredId,
-        root_bank: &Bank,
-        fanout: usize,
-        socket_addr_space: &SocketAddrSpace,
-    ) -> Vec<SocketAddr> {
-        const MAX_CONTACT_INFO_AGE: Duration = Duration::from_secs(2 * 60);
+    pub(crate) fn get_broadcast_peer(&self, shred: &ShredId) -> Option<&ContactInfo> {
         let shred_seed = shred.seed(&self.pubkey);
         let mut rng = ChaChaRng::from_seed(shred_seed);
-        let index = match self.weighted_shuffle.first(&mut rng) {
-            None => return Vec::default(),
-            Some(index) => index,
-        };
-        if let Some(node) = self.nodes[index].contact_info() {
-            let now = timestamp();
-            let age = Duration::from_millis(now.saturating_sub(node.wallclock));
-            if age < MAX_CONTACT_INFO_AGE
-                && ContactInfo::is_valid_address(&node.tvu, socket_addr_space)
-            {
-                return vec![node.tvu];
-            }
-        }
-        let mut rng = ChaChaRng::from_seed(shred_seed);
-        let nodes: Vec<&Node> = self
-            .weighted_shuffle
-            .clone()
-            .shuffle(&mut rng)
-            .map(|index| &self.nodes[index])
-            .collect();
-        if nodes.is_empty() {
-            return Vec::default();
-        }
-        if drop_redundant_turbine_path(shred.slot(), root_bank) {
-            let peers = once(nodes[0]).chain(get_retransmit_peers(fanout, 0, &nodes));
-            let addrs = peers.filter_map(Node::contact_info).map(|peer| peer.tvu);
-            return addrs
-                .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
-                .collect();
-        }
-        let (neighbors, children) = compute_retransmit_peers(fanout, 0, &nodes);
-        neighbors[..1]
-            .iter()
-            .filter_map(|node| Some(node.contact_info()?.tvu))
-            .chain(
-                neighbors[1..]
-                    .iter()
-                    .filter_map(|node| Some(node.contact_info()?.tvu_forwards)),
-            )
-            .chain(
-                children
-                    .iter()
-                    .filter_map(|node| Some(node.contact_info()?.tvu)),
-            )
-            .filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
-            .collect()
+        let index = self.weighted_shuffle.first(&mut rng)?;
+        self.nodes[index].contact_info()
     }
 }

From d9260d1d20beaf360dce4de891882140ed8ebcfa Mon Sep 17 00:00:00 2001
From: AJ Taylor
Date: Tue, 16 Aug 2022 14:32:38 -0600
Subject: [PATCH 13/67] add getTokenLargestAccounts rpc method to rust client
 (#26840)

* add get token largest accounts rpc call to client

* split to include with commitment
---
 client/src/nonblocking/rpc_client.rs | 25 +++++++++++++++++++++++++
 client/src/rpc_client.rs             | 18 ++++++++++++++++++
 client/src/rpc_request.rs            |  6 ++++++
 3 files changed, 49 insertions(+)

diff --git a/client/src/nonblocking/rpc_client.rs b/client/src/nonblocking/rpc_client.rs
index c6f0098d71eb64..ead129fa26ac60 100644
--- a/client/src/nonblocking/rpc_client.rs
+++ b/client/src/nonblocking/rpc_client.rs
@@ -5016,6 +5016,31 @@ impl RpcClient {
         .await
     }
 
+    pub async fn get_token_largest_accounts(
+        &self,
+        mint: &Pubkey,
+    ) -> ClientResult<Vec<RpcTokenAccountBalance>> {
+        Ok(self
+            .get_token_largest_accounts_with_commitment(mint, self.commitment())
+            .await?
+            .value)
+    }
+
+    pub async fn get_token_largest_accounts_with_commitment(
+        &self,
+        mint: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> RpcResult<Vec<RpcTokenAccountBalance>> {
+        self.send(
+            RpcRequest::GetTokenLargestAccounts,
+            json!([
+                mint.to_string(),
+                self.maybe_map_commitment(commitment_config).await?
+            ]),
+        )
+        .await
+    }
+
     pub async fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
         Ok(self
             .get_token_supply_with_commitment(mint, self.commitment())
diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs
index 9946dfa5cfdf66..b89b906e57ade4 100644
--- a/client/src/rpc_client.rs
+++ b/client/src/rpc_client.rs
@@ -3901,6 +3901,24 @@ impl RpcClient {
         )
     }
 
+    pub fn get_token_largest_accounts(
+        &self,
+        mint: &Pubkey,
+    ) -> ClientResult<Vec<RpcTokenAccountBalance>> {
+        self.invoke((self.rpc_client.as_ref()).get_token_largest_accounts(mint))
+    }
+
+    pub fn get_token_largest_accounts_with_commitment(
+        &self,
+        mint: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> RpcResult<Vec<RpcTokenAccountBalance>> {
+        self.invoke(
+            (self.rpc_client.as_ref())
+                .get_token_largest_accounts_with_commitment(mint, commitment_config),
+        )
+    }
+
     pub fn get_token_supply(&self, mint: &Pubkey) -> ClientResult<UiTokenAmount> {
         self.invoke((self.rpc_client.as_ref()).get_token_supply(mint))
     }
diff --git a/client/src/rpc_request.rs b/client/src/rpc_request.rs
index d3f0ceb1c0ad54..32f8c45183762d 100644
--- a/client/src/rpc_request.rs
+++ b/client/src/rpc_request.rs
@@ -100,6 +100,7 @@ pub enum RpcRequest {
     GetTokenAccountBalance,
     GetTokenAccountsByDelegate,
     GetTokenAccountsByOwner,
+    GetTokenLargestAccounts,
     GetTokenSupply,
     GetTransaction,
     GetTransactionCount,
@@ -175,6 +176,7 @@ impl fmt::Display for RpcRequest {
             RpcRequest::GetTokenAccountsByDelegate => "getTokenAccountsByDelegate",
             RpcRequest::GetTokenAccountsByOwner => "getTokenAccountsByOwner",
             RpcRequest::GetTokenSupply => "getTokenSupply",
+            RpcRequest::GetTokenLargestAccounts => "getTokenLargestAccounts",
             RpcRequest::GetTransaction => "getTransaction",
             RpcRequest::GetTransactionCount => "getTransactionCount",
             RpcRequest::GetVersion => "getVersion",
@@ -322,6 +324,10 @@ mod tests {
         let test_request = RpcRequest::SendTransaction;
         let request = test_request.build_request_json(1, Value::Null);
         assert_eq!(request["method"], "sendTransaction");
+
+        let test_request = RpcRequest::GetTokenLargestAccounts;
+        let request = test_request.build_request_json(1, Value::Null);
+        assert_eq!(request["method"], "getTokenLargestAccounts");
     }
 
     #[test]

From 1608f903029e98d2b71e1dae9c597a4998842cd1 Mon Sep 17 00:00:00 2001
From: Tyera Eulberg
Date: Tue, 16 Aug 2022 14:47:12 -0700
Subject: [PATCH 14/67] Bump spl-token-2022 (#27181)

* Bump token-2022 to 0.4.3

* Allow cargo to bump stuff to v1.11.5
---
 Cargo.lock                    | 96 +++++++++++++++++++----------------
 account-decoder/Cargo.toml    |  2 +-
 client/Cargo.toml             |  2 +-
 ledger/Cargo.toml             |  2 +-
 programs/bpf/Cargo.lock       | 96 +++++++++++++++++++----------------
 rpc/Cargo.toml                |  2 +-
 transaction-status/Cargo.toml |  2 +-
 7 files changed, 111 insertions(+), 91 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index b5cbea2552af2c..6be2310543e124 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3122,15 +3122,6 @@ dependencies = [
  "crypto-mac",
 ]
 
-[[package]]
-name = "pbkdf2"
-version = "0.10.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7"
-dependencies = [
- "digest 0.10.3",
-]
-
 [[package]]
 name = "pbkdf2"
 version = "0.11.0"
@@ -5203,23 +5194,35 @@ dependencies = [
 
 [[package]]
 name = "solana-frozen-abi"
-version = "1.10.33"
+version = "1.11.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b"
+checksum = "28e4e35bc58c465f161bde764ebce41fdfcb503583cf3a77e0211274cc12b22d"
 dependencies = [
+ "ahash",
"blake3", + "block-buffer 0.9.0", "bs58", "bv", + "byteorder", + "cc", + "either", "generic-array 0.14.5", + "getrandom 0.1.16", + "hashbrown 0.12.3", "im", "lazy_static", "log", "memmap2", + "once_cell", + "rand_core 0.6.3", "rustc_version 0.4.0", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.10.33", + "solana-frozen-abi-macro 1.11.5", + "subtle", "thiserror", ] @@ -5258,9 +5261,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" +checksum = "708f837d748e574b1e53b250ab1f4a69ba330bbc10d041d02381165f0f36291a" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -5576,9 +5579,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" +checksum = "e7ea6fc68d63d33d862d919d4c8ad7f613ec243ccf6762d595c660020b289b57" dependencies = [ "env_logger", "lazy_static", @@ -5747,9 +5750,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" +checksum = "bdd314d85b171bb20ccdcaf07346a9d52a012b10d84f4706f0628813d002fef8" dependencies = [ "base64 0.13.0", "bincode", @@ -5760,31 +5763,38 @@ dependencies = [ "bs58", "bv", "bytemuck", + "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.1.16", + "getrandom 0.2.3", "itertools", "js-sys", "lazy_static", + "libc", "libsecp256k1", "log", + "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", + "rand_chacha 0.2.2", "rustc_version 0.4.0", "rustversion", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", + "tiny-bip39", "wasm-bindgen", + "zeroize", ] [[package]] @@ -6055,9 +6065,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" +checksum = "ad7d954df63b267857e26670e3aacfd8e2943ca703653b0418e5afc85046c2f3" dependencies = [ "assert_matches", "base64 0.13.0", @@ -6082,7 +6092,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.10.1", + "pbkdf2 0.11.0", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -6094,11 +6104,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-logger 1.10.33", - "solana-program 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-logger 1.11.5", + "solana-program 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", "uriparse", "wasm-bindgen", @@ -6160,9 +6170,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" +checksum = "d0d9e81bc46edcc517b2df504856d57a5101c7586ec63f3143ae11fbe2eba613" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -6575,9 +6585,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" +checksum = "62415c05a9ebfffaf8befaa61b24492ebf88269cf84cbeba714bac4125ec4ea3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -6596,8 +6606,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.10.33", - "solana-sdk 1.10.33", + "solana-program 1.11.5", + "solana-sdk 1.11.5", "subtle", "thiserror", "zeroize", @@ -6681,7 +6691,7 @@ dependencies = [ "borsh", "num-derive", "num-traits", - "solana-program 1.10.33", + "solana-program 1.11.5", "spl-token", "spl-token-2022", "thiserror", @@ -6693,7 +6703,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.10.33", + "solana-program 1.11.5", ] [[package]] @@ -6707,23 +6717,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", + "solana-program 1.11.5", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", - "solana-zk-token-sdk 1.10.33", + "solana-program 1.11.5", + "solana-zk-token-sdk 1.11.5", "spl-memo", "spl-token", "thiserror", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index d246ba28f186a0..96b97a5652fca5 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -24,7 +24,7 @@ solana-config-program = { path = "../programs/config", version = "=1.12.0" } solana-sdk = { path = "../sdk", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.11.2" diff --git a/client/Cargo.toml b/client/Cargo.toml index a17593a085ebee..bc2810459e0b20 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -49,7 +49,7 @@ solana-streamer = { path = "../streamer", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = "=1.12.0" } solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "1", features = ["full"] } tokio-stream = "0.1.9" diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index f5fc208efc815e..071c79d1349a30 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -52,7 +52,7 @@ solana-storage-proto = { path = "../storage-proto", version = "=1.12.0" } solana-transaction-status = { path = "../transaction-status", version = 
"=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } static_assertions = "1.1.0" tempfile = "3.3.0" thiserror = "1.0" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index e3605f8eca2b35..284e03a6b30e49 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -2906,15 +2906,6 @@ dependencies = [ "crypto-mac", ] -[[package]] -name = "pbkdf2" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" -dependencies = [ - "digest 0.10.3", -] - [[package]] name = "pbkdf2" version = "0.11.0" @@ -4840,23 +4831,35 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a5d3280421bb53fc12bdba1eaa505153fb4f99a06b5609dae22192652ead3b" +checksum = "28e4e35bc58c465f161bde764ebce41fdfcb503583cf3a77e0211274cc12b22d" dependencies = [ + "ahash", + "blake3", + "block-buffer 0.9.0", "bs58", "bv", + "byteorder 1.4.3", + "cc", + "either", "generic-array 0.14.5", + "getrandom 0.1.14", + "hashbrown 0.12.3", "im", "lazy_static", "log", "memmap2", + "once_cell", + "rand_core 0.6.3", "rustc_version", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", - "solana-frozen-abi-macro 1.10.33", + "solana-frozen-abi-macro 1.11.5", + "subtle", "thiserror", ] @@ -4894,9 +4897,9 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635c60ac96b1347af272c625465068b908aff919d19f29b5795a44310310494d" +checksum = "708f837d748e574b1e53b250ab1f4a69ba330bbc10d041d02381165f0f36291a" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -5055,9 +5058,9 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12cb6e6f1f9c9876d356c928b8c2ac532f6715e7cd2a1b4343d747bee3eca73" +checksum = "e7ea6fc68d63d33d862d919d4c8ad7f613ec243ccf6762d595c660020b289b57" dependencies = [ "env_logger", "lazy_static", @@ -5166,9 +5169,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeecf504cee2821b006871f70e7a1f54db15f914cedf259eaf5976fe606470f0" +checksum = "bdd314d85b171bb20ccdcaf07346a9d52a012b10d84f4706f0628813d002fef8" dependencies = [ "base64 0.13.0", "bincode", @@ -5179,31 +5182,38 @@ dependencies = [ "bs58", "bv", "bytemuck", + "cc", "console_error_panic_hook", "console_log", "curve25519-dalek", - "getrandom 0.1.14", + "getrandom 0.2.4", "itertools", "js-sys", "lazy_static", + "libc", "libsecp256k1 0.6.0", "log", + "memoffset", "num-derive", "num-traits", "parking_lot 0.12.1", "rand 0.7.3", + "rand_chacha 0.2.2", "rustc_version", "rustversion", "serde", "serde_bytes", "serde_derive", + "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", + "tiny-bip39", "wasm-bindgen", + 
"zeroize", ] [[package]] @@ -5436,9 +5446,9 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636f6c615aca6f75e22b6baceaf0ffed9d74367f9320b07ed57cd9b5ce2e4ff9" +checksum = "ad7d954df63b267857e26670e3aacfd8e2943ca703653b0418e5afc85046c2f3" dependencies = [ "assert_matches", "base64 0.13.0", @@ -5463,7 +5473,7 @@ dependencies = [ "memmap2", "num-derive", "num-traits", - "pbkdf2 0.10.1", + "pbkdf2 0.11.0", "qstring", "rand 0.7.3", "rand_chacha 0.2.2", @@ -5475,11 +5485,11 @@ dependencies = [ "serde_json", "sha2 0.10.2", "sha3 0.10.2", - "solana-frozen-abi 1.10.33", - "solana-frozen-abi-macro 1.10.33", - "solana-logger 1.10.33", - "solana-program 1.10.33", - "solana-sdk-macro 1.10.33", + "solana-frozen-abi 1.11.5", + "solana-frozen-abi-macro 1.11.5", + "solana-logger 1.11.5", + "solana-program 1.11.5", + "solana-sdk-macro 1.11.5", "thiserror", "uriparse", "wasm-bindgen", @@ -5536,9 +5546,9 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b8bcac4394644f21dc013e932a7df9f536fcecef3e5df43fe362b4ec532ce30" +checksum = "d0d9e81bc46edcc517b2df504856d57a5101c7586ec63f3143ae11fbe2eba613" dependencies = [ "bs58", "proc-macro2 1.0.41", @@ -5834,9 +5844,9 @@ dependencies = [ [[package]] name = "solana-zk-token-sdk" -version = "1.10.33" +version = "1.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "410ee53a26ac91098c289c983863535d4fbb6604b229ae1159503f48fa4fc90f" +checksum = "62415c05a9ebfffaf8befaa61b24492ebf88269cf84cbeba714bac4125ec4ea3" dependencies = [ "aes-gcm-siv", "arrayref", @@ -5855,8 +5865,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program 1.10.33", - "solana-sdk 1.10.33", + "solana-program 1.11.5", + "solana-sdk 1.11.5", "subtle", "thiserror", "zeroize", @@ -5940,7 +5950,7 @@ dependencies = [ "borsh", "num-derive", "num-traits", - "solana-program 1.10.33", + "solana-program 1.11.5", "spl-token", "spl-token-2022", "thiserror", @@ -5952,7 +5962,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd0dc6f70db6bacea7ff25870b016a65ba1d1b6013536f08e4fd79a8f9005325" dependencies = [ - "solana-program 1.10.33", + "solana-program 1.11.5", ] [[package]] @@ -5966,23 +5976,23 @@ dependencies = [ "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", + "solana-program 1.11.5", "thiserror", ] [[package]] name = "spl-token-2022" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a97cbf60b91b610c846ccf8eecca96d92a24a19ffbf9fe06cd0c84e76ec45e" +checksum = "e4c0ebca4740cc4c892aa31e07d0b4dc1a24cac4748376d4b34f8eb0fee9ff46" dependencies = [ "arrayref", "bytemuck", "num-derive", "num-traits", "num_enum", - "solana-program 1.10.33", - "solana-zk-token-sdk 1.10.33", + "solana-program 1.11.5", + "solana-zk-token-sdk 1.11.5", "spl-memo", "spl-token", "thiserror", diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 531218daed646f..3c55e8b25a7e6a 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -50,7 +50,7 @@ solana-transaction-status = { path = "../transaction-status", version = "=1.12.0 solana-version = { path = "../version", version = "=1.12.0" } solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } 
-spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } stream-cancel = "0.8.1" thiserror = "1.0" tokio = { version = "~1.14.1", features = ["full"] } diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 3e73b85153ffe0..9d59696e00a89b 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -28,7 +28,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } spl-associated-token-account = { version = "=1.1.1", features = ["no-entrypoint"] } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } spl-token = { version = "=3.5.0", features = ["no-entrypoint"] } -spl-token-2022 = { version = "=0.4.2", features = ["no-entrypoint"] } +spl-token-2022 = { version = "=0.4.3", features = ["no-entrypoint"] } thiserror = "1.0" [package.metadata.docs.rs] From 84675444d08a995beebb99ba55eb49e86dfee81d Mon Sep 17 00:00:00 2001 From: Andrew Schonfeld Date: Tue, 16 Aug 2022 18:22:38 -0400 Subject: [PATCH 15/67] VoteProgram.safeWithdraw function to safeguard against accidental vote account closures (#26586) feat: safe withdraw function Co-authored-by: aschonfeld --- web3.js/src/programs/vote.ts | 21 +++++++++++++++++++++ web3.js/test/program-tests/vote.test.ts | 15 +++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/web3.js/src/programs/vote.ts b/web3.js/src/programs/vote.ts index 6cd16671a977cd..db1a111da919a4 100644 --- a/web3.js/src/programs/vote.ts +++ b/web3.js/src/programs/vote.ts @@ -410,4 +410,25 @@ export class VoteProgram { data, }); } + + /** + * Generate a transaction to withdraw safely from a Vote account. + * + * This function was created as a safeguard for vote accounts running validators, `safeWithdraw` + * checks that the withdraw amount will not exceed the specified balance while leaving enough left + * to cover rent. If you wish to close the vote account by withdrawing the full amount, call the + * `withdraw` method directly. 
+ */ + static safeWithdraw( + params: WithdrawFromVoteAccountParams, + currentVoteAccountBalance: number, + rentExemptMinimum: number, + ): Transaction { + if (params.lamports > currentVoteAccountBalance - rentExemptMinimum) { + throw new Error( + 'Withdraw will leave vote account with insuffcient funds.', + ); + } + return VoteProgram.withdraw(params); + } } diff --git a/web3.js/test/program-tests/vote.test.ts b/web3.js/test/program-tests/vote.test.ts index 596e6e401b4fb7..6cd349a0c3a772 100644 --- a/web3.js/test/program-tests/vote.test.ts +++ b/web3.js/test/program-tests/vote.test.ts @@ -167,6 +167,21 @@ describe('VoteProgram', () => { // Withdraw from Vote account let recipient = Keypair.generate(); + const voteBalance = await connection.getBalance(newVoteAccount.publicKey); + + expect(() => + VoteProgram.safeWithdraw( + { + votePubkey: newVoteAccount.publicKey, + authorizedWithdrawerPubkey: authorized.publicKey, + lamports: voteBalance - minimumAmount + 1, + toPubkey: recipient.publicKey, + }, + voteBalance, + minimumAmount, + ), + ).to.throw('Withdraw will leave vote account with insuffcient funds.'); + let withdraw = VoteProgram.withdraw({ votePubkey: newVoteAccount.publicKey, authorizedWithdrawerPubkey: authorized.publicKey, From 354472d41251efc441255e1f0630f5669c2ba967 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:29:45 -0600 Subject: [PATCH 16/67] chore: bump futures from 0.3.21 to 0.3.23 (#27182) * chore: bump futures from 0.3.21 to 0.3.23 Bumps [futures](https://github.com/rust-lang/futures-rs) from 0.3.21 to 0.3.23. - [Release notes](https://github.com/rust-lang/futures-rs/releases) - [Changelog](https://github.com/rust-lang/futures-rs/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/futures-rs/compare/0.3.21...0.3.23) --- updated-dependencies: - dependency-name: futures dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 72 ++++++++++++++++++------------------ ledger/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 74 ++++++++++++++++++------------------- storage-bigtable/Cargo.toml | 2 +- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6be2310543e124..fb3eac355b2667 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1606,9 +1606,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1621,9 +1621,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1631,15 +1631,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1649,15 +1649,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -1666,21 +1666,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1811,7 +1811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.21", + "futures 0.3.23", "log", "reqwest", "serde", @@ -2059,7 +2059,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "headers", "http", "hyper", @@ -2280,7 +2280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2298,7 +2298,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "futures-executor", "futures-util", "log", @@ -2313,7 +2313,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-client-transports", ] @@ -2335,7 +2335,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2351,7 +2351,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2366,7 +2366,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "lazy_static", "log", @@ -2382,7 +2382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "globset", "jsonrpc-core", "lazy_static", @@ -3057,7 +3057,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "libc", "log", "rand 0.7.3", @@ -4284,7 +4284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92761393ee4dc3ff8f4af487bd58f4307c9329bbedea02cac0089ad9c411e153" dependencies = [ "dashmap 5.2.0", - "futures 0.3.21", + "futures 0.3.23", "lazy_static", "log", "parking_lot 0.12.1", @@ -4487,7 +4487,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.0", "bytes", - "futures 0.3.21", + "futures 0.3.23", "httparse", "log", "rand 0.8.5", @@ -4616,7 
+4616,7 @@ name = "solana-banks-client" version = "1.12.0" dependencies = [ "borsh", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-banks-server", "solana-program 1.12.0", @@ -4643,7 +4643,7 @@ version = "1.12.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-client", "solana-runtime", @@ -4940,7 +4940,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "enum_dispatch", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "indexmap", "indicatif", @@ -5451,7 +5451,7 @@ dependencies = [ "crossbeam-channel", "dashmap 4.0.2", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "itertools", "lazy_static", "libc", @@ -6255,7 +6255,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.21", + "futures 0.3.23", "goauth", "http", "hyper", @@ -6904,7 +6904,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.21", + "futures 0.3.23", "humantime", "opentelemetry", "pin-project", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 071c79d1349a30..c8f16585eef955 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -18,7 +18,7 @@ chrono-humanize = "0.2.1" crossbeam-channel = "0.5" dashmap = { version = "4.0.2", features = ["rayon", "raw-api"] } fs_extra = "1.2.0" -futures = "0.3.21" +futures = "0.3.23" itertools = "0.10.3" lazy_static = "1.4.0" libc = "0.2.131" diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 284e03a6b30e49..ef2dd18fb852a8 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -1401,9 +1401,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1416,9 +1416,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1426,15 +1426,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", @@ -1444,15 +1444,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = 
"0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2 1.0.41", "quote 1.0.18", @@ -1461,21 +1461,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures 0.1.31", "futures-channel", @@ -1580,7 +1580,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8af59a261bcf42f45d1b261232847b9b850ba0a1419d6100698246fb66e9240" dependencies = [ "arc-swap", - "futures 0.3.21", + "futures 0.3.23", "log", "reqwest", "serde", @@ -1811,7 +1811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "headers", "http", "hyper", @@ -2026,7 +2026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" dependencies = [ "derive_more", - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-pubsub", "jsonrpc-server-utils", @@ -2044,7 +2044,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "futures-executor", "futures-util", "log", @@ -2059,7 +2059,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-client-transports", ] @@ -2081,7 +2081,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "hyper", "jsonrpc-core", "jsonrpc-server-utils", @@ -2097,7 +2097,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "jsonrpc-server-utils", "log", @@ -2112,7 +2112,7 @@ version = "18.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "jsonrpc-core", "lazy_static", "log", @@ -2128,7 +2128,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" dependencies = [ "bytes", - "futures 0.3.21", + "futures 0.3.23", "globset", "jsonrpc-core", "lazy_static", @@ -2169,9 +2169,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.131" +version = "0.2.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c3b4822ccebfa39c02fc03d1534441b22ead323fa0f48bb7ddd8e6ba076a40" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" [[package]] name = "libloading" @@ -2841,7 +2841,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.21", + "futures 0.3.23", "libc", "log", "rand 0.7.3", @@ -4065,7 +4065,7 @@ checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ "base64 0.13.0", "bytes", - "futures 0.3.21", + "futures 0.3.23", "httparse", "log", "rand 0.8.5", @@ -4119,7 +4119,7 @@ name = "solana-banks-client" version = "1.12.0" dependencies = [ "borsh", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-program 1.12.0", "solana-sdk 1.12.0", @@ -4144,7 +4144,7 @@ version = "1.12.0" dependencies = [ "bincode", "crossbeam-channel", - "futures 0.3.21", + "futures 0.3.23", "solana-banks-interface", "solana-client", "solana-runtime", @@ -4657,7 +4657,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "enum_dispatch", - "futures 0.3.21", + "futures 0.3.23", "futures-util", "indexmap", "indicatif", @@ -5011,7 +5011,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "fs_extra", - "futures 0.3.21", + "futures 0.3.23", "itertools", "lazy_static", "libc", @@ -5612,7 +5612,7 @@ dependencies = [ "bzip2", "enum-iterator", "flate2", - "futures 0.3.21", + "futures 0.3.23", "goauth", "http", "hyper", @@ -6149,7 +6149,7 @@ checksum = "1c38a012bed6fb9681d3bf71ffaa4f88f3b4b9ed3198cda6e4c8462d24d4bb80" dependencies = [ "anyhow", "fnv", - "futures 0.3.21", + "futures 0.3.23", "humantime", "opentelemetry", "pin-project", diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index 6bcedb5f200236..0958288e3d4c3c 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -16,7 +16,7 @@ bytes = "1.2" bzip2 = "0.4.3" enum-iterator = "0.8.1" flate2 = "1.0.24" -futures = "0.3.21" +futures = "0.3.23" goauth = "0.13.1" http = "0.2.8" hyper = "0.14.20" From 490daaceea67a736a463b4b171e7bec545b5b6be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:59:18 -0600 Subject: [PATCH 17/67] chore: bump nix from 0.24.2 to 0.25.0 (#27179) * chore: bump nix from 0.24.2 to 0.25.0 Bumps [nix](https://github.com/nix-rust/nix) from 0.24.2 to 0.25.0. - [Release notes](https://github.com/nix-rust/nix/releases) - [Changelog](https://github.com/nix-rust/nix/blob/master/CHANGELOG.md) - [Commits](https://github.com/nix-rust/nix/compare/v0.24.2...v0.25.0) --- updated-dependencies: - dependency-name: nix dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * [auto-commit] Update all Cargo lock files Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot-buildkite --- Cargo.lock | 29 +++++++++++++++++++++-------- install/Cargo.toml | 2 +- net-utils/Cargo.toml | 2 +- perf/Cargo.toml | 2 +- programs/bpf/Cargo.lock | 10 ++++++---- streamer/Cargo.toml | 2 +- sys-tuner/Cargo.toml | 2 +- 7 files changed, 32 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb3eac355b2667..8616c2632fdfae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -270,9 +270,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "autotools" @@ -1088,7 +1088,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" dependencies = [ - "nix", + "nix 0.24.2", "winapi 0.3.9", ] @@ -2743,7 +2743,20 @@ dependencies = [ "bitflags", "cfg-if 1.0.0", "libc", +] + +[[package]] +name = "nix" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" +dependencies = [ + "autocfg", + "bitflags", + "cfg-if 1.0.0", + "libc", "memoffset", + "pin-utils", ] [[package]] @@ -5403,7 +5416,7 @@ dependencies = [ "dirs-next", "indicatif", "lazy_static", - "nix", + "nix 0.25.0", "reqwest", "semver 1.0.13", "serde", @@ -5662,7 +5675,7 @@ dependencies = [ "clap 3.1.8", "crossbeam-channel", "log", - "nix", + "nix 0.25.0", "rand 0.7.3", "serde", "serde_derive", @@ -5699,7 +5712,7 @@ dependencies = [ "libc", "log", "matches", - "nix", + "nix 0.25.0", "rand 0.7.3", "rayon", "serde", @@ -6316,7 +6329,7 @@ dependencies = [ "itertools", "libc", "log", - "nix", + "nix 0.25.0", "pem", "percentage", "pkcs8", @@ -6341,7 +6354,7 @@ dependencies = [ "clap 2.33.3", "libc", "log", - "nix", + "nix 0.25.0", "solana-logger 1.12.0", "solana-version", "sysctl", diff --git a/install/Cargo.toml b/install/Cargo.toml index dae1b8016bbde3..af4cffd7de52b1 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -21,7 +21,7 @@ ctrlc = { version = "3.2.2", features = ["termination"] } dirs-next = "2.0.0" indicatif = "0.17.0" lazy_static = "1.4.0" -nix = "0.24.2" +nix = "0.25.0" reqwest = { version = "0.11.11", default-features = false, features = ["blocking", "brotli", "deflate", "gzip", "rustls-tls", "json"] } semver = "1.0.13" serde = { version = "1.0.143", features = ["derive"] } diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index a0530e61f767dd..a26760134ef7e0 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -14,7 +14,7 @@ bincode = "1.3.3" clap = { version = "3.1.5", features = ["cargo"] } crossbeam-channel = "0.5" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" rand = "0.7.0" serde = "1.0.143" serde_derive = "1.0.103" diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 202bf36c687a43..3380f2ac89f7c0 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -30,7 +30,7 @@ solana-vote-program = { path = "../programs/vote", version = "=1.12.0" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" libc = "0.2.131" -nix = "0.24.2" +nix = "0.25.0" [lib] name = "solana_perf" diff 
--git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index ef2dd18fb852a8..3c5e530501665c 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -256,9 +256,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "autotools" @@ -2527,14 +2527,16 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "e322c04a9e3440c327fca7b6c8a63e6890a32fa2ad689db972425f07e0d22abb" dependencies = [ + "autocfg", "bitflags", "cfg-if 1.0.0", "libc", "memoffset", + "pin-utils", ] [[package]] diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index e72816d03099b8..97b42aa58c9050 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -17,7 +17,7 @@ indexmap = "1.9.1" itertools = "0.10.3" libc = "0.2.131" log = "0.4.17" -nix = "0.24.2" +nix = "0.25.0" pem = "1.0.2" percentage = "0.1.0" pkcs8 = { version = "0.8.0", features = ["alloc"] } diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index a98a719300d4b1..f1cc3c97e26b08 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -20,7 +20,7 @@ solana-version = { path = "../version", version = "=1.12.0" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" users = "0.10.0" -nix = "0.24.2" +nix = "0.25.0" sysctl = "0.4.4" [lib] From 82f93d108cd39a6eee9fffb3c895f3f6f156f136 Mon Sep 17 00:00:00 2001 From: Tyera Eulberg Date: Tue, 16 Aug 2022 19:52:11 -0700 Subject: [PATCH 18/67] Parse ConfidentialTransaction instructions (#26825) Parse ConfidentialTransfer instructions --- transaction-status/src/parse_token.rs | 10 +- .../extension/confidential_transfer.rs | 399 ++++++++++++++++++ .../src/parse_token/extension/mod.rs | 1 + 3 files changed, 406 insertions(+), 4 deletions(-) create mode 100644 transaction-status/src/parse_token/extension/confidential_transfer.rs diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs index 3c43cfbf426a28..f820883c74e1c8 100644 --- a/transaction-status/src/parse_token.rs +++ b/transaction-status/src/parse_token.rs @@ -3,8 +3,8 @@ use { check_num_accounts, ParsableProgram, ParseInstructionError, ParsedInstructionEnum, }, extension::{ - default_account_state::*, interest_bearing_mint::*, memo_transfer::*, - mint_close_authority::*, reallocate::*, transfer_fee::*, + confidential_transfer::*, default_account_state::*, interest_bearing_mint::*, + memo_transfer::*, mint_close_authority::*, reallocate::*, transfer_fee::*, }, serde_json::{json, Map, Value}, solana_account_decoder::parse_token::{ @@ -510,8 +510,10 @@ pub fn parse_token( account_keys, ) } - TokenInstruction::ConfidentialTransferExtension => Err( - ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken), + TokenInstruction::ConfidentialTransferExtension => parse_confidential_transfer_instruction( + &instruction.data[1..], + &instruction.accounts, + account_keys, ), TokenInstruction::DefaultAccountStateExtension => { if instruction.data.len() <= 2 { diff --git a/transaction-status/src/parse_token/extension/confidential_transfer.rs b/transaction-status/src/parse_token/extension/confidential_transfer.rs new file mode 100644 index 
00000000000000..867f90e97be133 --- /dev/null +++ b/transaction-status/src/parse_token/extension/confidential_transfer.rs @@ -0,0 +1,399 @@ +use { + super::*, + solana_account_decoder::parse_token_extension::UiConfidentialTransferMint, + spl_token_2022::{ + extension::confidential_transfer::{instruction::*, ConfidentialTransferMint}, + instruction::{decode_instruction_data, decode_instruction_type}, + }, +}; + +pub(in crate::parse_token) fn parse_confidential_transfer_instruction( + instruction_data: &[u8], + account_indexes: &[u8], + account_keys: &AccountKeys, +) -> Result { + match decode_instruction_type(instruction_data) + .map_err(|_| ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken))? + { + ConfidentialTransferInstruction::InitializeMint => { + check_num_token_accounts(account_indexes, 1)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "initializeConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::UpdateMint => { + check_num_token_accounts(account_indexes, 3)?; + let confidential_transfer_mint: ConfidentialTransferMint = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let confidential_transfer_mint: UiConfidentialTransferMint = + confidential_transfer_mint.into(); + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "confidentialTransferMintAuthority": account_keys[account_indexes[1] as usize].to_string(), + "newConfidentialTransferMintAuthority": account_keys[account_indexes[2] as usize].to_string(), + }); + let map = value.as_object_mut().unwrap(); + map.append(json!(confidential_transfer_mint).as_object_mut().unwrap()); + Ok(ParsedInstructionEnum { + instruction_type: "updateConfidentialTransferMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ConfigureAccount => { + check_num_token_accounts(account_indexes, 3)?; + let configure_account_data: ConfigureAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let maximum_pending_balance_credit_counter: u64 = configure_account_data + .maximum_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "encryptionPubkey": format!("{}", configure_account_data.encryption_pubkey), + "decryptableZeroBalance": format!("{}", configure_account_data.decryptable_zero_balance), + "maximumPendingBalanceCreditCounter": maximum_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "configureConfidentialTransferAccount".to_string(), + info: value, + }) + } + 
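+        // The remaining arms below follow the same pattern as the three above:
+        // check the expected account count, decode any instruction-specific
+        // data, then emit a ParsedInstructionEnum with camelCase info fields.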
ConfidentialTransferInstruction::ApproveAccount => { + check_num_token_accounts(account_indexes, 3)?; + Ok(ParsedInstructionEnum { + instruction_type: "approveConfidentialTransferAccount".to_string(), + info: json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "mint": account_keys[account_indexes[1] as usize].to_string(), + "confidentialTransferAuditorAuthority": account_keys[account_indexes[2] as usize].to_string(), + }), + }) + } + ConfidentialTransferInstruction::EmptyAccount => { + check_num_token_accounts(account_indexes, 3)?; + let empty_account_data: EmptyAccountInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = empty_account_data.proof_instruction_offset; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[1] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 2, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "emptyConfidentialTransferAccount".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Deposit => { + check_num_token_accounts(account_indexes, 4)?; + let deposit_data: DepositInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = deposit_data.amount.into(); + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "amount": amount, + "decimals": deposit_data.decimals, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "depositConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Withdraw => { + check_num_token_accounts(account_indexes, 5)?; + let withdrawal_data: WithdrawInstructionData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let amount: u64 = withdrawal_data.amount.into(); + let proof_instruction_offset: i8 = withdrawal_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "amount": amount, + "decimals": withdrawal_data.decimals, + "newDecryptableAvailableBalance": format!("{}", withdrawal_data.new_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawConfidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::Transfer => { + check_num_token_accounts(account_indexes, 5)?; + let 
transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransfer".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::TransferWithFee => { + check_num_token_accounts(account_indexes, 5)?; + let transfer_data: TransferInstructionData = *decode_instruction_data(instruction_data) + .map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = transfer_data.proof_instruction_offset; + let mut value = json!({ + "source": account_keys[account_indexes[0] as usize].to_string(), + "destination": account_keys[account_indexes[1] as usize].to_string(), + "mint": account_keys[account_indexes[2] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[3] as usize].to_string(), + "newSourceDecryptableAvailableBalance": format!("{}", transfer_data.new_source_decryptable_available_balance), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 4, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "confidentialTransferWithFee".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::ApplyPendingBalance => { + check_num_token_accounts(account_indexes, 2)?; + let apply_pending_balance_data: ApplyPendingBalanceData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let expected_pending_balance_credit_counter: u64 = apply_pending_balance_data + .expected_pending_balance_credit_counter + .into(); + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + "newDecryptableAvailableBalance": format!("{}", apply_pending_balance_data.new_decryptable_available_balance), + "expectedPendingBalanceCreditCounter": expected_pending_balance_credit_counter, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "applyPendingConfidentialTransferBalance".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::EnableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: 
"enableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::DisableBalanceCredits => { + check_num_token_accounts(account_indexes, 2)?; + let mut value = json!({ + "account": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 1, + account_keys, + account_indexes, + "owner", + "multisigOwner", + ); + Ok(ParsedInstructionEnum { + instruction_type: "disableConfidentialTransferBalanceCredits".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::WithdrawWithheldTokensFromMint => { + check_num_token_accounts(account_indexes, 4)?; + let withdraw_withheld_data: WithdrawWithheldTokensFromMintData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + + }); + let map = value.as_object_mut().unwrap(); + parse_signers( + map, + 3, + account_keys, + account_indexes, + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromMint".to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::WithdrawWithheldTokensFromAccounts => { + let withdraw_withheld_data: WithdrawWithheldTokensFromAccountsData = + *decode_instruction_data(instruction_data).map_err(|_| { + ParseInstructionError::InstructionNotParsable(ParsableProgram::SplToken) + })?; + let num_token_accounts = withdraw_withheld_data.num_token_accounts; + check_num_token_accounts(account_indexes, 4 + num_token_accounts as usize)?; + let proof_instruction_offset: i8 = withdraw_withheld_data.proof_instruction_offset; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + "feeRecipient": account_keys[account_indexes[1] as usize].to_string(), + "instructionsSysvar": account_keys[account_indexes[2] as usize].to_string(), + "proofInstructionOffset": proof_instruction_offset, + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + let first_source_account_index = account_indexes + .len() + .saturating_sub(num_token_accounts as usize); + for i in account_indexes[first_source_account_index..].iter() { + source_accounts.push(account_keys[*i as usize].to_string()); + } + map.insert("sourceAccounts".to_string(), json!(source_accounts)); + parse_signers( + map, + 3, + account_keys, + &account_indexes[..first_source_account_index], + "withdrawWithheldAuthority", + "multisigWithdrawWithheldAuthority", + ); + Ok(ParsedInstructionEnum { + instruction_type: "withdrawWithheldConfidentialTransferTokensFromAccounts" + .to_string(), + info: value, + }) + } + ConfidentialTransferInstruction::HarvestWithheldTokensToMint => { + check_num_token_accounts(account_indexes, 1)?; + let mut value = json!({ + "mint": account_keys[account_indexes[0] as usize].to_string(), + + }); + let map = value.as_object_mut().unwrap(); + let mut source_accounts: Vec = vec![]; + for i in account_indexes.iter().skip(1) { + source_accounts.push(account_keys[*i as usize].to_string()); + } 
+ map.insert("sourceAccounts".to_string(), json!(source_accounts)); + Ok(ParsedInstructionEnum { + instruction_type: "harvestWithheldConfidentialTransferTokensToMint".to_string(), + info: value, + }) + } + } +} diff --git a/transaction-status/src/parse_token/extension/mod.rs b/transaction-status/src/parse_token/extension/mod.rs index 3c84942651ab79..f5d8e41f4a94d5 100644 --- a/transaction-status/src/parse_token/extension/mod.rs +++ b/transaction-status/src/parse_token/extension/mod.rs @@ -1,5 +1,6 @@ use super::*; +pub(super) mod confidential_transfer; pub(super) mod default_account_state; pub(super) mod interest_bearing_mint; pub(super) mod memo_transfer; From c76807acf566f3d36ae0c2c6d94ccffde2e6f73e Mon Sep 17 00:00:00 2001 From: apfitzge Date: Wed, 17 Aug 2022 12:57:52 -0500 Subject: [PATCH 19/67] snapshots: serialize version file first (#27192) serialize version file first --- runtime/src/snapshot_utils.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 1eab70b8cb5ab7..e8dbe024619622 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -345,10 +345,12 @@ pub fn archive_snapshot_package( let do_archive_files = |encoder: &mut dyn Write| -> Result<()> { let mut archive = tar::Builder::new(encoder); + // Serialize the version and snapshots files before accounts so we can quickly determine the version + // and other bank fields. This is necessary if we want to interleave unpacking with reconstruction + archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; for dir in ["snapshots", "accounts"] { archive.append_dir_all(dir, staging_dir.as_ref().join(dir))?; } - archive.append_path_with_name(staging_dir.as_ref().join("version"), "version")?; archive.into_inner()?; Ok(()) }; From ce40986c8ecb68f72037f2616a33484d8d21b5ee Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Wed, 17 Aug 2022 15:14:31 -0500 Subject: [PATCH 20/67] serialize incremental_snapshot_hash (#26839) * serialize incremental_snapshot_hash * pr feedback --- core/src/accounts_hash_verifier.rs | 1 + core/tests/snapshots.rs | 3 ++ runtime/src/bank.rs | 26 ++++++++++++++ runtime/src/serde_snapshot.rs | 14 ++++++-- runtime/src/serde_snapshot/newer.rs | 17 +++++++-- runtime/src/serde_snapshot/tests.rs | 54 ++++++++++++++++++++++------- runtime/src/snapshot_utils.rs | 2 ++ 7 files changed, 101 insertions(+), 16 deletions(-) diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index ae8f0dbe780aae..118529dd983640 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -190,6 +190,7 @@ impl AccountsHashVerifier { accounts_package.snapshot_links.path(), accounts_package.slot, &accounts_hash, + None, ); datapoint_info!( "accounts_hash_verifier", diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 3dcc004a53df6a..6e3b7869fac0d6 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -256,6 +256,7 @@ fn run_bank_forks_snapshot_n( accounts_package.snapshot_links.path(), accounts_package.slot, &last_bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, last_bank.get_accounts_hash()); snapshot_utils::archive_snapshot_package( @@ -491,6 +492,7 @@ fn test_concurrent_snapshot_packaging( accounts_package.snapshot_links.path(), accounts_package.slot, &Hash::default(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, 
Hash::default()); pending_snapshot_package @@ -534,6 +536,7 @@ fn test_concurrent_snapshot_packaging( saved_snapshots_dir.path(), saved_slot, &Hash::default(), + None, ); snapshot_utils::verify_snapshot_archive( diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index f7679b771a9750..e9caf6d66f782b 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -236,6 +236,25 @@ impl RentDebit { } } +/// Incremental snapshots only calculate their accounts hash based on the account changes WITHIN the incremental slot range. +/// So, we need to keep track of the full snapshot expected accounts hash results. +/// We also need to keep track of the hash and capitalization specific to the incremental snapshot slot range. +/// The capitalization we calculate for the incremental slot will NOT be consistent with the bank's capitalization. +/// It is not feasible to calculate a capitalization delta that is correct given just incremental slots account data and the full snapshot's capitalization. +#[derive(Serialize, Deserialize, AbiExample, Clone, Debug, Default, PartialEq, Eq)] +pub struct BankIncrementalSnapshotPersistence { + /// slot of full snapshot + pub full_slot: Slot, + /// accounts hash from the full snapshot + pub full_hash: Hash, + /// capitalization from the full snapshot + pub full_capitalization: u64, + /// hash of the accounts in the incremental snapshot slot range, including zero-lamport accounts + pub incremental_hash: Hash, + /// capitalization of the accounts in the incremental snapshot slot range + pub incremental_capitalization: u64, +} + #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct RentDebits(HashMap); impl RentDebits { @@ -976,6 +995,7 @@ pub struct BankFieldsToDeserialize { pub(crate) epoch_stakes: HashMap, pub(crate) is_delta: bool, pub(crate) accounts_data_len: u64, + pub(crate) incremental_snapshot_persistence: Option, } // Bank's common fields shared by all supported snapshot versions for serialization. @@ -1083,6 +1103,7 @@ impl PartialEq for Bank { accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, fee_structure: _, + incremental_snapshot_persistence: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this ParitalEq is accordingly updated. 
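Note: the new field is appended at the very end of the serialized bank stream, and the deserializer (see the serde_snapshot hunks below) reads it back through ignore_eof_error, so snapshots written before this change simply hit end-of-stream and fall back to None. A minimal, self-contained sketch of that trailing-optional-field pattern, assuming bincode 1.x; the u64 stands in for the pre-existing bank fields, and every name here is illustrative rather than the actual snapshot code:

use {
    bincode::deserialize_from,
    std::io::{Cursor, ErrorKind as IoErrorKind},
};

/// Map bincode's UnexpectedEof to the type's default value, so a field
/// appended at the end of a stream stays optional for older streams.
fn ignore_eof_error<T: Default>(result: bincode::Result<T>) -> bincode::Result<T> {
    match result {
        Err(err)
            if matches!(&*err, bincode::ErrorKind::Io(io)
                if io.kind() == IoErrorKind::UnexpectedEof) =>
        {
            Ok(T::default())
        }
        other => other,
    }
}

fn main() -> bincode::Result<()> {
    // An "old" stream that ends right after the pre-existing fields.
    let old_stream = bincode::serialize(&42u64)?;
    let mut reader = Cursor::new(&old_stream);
    let existing: u64 = deserialize_from(&mut reader)?;
    // Reading the newly appended trailing field hits EOF and defaults to None.
    let trailing: Option<u64> = ignore_eof_error(deserialize_from(&mut reader))?;
    assert_eq!((existing, trailing), (42, None));
    Ok(())
}

Appending at the end keeps old snapshots readable without a snapshot-version bump; the existing TODO next to lamports_per_signature in newer.rs notes the same trade-off for the field appended this way earlier.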
@@ -1336,6 +1357,8 @@ pub struct Bank { /// Transaction fee structure pub fee_structure: FeeStructure, + + pub incremental_snapshot_persistence: Option, } struct VoteWithStakeDelegations { @@ -1466,6 +1489,7 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc: BankRc::new(accounts, Slot::default()), status_cache: Arc::>::default(), @@ -1765,6 +1789,7 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Bank { + incremental_snapshot_persistence: None, rewrites_skipped_this_slot: Rewrites::default(), rc, status_cache, @@ -2126,6 +2151,7 @@ impl Bank { } let feature_set = new(); let mut bank = Self { + incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rewrites_skipped_this_slot: Rewrites::default(), rc: bank_rc, status_cache: new(), diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index e32aecbe4b6705..5b42208d042e7c 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -8,7 +8,7 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, append_vec::{AppendVec, StoredMetaWriteVersion}, - bank::{Bank, BankFieldsToDeserialize, BankRc}, + bank::{Bank, BankFieldsToDeserialize, BankIncrementalSnapshotPersistence, BankRc}, blockhash_queue::BlockhashQueue, builtins::Builtins, epoch_stakes::EpochStakes, @@ -77,6 +77,7 @@ pub struct AccountsDbFields( /// slots that were roots within the last epoch for which we care about the hash value #[serde(deserialize_with = "default_on_eof")] Vec<(Slot, Hash)>, + // here? ); /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a @@ -193,6 +194,7 @@ trait TypeContext<'a>: PartialEq { stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -370,12 +372,18 @@ fn reserialize_bank_fields_with_new_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> Result<(), Error> where W: Write, R: Read, { - newer::Context::reserialize_bank_fields_with_hash(stream_reader, stream_writer, accounts_hash) + newer::Context::reserialize_bank_fields_with_hash( + stream_reader, + stream_writer, + accounts_hash, + incremental_snapshot_persistence, + ) } /// effectively updates the accounts hash in the serialized bank file on disk @@ -387,6 +395,7 @@ pub fn reserialize_bank_with_new_accounts_hash( bank_snapshots_dir: impl AsRef, slot: Slot, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> bool { let bank_post = snapshot_utils::get_bank_snapshots_dir(bank_snapshots_dir, slot); let bank_post = bank_post.join(snapshot_utils::get_snapshot_file_name(slot)); @@ -404,6 +413,7 @@ pub fn reserialize_bank_with_new_accounts_hash( &mut BufReader::new(file), &mut BufWriter::new(file_out), accounts_hash, + incremental_snapshot_persistence, ) .unwrap(); } diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 3dd73803cf3010..512737106aebc9 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -96,6 +96,7 @@ impl From for BankFieldsToDeserialize { 
stakes: dvb.stakes, epoch_stakes: dvb.epoch_stakes, is_delta: dvb.is_delta, + incremental_snapshot_persistence: None, } } } @@ -209,6 +210,7 @@ impl<'a> TypeContext<'a> for Context { // we can grab it on restart. // TODO: if we do a snapshot version bump, consider moving this out. lamports_per_signature, + None::, ) .serialize(serializer) } @@ -314,6 +316,10 @@ impl<'a> TypeContext<'a> for Context { bank_fields.fee_rate_governor = bank_fields .fee_rate_governor .clone_with_lamports_per_signature(lamports_per_signature); + + let incremental_snapshot_persistence = ignore_eof_error(deserialize_from(stream))?; + bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence; + Ok((bank_fields, accounts_db_fields)) } @@ -327,12 +333,13 @@ impl<'a> TypeContext<'a> for Context { } /// deserialize the bank from 'stream_reader' - /// modify the accounts_hash + /// modify the accounts_hash and incremental_snapshot_persistence /// reserialize the bank to 'stream_writer' fn reserialize_bank_fields_with_hash( stream_reader: &mut BufReader, stream_writer: &mut BufWriter, accounts_hash: &Hash, + incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, ) -> std::result::Result<(), Box> where R: Read, @@ -345,6 +352,7 @@ impl<'a> TypeContext<'a> for Context { let blockhash_queue = RwLock::new(rhs.blockhash_queue.clone()); let hard_forks = RwLock::new(rhs.hard_forks.clone()); let lamports_per_signature = rhs.fee_rate_governor.lamports_per_signature; + let bank = SerializableVersionedBank { blockhash_queue: &blockhash_queue, ancestors: &rhs.ancestors, @@ -382,7 +390,12 @@ impl<'a> TypeContext<'a> for Context { bincode::serialize_into( stream_writer, - &(bank, accounts_db_fields, lamports_per_signature), + &( + bank, + accounts_db_fields, + lamports_per_signature, + incremental_snapshot_persistence, + ), ) } } diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index faf3006d9aaccc..1de6ee2a5d54c6 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -190,6 +190,7 @@ fn test_bank_serialize_style( serde_style: SerdeStyle, reserialize_accounts_hash: bool, update_accounts_hash: bool, + incremental_snapshot_persistence: bool, ) { solana_logger::setup(); let (genesis_config, _) = create_genesis_config(500); @@ -236,8 +237,18 @@ fn test_bank_serialize_style( } else { bank2.get_accounts_hash() }; - if reserialize_accounts_hash { - let slot = bank2.slot(); + + let slot = bank2.slot(); + let incremental = + incremental_snapshot_persistence.then(|| BankIncrementalSnapshotPersistence { + full_slot: slot + 1, + full_hash: Hash::new(&[1; 32]), + full_capitalization: 31, + incremental_hash: Hash::new(&[2; 32]), + incremental_capitalization: 32, + }); + + if reserialize_accounts_hash || incremental_snapshot_persistence { let temp_dir = TempDir::new().unwrap(); let slot_dir = temp_dir.path().join(slot.to_string()); let post_path = slot_dir.join(slot.to_string()); @@ -248,21 +259,32 @@ fn test_bank_serialize_style( let mut f = std::fs::File::create(&pre_path).unwrap(); f.write_all(&buf).unwrap(); } + assert!(reserialize_bank_with_new_accounts_hash( temp_dir.path(), slot, - &accounts_hash + &accounts_hash, + incremental.as_ref(), )); let previous_len = buf.len(); // larger buffer than expected to make sure the file isn't larger than expected - let mut buf_reserialized = vec![0; previous_len + 1]; + let sizeof_none = std::mem::size_of::(); + let sizeof_incremental_snapshot_persistence = + std::mem::size_of::>(); + 
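+        // Worst-case buffer: the previous serialized length, plus the newly
+        // appended Option<BankIncrementalSnapshotPersistence>, plus one spare
+        // byte so an unexpectedly large reserialized file is detected.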
let mut buf_reserialized = + vec![0; previous_len + sizeof_incremental_snapshot_persistence + 1]; { let mut f = std::fs::File::open(post_path).unwrap(); let size = f.read(&mut buf_reserialized).unwrap(); - assert_eq!(size, previous_len); + let expected = if !incremental_snapshot_persistence { + previous_len + } else { + previous_len + sizeof_incremental_snapshot_persistence - sizeof_none + }; + assert_eq!(size, expected); buf_reserialized.truncate(size); } - if update_accounts_hash { + if update_accounts_hash || incremental_snapshot_persistence { // We cannot guarantee buffer contents are exactly the same if hash is the same. // Things like hashsets/maps have randomness in their in-mem representations. // This make serialized bytes not deterministic. @@ -311,6 +333,7 @@ fn test_bank_serialize_style( assert_eq!(dbank.get_balance(&key3.pubkey()), 0); assert_eq!(dbank.get_accounts_hash(), accounts_hash); assert!(bank2 == dbank); + assert_eq!(dbank.incremental_snapshot_persistence, incremental); } pub(crate) fn reconstruct_accounts_db_via_serialization( @@ -359,11 +382,18 @@ fn test_bank_serialize_newer() { for (reserialize_accounts_hash, update_accounts_hash) in [(false, false), (true, false), (true, true)] { - test_bank_serialize_style( - SerdeStyle::Newer, - reserialize_accounts_hash, - update_accounts_hash, - ) + for incremental_snapshot_persistence in if reserialize_accounts_hash { + [false, true].to_vec() + } else { + [false].to_vec() + } { + test_bank_serialize_style( + SerdeStyle::Newer, + reserialize_accounts_hash, + update_accounts_hash, + incremental_snapshot_persistence, + ) + } } } @@ -555,7 +585,7 @@ mod test_bank_serialize { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "9vGBt7YfymKUTPWLHVVpQbDtPD7dFDwXRMFkCzwujNqJ")] + #[frozen_abi(digest = "5py4Wkuj5fV2sLyA1MrPg4pGNwMEaygQLnpLyY8MMLGC")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperNewer { #[serde(serialize_with = "wrapper_newer")] diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index e8dbe024619622..19e9d02f684273 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2045,6 +2045,7 @@ pub fn package_and_archive_full_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); @@ -2097,6 +2098,7 @@ pub fn package_and_archive_incremental_snapshot( accounts_package.snapshot_links.path(), accounts_package.slot, &bank.get_accounts_hash(), + None, ); let snapshot_package = SnapshotPackage::new(accounts_package, bank.get_accounts_hash()); From 4e7bc0f4cc2ceb102b20b5c0bce690d3ff1fd0eb Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Wed, 17 Aug 2022 22:01:51 +0000 Subject: [PATCH 21/67] derives Error trait for ClusterInfoError and core::result::Error (#27208) --- core/src/result.rs | 80 +++++++++++--------------------------- gossip/src/cluster_info.rs | 8 +++- 2 files changed, 29 insertions(+), 59 deletions(-) diff --git a/core/src/result.rs b/core/src/result.rs index 6c9b66b6d459c0..2aa8f8718f5141 100644 --- a/core/src/result.rs +++ b/core/src/result.rs @@ -3,53 +3,42 @@ use { solana_gossip::{cluster_info, gossip_error::GossipError}, solana_ledger::blockstore, + thiserror::Error, }; -#[derive(Debug)] +#[derive(Debug, Error)] pub enum Error { - Io(std::io::Error), - Recv(crossbeam_channel::RecvError), + #[error(transparent)] + 
Blockstore(#[from] blockstore::BlockstoreError), + #[error(transparent)] + ClusterInfo(#[from] cluster_info::ClusterInfoError), + #[error(transparent)] + Gossip(#[from] GossipError), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("ReadyTimeout")] ReadyTimeout, - RecvTimeout(crossbeam_channel::RecvTimeoutError), - TrySend, - Serialize(std::boxed::Box), - ClusterInfo(cluster_info::ClusterInfoError), + #[error(transparent)] + Recv(#[from] crossbeam_channel::RecvError), + #[error(transparent)] + RecvTimeout(#[from] crossbeam_channel::RecvTimeoutError), + #[error("Send")] Send, - Blockstore(blockstore::BlockstoreError), - WeightedIndex(rand::distributions::weighted::WeightedError), - Gossip(GossipError), + #[error("TrySend")] + TrySend, + #[error(transparent)] + Serialize(#[from] std::boxed::Box), + #[error(transparent)] + WeightedIndex(#[from] rand::distributions::weighted::WeightedError), } pub type Result = std::result::Result; -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "solana error") - } -} - -impl std::error::Error for Error {} - -impl std::convert::From for Error { - fn from(e: crossbeam_channel::RecvError) -> Error { - Error::Recv(e) - } -} impl std::convert::From for Error { fn from(_e: crossbeam_channel::ReadyTimeoutError) -> Error { Error::ReadyTimeout } } -impl std::convert::From for Error { - fn from(e: crossbeam_channel::RecvTimeoutError) -> Error { - Error::RecvTimeout(e) - } -} -impl std::convert::From for Error { - fn from(e: cluster_info::ClusterInfoError) -> Error { - Error::ClusterInfo(e) - } -} impl std::convert::From> for Error { fn from(_e: crossbeam_channel::TrySendError) -> Error { Error::TrySend @@ -60,31 +49,6 @@ impl std::convert::From> for Error { Error::Send } } -impl std::convert::From for Error { - fn from(e: std::io::Error) -> Error { - Error::Io(e) - } -} -impl std::convert::From> for Error { - fn from(e: std::boxed::Box) -> Error { - Error::Serialize(e) - } -} -impl std::convert::From for Error { - fn from(e: blockstore::BlockstoreError) -> Error { - Error::Blockstore(e) - } -} -impl std::convert::From for Error { - fn from(e: rand::distributions::weighted::WeightedError) -> Error { - Error::WeightedIndex(e) - } -} -impl std::convert::From for Error { - fn from(e: GossipError) -> Error { - Error::Gossip(e) - } -} #[cfg(test)] mod tests { diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 55d3c177515d6e..40142f70e3d336 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -92,6 +92,7 @@ use { thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, + thiserror::Error, }; /// The Data plane fanout size, also used as the neighborhood size @@ -138,12 +139,17 @@ const MIN_STAKE_FOR_GOSSIP: u64 = solana_sdk::native_token::LAMPORTS_PER_SOL; /// Minimum number of staked nodes for enforcing stakes in gossip. 
const MIN_NUM_STAKED_NODES: usize = 500; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Error)] pub enum ClusterInfoError { + #[error("NoPeers")] NoPeers, + #[error("NoLeader")] NoLeader, + #[error("BadContactInfo")] BadContactInfo, + #[error("BadGossipAddress")] BadGossipAddress, + #[error("TooManyIncrementalSnapshotHashes")] TooManyIncrementalSnapshotHashes, } From a92758bc9eebf752d1c70bdb46380660acebb9e1 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Wed, 17 Aug 2022 18:45:59 -0400 Subject: [PATCH 22/67] Add clean_accounts_for_tests() (#27200) --- accounts-bench/src/main.rs | 2 +- runtime/benches/accounts.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 101 +++++++++++++++++++----------------- runtime/tests/accounts.rs | 2 +- 5 files changed, 57 insertions(+), 52 deletions(-) diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 987915d8c9fe15..3d1c18633f2747 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -110,7 +110,7 @@ fn main() { for x in 0..iterations { if clean { let mut time = Measure::start("clean"); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); time.stop(); println!("{}", time); for slot in 0..num_slots { diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index ec4eea2fefde9b..7160c2efae8e2b 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -178,7 +178,7 @@ fn bench_delete_dependencies(bencher: &mut Bencher) { accounts.add_root(i); } bencher.iter(|| { - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); }); } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 9c7838938a5fa8..86d14aaf7b681c 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -3081,7 +3081,7 @@ mod tests { } } info!("done..cleaning.."); - accounts.accounts_db.clean_accounts(None, false, None); + accounts.accounts_db.clean_accounts_for_tests(); } fn load_accounts_no_store( diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4f2fa0a5ba9c10..caa4cc77f31376 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2507,6 +2507,11 @@ impl AccountsDb { pubkeys } + /// Call clean_accounts() with the common parameters that tests/benches use. 
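+    /// (Equivalent to calling `clean_accounts(None, false, None)` directly.)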
+ pub fn clean_accounts_for_tests(&self) { + self.clean_accounts(None, false, None) + } + // Purge zero lamport accounts and older rooted account states as garbage // collection // Only remove those accounts where the entire rooted history of the account @@ -10000,7 +10005,7 @@ pub mod tests { // overwrite old rooted account version; only the r_slot_0_stores.count() should be // decremented db.store_uncached(2, &[(&pubkeys[0], &account)]); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); { let slot_0_stores = &db.storage.get_slot_stores(0).unwrap(); let slot_1_stores = &db.storage.get_slot_stores(1).unwrap(); @@ -10439,7 +10444,7 @@ pub mod tests { //slot is gone accounts.print_accounts_stats("pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.map.get(&0).is_none()); //new value is there @@ -10522,7 +10527,7 @@ pub mod tests { // Slot 1 should be removed, slot 0 cannot be removed because it still has // the latest update for pubkey 2 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_some()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10557,7 +10562,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slots 0 and 1 should each have been cleaned because all of their // accounts are zero lamports assert!(accounts.storage.get_slot_stores(0).is_none()); @@ -10571,7 +10576,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0 assert!(accounts.storage.get_slot_stores(2).is_none()); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -10598,7 +10603,7 @@ pub mod tests { // Slot 0 should be removed, and // zero-lamport account should be cleaned - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts.storage.get_slot_stores(0).is_none()); assert!(accounts.storage.get_slot_stores(1).is_none()); @@ -10641,7 +10646,7 @@ pub mod tests { assert_eq!(accounts.alive_account_count_in_slot(0), 1); assert_eq!(accounts.alive_account_count_in_slot(1), 1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //now old state is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10675,7 +10680,7 @@ pub mod tests { accounts.print_accounts_stats(""); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //Old state behind zero-lamport account is cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10792,7 +10797,7 @@ pub mod tests { accounts.account_indexes.keys = None; } - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); //both zero lamport and normal accounts are cleaned up assert_eq!(accounts.alive_account_count_in_slot(0), 0); @@ -10883,7 +10888,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); 
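The dozens of test-site rewrites that follow are all the same mechanical substitution enabled by the helper above. The refactor pattern itself, as a standalone sketch (the `Db`/`clean` names are hypothetical stand-ins for `AccountsDb::clean_accounts`):

```rust
struct Db;

impl Db {
    // Mirrors the real parameter shape: max clean root, is-startup flag,
    // last full snapshot slot. Cleaning logic elided.
    fn clean(&self, max_root: Option<u64>, is_startup: bool, last_snapshot: Option<u64>) {
        let _ = (max_root, is_startup, last_snapshot);
    }

    /// Convenience wrapper so tests/benches don't repeat `(None, false, None)`
    /// at every call site -- and so a future signature change touches one spot.
    fn clean_for_tests(&self) {
        self.clean(None, false, None)
    }
}

fn main() {
    Db.clean_for_tests();
}
```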
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10900,7 +10905,7 @@ pub mod tests { assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1); //now uncleaned roots are cleaned up - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0); } @@ -10912,7 +10917,7 @@ pub mod tests { // Create 100 accounts in slot 0 create_account(&accounts, &mut pubkeys, 0, 100, 0, 0); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); check_accounts(&accounts, &pubkeys, 0, 100, 1); // do some updates to those accounts and re-check @@ -10948,7 +10953,7 @@ pub mod tests { // Modify first 20 of the accounts from slot 0 in slot 2 modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Overwrite account 31 from slot 0 with lamports=0 into slot 2. // Slot 2 should now have 20 + 1 = 21 accounts let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -10962,7 +10967,7 @@ pub mod tests { accounts.add_root(latest_slot); assert!(check_storage(&accounts, 2, 31)); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // The first 20 accounts of slot 0 have been updated in slot 2, as well as // accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and // slot 2 respectively), so only 78 accounts are left in slot 0's storage entries. @@ -11102,7 +11107,7 @@ pub mod tests { accounts.print_accounts_stats("pre_purge"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post_purge"); @@ -11167,7 +11172,7 @@ pub mod tests { info!("ancestors: {:?}", ancestors); let hash = accounts.update_accounts_hash_test(current_slot, &ancestors); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( accounts.update_accounts_hash_test(current_slot, &ancestors), @@ -11234,7 +11239,7 @@ pub mod tests { accounts.print_accounts_stats("accounts"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("accounts_post_purge"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); @@ -11320,7 +11325,7 @@ pub mod tests { fn test_accounts_purge_chained_purge_before_snapshot_restore() { solana_logger::setup(); with_chained_zero_lamport_accounts(|accounts, current_slot| { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -11331,7 +11336,7 @@ pub mod tests { with_chained_zero_lamport_accounts(|accounts, current_slot| { let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_accounts_stats("after_reconstruct"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); reconstruct_accounts_db_via_serialization(&accounts, current_slot) }); } @@ -12095,7 +12100,7 @@ pub mod tests { accounts.print_count_and_status("before reconstruct"); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); accounts.print_count_and_status("before purge zero"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_count_and_status("after purge zero"); assert_load_account(&accounts, current_slot, pubkey, old_lamport); @@ -12156,7 
+12161,7 @@ pub mod tests { accounts.print_accounts_stats("Post-B pre-clean"); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("post B"); accounts.print_accounts_stats("Post-B"); @@ -12196,7 +12201,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("Post-D clean"); @@ -12286,7 +12291,7 @@ pub mod tests { current_slot += 1; assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1)); accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( // Removed one reference from the dead slot (reference only counted once @@ -12311,9 +12316,9 @@ pub mod tests { // If step C and step D should be purged, snapshot restore would cause // pubkey1 to be revived as the state of step A. // So, prevent that from happening by introducing refcount - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); info!("pubkey: {}", pubkey1); accounts.print_accounts_stats("pre_clean"); @@ -12328,10 +12333,10 @@ pub mod tests { accounts.add_root(current_slot); // Do clean - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // 2nd clean needed to clean-up pubkey1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // Ensure pubkey2 is cleaned from the index finally assert_not_load_account(&accounts, current_slot, pubkey1); @@ -12472,7 +12477,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12561,7 +12566,7 @@ pub mod tests { } accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -12846,7 +12851,7 @@ pub mod tests { accounts.get_accounts_delta_hash(current_slot); accounts.add_root(current_slot); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert_eq!( pubkey_count, @@ -13056,7 +13061,7 @@ pub mod tests { accounts.flush_accounts_cache(true, None); // clear out the dirty keys - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); // flush 1 accounts.get_accounts_delta_hash(1); @@ -13068,11 +13073,11 @@ pub mod tests { // clean to remove pubkey1 from 0, // shrink to shrink pubkey1 from 0 // then another clean to remove pubkey1 from slot 1 - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_candidate_slots(); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-clean"); assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0); @@ -13100,12 +13105,12 @@ pub mod tests { accounts.store_uncached(1, &[(key, &account)]); } accounts.add_root(1); - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.shrink_all_slots(false, None); // Clean again to flush the dirty stores // and allow them to be recycled in the next step - 
accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); accounts.print_accounts_stats("post-shrink"); let num_stores = accounts.recycle_stores.read().unwrap().entry_count(); assert!(num_stores > 0); @@ -13425,9 +13430,9 @@ pub mod tests { db.add_root(0); db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.add_root(2); assert_eq!(db.read_only_accounts_cache.cache_len(), 0); @@ -13475,7 +13480,7 @@ pub mod tests { db.add_root(1); // Clean should not remove anything yet as nothing has been flushed - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &Ancestors::default(), @@ -13491,7 +13496,7 @@ pub mod tests { // Flush, then clean again. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db .do_load( &Ancestors::default(), @@ -13556,7 +13561,7 @@ pub mod tests { // Flush, then clean. Should not need another root to initiate the cleaning // because `accounts_index.uncleaned_roots` should be correct db.flush_accounts_cache(true, None); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); // The `zero_lamport_account_key` is still alive in slot 1, so refcount for the // pubkey should be 2 @@ -13716,7 +13721,7 @@ pub mod tests { // Run clean, unrooted slot 1 should not be purged, and still readable from the cache, // because we're still doing a scan on it. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -13730,7 +13735,7 @@ pub mod tests { // When the scan is over, clean should not panic and should not purge something // still in the cache. scan_tracker.exit().unwrap(); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); let account = db .do_load( &scan_ancestors, @@ -14332,7 +14337,7 @@ pub mod tests { // Checking that the uncleaned_pubkeys are not pre-maturely removed // such that when the slots are rooted, and can actually be cleaned, then the // delta keys are still there. - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean1"); // Check stores > 0 @@ -14347,12 +14352,12 @@ pub mod tests { db.store_uncached(2, &[(&account_key1, &account3)]); db.get_accounts_delta_hash(2); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean2"); // root slots 1 db.add_root(1); - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); db.print_accounts_stats("post-clean3"); @@ -14361,7 +14366,7 @@ pub mod tests { db.add_root(3); // Check that we can clean where max_root=3 and slot=2 is not rooted - db.clean_accounts(None, false, None); + db.clean_accounts_for_tests(); assert!(db.uncleaned_pubkeys.is_empty()); @@ -15176,7 +15181,7 @@ pub mod tests { // The later rooted zero-lamport update to `shared_key` cannot be cleaned // because it is kept alive by the unrooted slot. 
- accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) @@ -15186,7 +15191,7 @@ pub mod tests { accounts.purge_slot(slot0, 0, true); // Now clean should clean up the remaining key - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); assert!(accounts .accounts_index .get_account_read_entry(&shared_key) diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs index a055d62da14da5..d272e738a1695f 100644 --- a/runtime/tests/accounts.rs +++ b/runtime/tests/accounts.rs @@ -65,7 +65,7 @@ fn test_shrink_and_clean() { // let's dance. for _ in 0..10 { - accounts.clean_accounts(None, false, None); + accounts.clean_accounts_for_tests(); std::thread::sleep(std::time::Duration::from_millis(100)); } From f76e4994823e52086969d4237e27689738b7822c Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Wed, 17 Aug 2022 15:48:33 -0700 Subject: [PATCH 23/67] Rust v1.63.0 (#27148) * Upgrade to Rust v1.63.0 * Add nightly_clippy_allows * Resolve some new clippy nightly lints * Increase QUIC packets completion timeout Co-authored-by: Michael Vines --- account-decoder/src/parse_address_lookup_table.rs | 2 +- banks-server/src/banks_server.rs | 10 +++------- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 4 ++-- ci/test-checks.sh | 14 ++++++++++++++ client/tests/quic_client.rs | 2 +- core/src/banking_stage.rs | 2 +- core/src/sigverify_shreds.rs | 2 +- frozen-abi/src/abi_example.rs | 2 +- gossip/src/crds_gossip_pull.rs | 2 +- ledger/src/bigtable_upload.rs | 2 +- ledger/src/blockstore.rs | 2 +- ledger/src/blockstore_meta.rs | 2 +- ledger/src/shred.rs | 2 +- ledger/src/shred/shred_code.rs | 2 +- local-cluster/src/local_cluster.rs | 2 +- perf/src/sigverify.rs | 7 +------ poh/src/poh_recorder.rs | 2 +- rpc/src/rpc.rs | 6 ++---- rpc/src/rpc_subscriptions.rs | 5 +---- runtime/src/account_rent_state.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 8 ++++---- runtime/src/bank.rs | 12 ++++++------ runtime/src/expected_rent_collection.rs | 8 ++++---- runtime/src/hardened_unpack.rs | 2 +- runtime/src/in_mem_accounts_index.rs | 2 ++ runtime/src/serde_snapshot.rs | 4 ++-- runtime/src/serde_snapshot/newer.rs | 4 ++-- runtime/src/serde_snapshot/tests.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 2 +- runtime/src/storable_accounts.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/program/src/message/compiled_keys.rs | 10 +++++----- sdk/program/src/nonce/state/mod.rs | 2 +- sdk/program/src/stake/tools.rs | 2 +- streamer/src/streamer.rs | 2 +- validator/src/bootstrap.rs | 6 ++---- zk-token-sdk/src/instruction/close_account.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- 42 files changed, 78 insertions(+), 78 deletions(-) diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index 26955d74a74242..ca461f2636e92a 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -19,7 +19,7 @@ pub fn parse_address_lookup_table( }) } -#[derive(Debug, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "type", content = "info")] pub enum LookupTableAccountType { Uninitialized, diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index 
c73844d2571560..a4b65601c389b3 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -153,13 +153,9 @@ fn verify_transaction( transaction: &Transaction, feature_set: &Arc, ) -> transaction::Result<()> { - if let Err(err) = transaction.verify() { - Err(err) - } else if let Err(err) = transaction.verify_precompiles(feature_set) { - Err(err) - } else { - Ok(()) - } + transaction.verify()?; + transaction.verify_precompiles(feature_set)?; + Ok(()) } fn simulate_transaction( diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index fff0f366d32f29..12aeff7e5e0b81 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.60.0 +FROM solanalabs/rust:1.63.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 6805f85fcd85df..a256d308d9b27a 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.60.0 +FROM rust:1.63.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index dc3570fa939e79..792863c3280fa1 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.60.0 + stable_version=1.63.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2022-04-01 + nightly_version=2022-08-12 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 72c174395bd1d9..65e5e6271aa4bf 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -65,11 +65,25 @@ fi _ ci/order-crates-for-publishing.py +nightly_clippy_allows=( + # This lint occurs all over the code base + "--allow=clippy::significant_drop_in_scrutinee" + + # The prost crate, used by solana-storage-proto, generates Rust source that + # triggers this lint. Need to resolve upstream in prost + "--allow=clippy::derive_partial_eq_without_eq" + + # This lint seems to incorrectly trigger in + # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs` + "--allow=clippy::explicit_auto_deref" +) + # -Z...
is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \ --deny=warnings \ --deny=clippy::integer_arithmetic \ + "${nightly_clippy_allows[@]}" _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check diff --git a/client/tests/quic_client.rs b/client/tests/quic_client.rs index 980476aee7b2c6..1c5348177dd644 100644 --- a/client/tests/quic_client.rs +++ b/client/tests/quic_client.rs @@ -27,7 +27,7 @@ mod tests { let mut all_packets = vec![]; let now = Instant::now(); let mut total_packets: usize = 0; - while now.elapsed().as_secs() < 5 { + while now.elapsed().as_secs() < 10 { if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) { total_packets = total_packets.saturating_add(packets.len()); all_packets.push(packets) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2547c00f94e5ca..1c3e95e2bd6c5d 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1335,7 +1335,7 @@ impl BankingStage { ); retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then(|| index), + |(index, execution_result)| execution_result.was_executed().then_some(index), )); return ExecuteAndCommitTransactionsOutput { diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f9a50ab8b2a954..f1f08ec671d2f3 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -151,7 +151,7 @@ fn get_slot_leaders( let leader = leaders.entry(slot).or_insert_with(|| { let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?; // Discard the shred if the slot leader is the node itself. - (&leader != self_pubkey).then(|| leader) + (&leader != self_pubkey).then_some(leader) }); if leader.is_none() { packet.meta.set_discard(true); diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index e0dfa50b8acea6..2e1bdbcac16d0d 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -411,7 +411,7 @@ lazy_static! { impl AbiExample for &Vec { fn example() -> Self { info!("AbiExample for (&Vec): {}", type_name::()); - &*VEC_U8 + &VEC_U8 } } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 2780bf7dabf56b..04df91227b971c 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -256,7 +256,7 @@ impl CrdsGossipPull { if let Some(ping) = ping { pings.push((peer.gossip, ping)); } - check.then(|| (weight, peer)) + check.then_some((weight, peer)) }) .unzip() }; diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index f43b07db12592a..c8cdef587b1fc7 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks( starting_slot, err ) })? 
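Most of the churn in this patch is the same one-line modernization: `bool::then(|| x)` becomes `bool::then_some(x)`, stabilized in Rust 1.62 and now suggested by clippy's `unnecessary_lazy_evaluations` lint. The semantic difference, in a runnable sketch:

```rust
fn expensive() -> u64 {
    println!("computed");
    42
}

fn main() {
    let ok = false;
    // then_some evaluates its argument eagerly -- fine for cheap values...
    let a = ok.then_some(5);
    // ...while then defers work (and side effects) behind a closure that
    // only runs when the bool is true.
    let b = ok.then(expensive);
    assert_eq!((a, b), (None, None)); // expensive() never ran here
}
```

That eager/lazy distinction is also why one hunk later in this patch, `FlushGuard::lock` in in_mem_accounts_index.rs, deliberately keeps the closure form and allows the lint: building `Self { flushing }` eagerly would construct and immediately drop the guard, clearing the flag.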
- .map_while(|slot| (slot <= ending_slot).then(|| slot)) + .map_while(|slot| (slot <= ending_slot).then_some(slot)) .collect(); if blockstore_slots.is_empty() { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index acacf9d842a7e5..66340b5cb00034 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3145,7 +3145,7 @@ impl Blockstore { } .expect("fetch from DuplicateSlots column family failed")?; let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); - (existing_shred != *new_shred.payload()).then(|| existing_shred) + (existing_shred != *new_shred.payload()).then_some(existing_shred) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 65101fe98348ba..5cacf78198dafb 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -61,7 +61,7 @@ mod serde_compat { D: Deserializer<'de>, { let val = u64::deserialize(deserializer)?; - Ok((val != u64::MAX).then(|| val)) + Ok((val != u64::MAX).then_some(val)) } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e17055b1e7d9a9..bef3df72515640 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -613,7 +613,7 @@ pub mod layout { merkle::ShredData::get_signed_message_range(proof_size)? } }; - (shred.len() <= range.end).then(|| range) + (shred.len() <= range.end).then_some(range) } pub(crate) fn get_reference_tick(shred: &[u8]) -> Result { diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 538bb25427f38f..1fe3fef026ff18 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -119,7 +119,7 @@ pub(super) fn erasure_shard_index(shred: &T) -> Option let position = usize::from(coding_header.position); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = position.checked_add(num_data_shreds)?; - (index < fec_set_size).then(|| index) + (index < fec_set_size).then_some(index) } pub(super) fn sanitize(shred: &T) -> Result<(), Error> { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index f7b68647053eaf..0f1ca19f876aff 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -319,7 +319,7 @@ impl LocalCluster { }) .collect(); for (stake, validator_config, (key, _)) in izip!( - (&config.node_stakes[1..]).iter(), + config.node_stakes[1..].iter(), config.validator_configs[1..].iter(), validator_keys[1..].iter(), ) { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index aee1b310dd59d9..1e40d29adcf13d 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -830,12 +830,7 @@ mod tests { pub fn memfind(a: &[A], b: &[A]) -> Option { assert!(a.len() >= b.len()); let end = a.len() - b.len() + 1; - for i in 0..end { - if a[i..i + b.len()] == b[..] 
{ - return Some(i); - } - } - None + (0..end).find(|&i| a[i..i + b.len()] == b[..]) } #[test] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index aef2d7393e9f51..d6c85c3fdf7f3f 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -505,7 +505,7 @@ impl PohRecorder { start: Arc::new(Instant::now()), min_tick_height: bank.tick_height(), max_tick_height: bank.max_tick_height(), - transaction_index: track_transaction_indexes.then(|| 0), + transaction_index: track_transaction_indexes.then_some(0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index fdf72d8f5d7299..9cad136b581927 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -129,7 +129,7 @@ fn new_response(bank: &Bank, value: T) -> RpcResponse { /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext { Context(RpcResponse), @@ -3646,9 +3646,7 @@ pub mod rpc_full { } if !skip_preflight { - if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { - return Err(e); - } + verify_transaction(&transaction, &preflight_bank.feature_set)?; match meta.health.check() { RpcHealthStatus::Ok => (), diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index bd9fe337460279..896b6a9ad5f453 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1001,10 +1001,7 @@ impl RpcSubscriptions { let mut slots_to_notify: Vec<_> = (*w_last_unnotified_slot..slot).collect(); let ancestors = bank.proper_ancestors_set(); - slots_to_notify = slots_to_notify - .into_iter() - .filter(|slot| ancestors.contains(slot)) - .collect(); + slots_to_notify.retain(|slot| ancestors.contains(slot)); slots_to_notify.push(slot); for s in slots_to_notify { // To avoid skipping a slot that fails this condition, diff --git a/runtime/src/account_rent_state.rs b/runtime/src/account_rent_state.rs index 629502caf475fe..74cbc5b81af5f1 100644 --- a/runtime/src/account_rent_state.rs +++ b/runtime/src/account_rent_state.rs @@ -104,7 +104,7 @@ pub(crate) fn check_rent_state( .get_account_at_index(index) .expect(expect_msg) .borrow(), - include_account_index_in_err.then(|| index), + include_account_index_in_err.then_some(index), prevent_crediting_accounts_that_end_rent_paying, )?; } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index 86d14aaf7b681c..ade9d327ba1046 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -440,7 +440,7 @@ impl Accounts { payer_account, feature_set .is_active(&feature_set::include_account_index_in_rent_error::ID) - .then(|| payer_index), + .then_some(payer_index), feature_set .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()), ) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index caa4cc77f31376..4c789751a2405d 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2174,7 +2174,7 @@ impl AccountsDb { // figure out how many ancient accounts have been reclaimed let old_reclaims = reclaims .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1)) + .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) .sum(); 
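Two more idioms recur in the hunks above: an indexed search loop collapsed into `Iterator::find` (perf/src/sigverify.rs) and a filter-and-reallocate collapsed into in-place `Vec::retain` (rpc/src/rpc_subscriptions.rs). Both in miniature:

```rust
fn main() {
    // Iterator::find replaces the manual
    // `for i in 0..end { if ... { return Some(i); } }` loop.
    let hay = [3u8, 1, 4, 1, 5];
    let idx = (0..hay.len()).find(|&i| hay[i] == 4);
    assert_eq!(idx, Some(2));

    // Vec::retain filters in place instead of into_iter().filter().collect().
    let mut slots = vec![10u64, 11, 12, 13];
    slots.retain(|s| s % 2 == 0);
    assert_eq!(slots, vec![10, 12]);
}
```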
ancient_account_cleans.fetch_add(old_reclaims, Ordering::Relaxed); reclaims @@ -2392,7 +2392,7 @@ impl AccountsDb { .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then(|| slot) + (slot <= max_slot).then_some(slot) }) .collect() } @@ -3676,7 +3676,7 @@ impl AccountsDb { ) -> Option { self.get_storages_for_slot(slot).and_then(|all_storages| { self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot) - .then(|| all_storages) + .then_some(all_storages) }) } @@ -5309,7 +5309,7 @@ impl AccountsDb { // with the same slot. let is_being_flushed = !currently_contended_slots.insert(*remove_slot); // If the cache is currently flushing this slot, add it to the list - is_being_flushed.then(|| remove_slot) + is_being_flushed.then_some(remove_slot) }) .cloned() .collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index e9caf6d66f782b..855add35a1770a 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2285,7 +2285,7 @@ impl Bank { hash: *self.hash.read().unwrap(), parent_hash: self.parent_hash, parent_slot: self.parent_slot, - hard_forks: &*self.hard_forks, + hard_forks: &self.hard_forks, transaction_count: self.transaction_count.load(Relaxed), tick_height: self.tick_height.load(Relaxed), signature_count: self.signature_count.load(Relaxed), @@ -3308,7 +3308,7 @@ impl Bank { let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; - (slot_delta <= slots_per_epoch).then(|| { + (slot_delta <= slots_per_epoch).then_some({ ( *pubkey, ( @@ -3978,10 +3978,10 @@ impl Bank { } /// Prepare a transaction batch without locking accounts for transaction simulation. - pub(crate) fn prepare_simulation_batch<'a>( - &'a self, + pub(crate) fn prepare_simulation_batch( + &self, transaction: SanitizedTransaction, - ) -> TransactionBatch<'a, '_> { + ) -> TransactionBatch<'_, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_result = transaction .get_account_locks(tx_account_lock_limit) @@ -4382,7 +4382,7 @@ impl Bank { self.feature_set.clone(), compute_budget, timings, - &*self.sysvar_cache.read().unwrap(), + &self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, prev_accounts_data_len, diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index d049430933db33..bd6a6bb4842a85 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -684,7 +684,7 @@ pub mod tests { ); assert_eq!( result, - (!leave_alone).then(|| ExpectedRentCollection { + (!leave_alone).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -712,7 +712,7 @@ pub mod tests { ); assert_eq!( result, - (!greater).then(|| ExpectedRentCollection { + (!greater).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -909,7 +909,7 @@ pub mod tests { ); assert_eq!( result, - (account_rent_epoch != 0).then(|| ExpectedRentCollection { + (account_rent_epoch != 0).then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch + 1, partition_index_from_max_slot: partition_index_max_inclusive, @@ -1084,7 +1084,7 @@ pub mod tests { }; assert_eq!( result, - some_expected.then(|| ExpectedRentCollection { 
+ some_expected.then_some(ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index e3af855216e409..ac1c23167343fb 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -384,7 +384,7 @@ where .map(|path_buf| path_buf.as_path()) { Some(path) => { - accounts_path_processor(*file, path); + accounts_path_processor(file, path); UnpackPath::Valid(path) } None => UnpackPath::Invalid, diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index b252499267ba4b..c04e0eed1080bf 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -1418,6 +1418,8 @@ impl<'a> FlushGuard<'a> { #[must_use = "if unused, the `flushing` flag will immediately clear"] fn lock(flushing: &'a AtomicBool) -> Option { let already_flushing = flushing.swap(true, Ordering::AcqRel); + // Eager evaluation here would result in dropping Self and clearing flushing flag + #[allow(clippy::unnecessary_lazy_evaluations)] (!already_flushing).then(|| Self { flushing }) } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 5b42208d042e7c..90d0c6db2e3220 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; -#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] +#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)] pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, @@ -120,7 +120,7 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages .iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| { + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") })?; diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index 512737106aebc9..ab27961bf2a49c 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -201,7 +201,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -228,7 +228,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 1de6ee2a5d54c6..5834a23f969116 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ 
b/runtime/src/serde_snapshot/tests.rs @@ -155,7 +155,7 @@ fn test_accounts_serialize_style(serde_style: SerdeStyle) { accountsdb_to_stream( serde_style, &mut writer, - &*accounts.accounts_db, + &accounts.accounts_db, 0, &accounts.accounts_db.get_snapshot_storages(0, None, None).0, ) diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 69e7a99e8e7601..94a82e1d482458 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -543,7 +543,7 @@ mod tests { .accounts .iter() .filter_map(|(pubkey, account)| { - stake::program::check_id(account.owner()).then(|| *pubkey) + stake::program::check_id(account.owner()).then_some(*pubkey) }) .collect(); expected_stake_accounts.push(bootstrap_validator_pubkey); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 19e9d02f684273..6018db95d3477b 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1181,7 +1181,7 @@ fn check_are_snapshots_compatible( let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap(); (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot()) - .then(|| ()) + .then_some(()) .ok_or_else(|| { SnapshotError::MismatchedBaseSlot( full_snapshot_archive_info.slot(), diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index 8d79c0f78c5fe4..bfa35cf71c3e6b 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -143,7 +143,7 @@ pub mod tests { slot, &vec![(&pk, &account, slot), (&pk, &account, slot)][..], ); - assert!(!(&test3).contains_multiple_slots()); + assert!(!test3.contains_multiple_slots()); let test3 = ( slot, &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..], diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 67f1f931147cef..3b738df1d8a0e4 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,7 +1626,7 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not - callback(&*bank); + callback(&bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs index d56c7aca2c4159..c689d08f39ae81 100644 --- a/sdk/program/src/message/compiled_keys.rs +++ b/sdk/program/src/message/compiled_keys.rs @@ -80,20 +80,20 @@ impl CompiledKeys { .chain( key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)), + .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)), ) .collect(); let readonly_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key)) .collect(); let writable_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key)) .collect(); let readonly_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key)) + .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key)) .collect(); let signers_len = writable_signer_keys @@ -160,7 +160,7 
@@ impl CompiledKeys { for search_key in self .key_meta_map .iter() - .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key)) + .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key)) { for (key_index, key) in lookup_table_addresses.iter().enumerate() { if key == search_key { diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs index a4a850b93c1cdc..d55bc9063afcff 100644 --- a/sdk/program/src/nonce/state/mod.rs +++ b/sdk/program/src/nonce/state/mod.rs @@ -46,7 +46,7 @@ impl Versions { Self::Current(state) => match **state { State::Uninitialized => None, State::Initialized(ref data) => { - (recent_blockhash == &data.blockhash()).then(|| data) + (recent_blockhash == &data.blockhash()).then_some(data) } }, } diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index 842a822b0ea329..e0447f49fc69c9 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result { .ok_or(ProgramError::InvalidInstructionData) .and_then(|(program_id, return_data)| { (program_id == super::program::id()) - .then(|| return_data) + .then_some(return_data) .ok_or(ProgramError::IncorrectProgramId) }) .and_then(|return_data| { diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 3492f60c8933a8..1ef9b989304ebb 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -307,7 +307,7 @@ fn recv_send( let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); let data = pkt.data(..)?; - socket_addr_space.check(&addr).then(|| (data, addr)) + socket_addr_space.check(&addr).then_some((data, addr)) }); batch_send(sock, &packets.collect::>())?; Ok(()) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index fec9f6d409709c..c5a4b65d4b1229 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -409,7 +409,7 @@ pub fn attempt_download_genesis_and_snapshot( .map_err(|err| format!("Failed to get RPC node slot: {}", err))?; info!("RPC node root slot: {}", rpc_client_slot); - if let Err(err) = download_snapshots( + download_snapshots( full_snapshot_archives_dir, incremental_snapshot_archives_dir, validator_config, @@ -422,9 +422,7 @@ pub fn attempt_download_genesis_and_snapshot( download_abort_count, snapshot_hash, rpc_contact_info, - ) { - return Err(err); - }; + )?; if let Some(url) = bootstrap_config.check_vote_account.as_ref() { let rpc_client = RpcClient::new(url); diff --git a/zk-token-sdk/src/instruction/close_account.rs b/zk-token-sdk/src/instruction/close_account.rs index 4525f87901cd71..b6702e3051f168 100644 --- a/zk-token-sdk/src/instruction/close_account.rs +++ b/zk-token-sdk/src/instruction/close_account.rs @@ -41,7 +41,7 @@ impl CloseAccountData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext); diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 9aa606e8ca4203..64f540a591804e 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -62,7 +62,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let 
pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); + let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext); let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript); From 6b7437c1ff8c109f198dfbb8e8cc08ce8fe7d4ac Mon Sep 17 00:00:00 2001 From: Nick Frostbutter <75431177+nickfrosty@users.noreply.github.com> Date: Wed, 17 Aug 2022 22:07:40 -0400 Subject: [PATCH 24/67] docs: updated "transaction fees" page (#26861) * docs: transaction fees, compute units, compute budget * docs: added messages definition * Revert "docs: added messages definition" This reverts commit 3c56156dfaaf17158c5eafbc5877080a83607a06. * docs: added messages definition * Update docs/src/transaction_fees.md Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> * fix: updates from feedback Co-authored-by: Jacob Creech <82475023+jacobcreech@users.noreply.github.com> --- .../developing/programming-model/runtime.md | 37 +++++----- docs/src/terminology.md | 32 ++++++--- docs/src/transaction_fees.md | 67 ++++++++++++++++--- 3 files changed, 102 insertions(+), 34 deletions(-) diff --git a/docs/src/developing/programming-model/runtime.md b/docs/src/developing/programming-model/runtime.md index f0d402508808da..ac8284b723d92e 100644 --- a/docs/src/developing/programming-model/runtime.md +++ b/docs/src/developing/programming-model/runtime.md @@ -49,7 +49,9 @@ To prevent abuse of computational resources, each transaction is allocated a compute budget. The budget specifies a maximum number of compute units that a transaction can consume, the costs associated with different types of operations the transaction may perform, and operational bounds the transaction must adhere -to. As the transaction is processed compute units are consumed by its +to. + +As the transaction is processed compute units are consumed by its instruction's programs performing operations such as executing BPF instructions, calling syscalls, etc... When the transaction consumes its entire budget, or exceeds a bound such as attempting a call stack that is too deep, the runtime @@ -71,11 +73,11 @@ budget, or exceeds a bound, the entire invocation chain and the top level transaction processing are halted. The current [compute -budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) +budget](https://github.com/solana-labs/solana/blob/090e11210aa7222d8295610a6ccac4acda711bb9/program-runtime/src/compute_budget.rs#L26-L87) can be found in the Solana Program Runtime. -can be found in the Solana Program Runtime. +#### Example Compute Budget -For example, if the current budget is: +For example, if the compute budget set in the Solana runtime is: ```rust max_units: 1,400,000, @@ -89,21 +91,23 @@ log_pubkey_units: 100, ... ``` -Then the transaction +Then any transaction: - Could execute 1,400,000 BPF instructions, if it did nothing else. - Cannot exceed 4k of stack usage. - Cannot exceed a BPF call depth of 64. - Cannot exceed 4 levels of cross-program invocations. -Since the compute budget is consumed incrementally as the transaction executes, -the total budget consumption will be a combination of the various costs of the -operations it performs. 
+> **NOTE:** Since the compute budget is consumed incrementally as the transaction executes, +> the total budget consumption will be a combination of the various costs of the +> operations it performs. At runtime a program may log how much of the compute budget remains. See [debugging](developing/on-chain-programs/debugging.md#monitoring-compute-budget-consumption) for more information. +### Prioritization fees + A transaction may set the maximum number of compute units it is allowed to consume and the compute unit price by including a `SetComputeUnitLimit` and a `SetComputeUnitPrice` @@ -112,20 +116,19 @@ respectively. If no `SetComputeUnitLimit` is provided the limit will be calculated as the product of the number of instructions in the transaction (excluding the [Compute -budget -instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) -and the default per-instruction units, which is currently 200k. - -Note that a transaction's prioritization fee is calculated by multiplying the -number of compute units by the compute unit price (measured in micro-lamports) -set by the transaction via compute budget instructions. So transactions should -request the minimum amount of compute units required for execution to minimize +budget instructions](https://github.com/solana-labs/solana/blob/db32549c00a1b5370fcaf128981ad3323bbd9570/sdk/src/compute_budget.rs#L22)) and the default per-instruction units, which is currently 200k. + +> **NOTE:** A transaction's [prioritization fee](./../../terminology.md#prioritization-fee) is calculated by multiplying the +> number of _compute units_ by the _compute unit price_ (measured in micro-lamports) +> set by the transaction via compute budget instructions. + +Transactions should request the minimum amount of compute units required for execution to minimize fees. Also note that fees are not adjusted when the number of requested compute units exceeds the number of compute units actually consumed by an executed transaction. Compute Budget instructions don't require any accounts and don't consume any -compute units to process. Transactions can only contain one of each type of +compute units to process. Transactions can only contain one of each type of compute budget instruction; duplicate types will result in an error. The `ComputeBudgetInstruction::set_compute_unit_limit` and diff --git a/docs/src/terminology.md b/docs/src/terminology.md index 2c22efb2bbdfa3..038aa2d302c9c3 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -1,8 +1,10 @@ --- title: Terminology +description: "Learn the essential terminology used throughout the Solana blockchain and development models." +keywords: "terms, dictionary, definitions, define, programming models" --- -The following terms are used throughout the documentation. +The following terms are used throughout the Solana documentation and development ecosystem.
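The `SetComputeUnitLimit`/`SetComputeUnitPrice` mechanics described in the runtime.md hunk above amount to two ordinary instructions prepended to a transaction. A hedged client-side sketch (the limit and price values are illustrative, not recommendations):

```rust
use solana_sdk::compute_budget::ComputeBudgetInstruction;

fn main() {
    // Request a 150k CU ceiling (below the 200k-per-instruction default) and
    // bid 1_000 micro-lamports per CU for priority. Neither instruction
    // consumes compute units or references accounts.
    let instructions = vec![
        ComputeBudgetInstruction::set_compute_unit_limit(150_000),
        ComputeBudgetInstruction::set_compute_unit_price(1_000),
        // ...application instructions follow...
    ];
    assert_eq!(instructions.len(), 2);
}
```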
## account @@ -12,9 +14,9 @@ Like an account at a traditional bank, a Solana account may hold funds called [l The key may be one of: -* an ed25519 public key -* a program-derived account address (32byte value forced off the ed25519 curve) -* a hash of an ed25519 public key with a 32 character string +- an ed25519 public key +- a program-derived account address (32-byte value forced off the ed25519 curve) +- a hash of an ed25519 public key with a 32-character string ## account owner @@ -34,7 +36,7 @@ A contiguous set of [entries](#entry) on the ledger covered by a [vote](#ledger- ## blockhash -A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. +A unique value ([hash](#hash)) that identifies a record (block). Solana computes a blockhash from the last [entry id](#entry-id) of the block. ## block height @@ -56,6 +58,14 @@ A computer program that accesses the Solana server network [cluster](#cluster). A set of [validators](#validator) maintaining a single [ledger](#ledger). +## compute budget + +The maximum number of [compute units](#compute-units) consumed per transaction. + +## compute units + +The smallest unit of measure for consumption of computational resources of the blockchain. + ## confirmation time The wallclock duration between a [leader](#leader) creating a [tick entry](#tick) and creating a [confirmed block](#confirmed-block). @@ -179,6 +189,12 @@ A [program](#program) with the ability to interpret the binary encoding of other The duration of time for which a [validator](#validator) is unable to [vote](#ledger-vote) on another [fork](#fork). +## message + +The structured contents of a [transaction](#transaction). Generally containing a header, array of account addresses, recent [blockhash](#blockhash), and an array of [instructions](#instruction). + +Learn more about the [message formatting inside of transactions](./developing/programming-model/transactions.md#message-format) here. + ## native token The [token](#token) used to track work done by [nodes](#node) in a cluster. @@ -221,7 +237,7 @@ A stack of proofs, each of which proves that some data existed before the proof ## prioritization fee -An additional fee user can specify in compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). +An additional fee a user can specify in the compute budget [instruction](#instruction) to prioritize their [transactions](#transaction). The prioritization fee is calculated by multiplying the requested maximum compute units by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. @@ -287,7 +303,7 @@ Tokens forfeit to the [cluster](#cluster) if malicious [validator](#validator) b ## sysvar -A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. +A system [account](#account). [Sysvars](developing/runtime-facilities/sysvars.md) provide cluster state information such as current tick height, rewards [points](#point) values, etc. Programs can access Sysvars via a Sysvar account (pubkey) or by querying via a syscall. ## thin client @@ -327,7 +343,7 @@ A set of [transactions](#transaction) that may be executed in parallel.
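The new `message` entry above corresponds to the legacy `Message` type in `solana-sdk`; its field-by-field shape, sketched for orientation (a local mirror using the SDK's field types, not the real definition):

```rust
use solana_sdk::{
    hash::Hash, instruction::CompiledInstruction, message::MessageHeader, pubkey::Pubkey,
};

// Mirrors the four parts the glossary entry lists: header, account addresses,
// recent blockhash, and compiled instructions.
pub struct MessageShape {
    pub header: MessageHeader,                  // signer / read-only counts
    pub account_keys: Vec<Pubkey>,              // every address the tx references
    pub recent_blockhash: Hash,                 // recency anchor for the tx
    pub instructions: Vec<CompiledInstruction>, // program ids + account indexes + data
}
```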
## validator -A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger) +A full participant in a Solana network [cluster](#cluster) that produces new [blocks](#block). A validator validates the transactions added to the [ledger](#ledger). ## VDF diff --git a/docs/src/transaction_fees.md b/docs/src/transaction_fees.md index c28cb32543457e..ee9fdfa43e9b93 100644 --- a/docs/src/transaction_fees.md +++ b/docs/src/transaction_fees.md @@ -1,21 +1,70 @@ --- title: Transaction Fees +description: "Transaction fees are the small fees paid to process instructions on the network. These fees are based on computation and an optional prioritization fee." +keywords: "instruction fee, processing fee, storage fee, low fee blockchain, gas, gwei, cheap network, affordable blockchain" --- -**Subject to change.** +The small fees paid to process [instructions](./terminology.md#instruction) on the Solana blockchain are known as "_transaction fees_". -Each transaction sent through the network, to be processed by the current leader validation-client and confirmed as a global state transaction, contains a transaction fee. Transaction fees offer many benefits in the Solana economic design, for example they: +As each transaction (which contains one or more instructions) is sent through the network, it gets processed by the current leader validation-client. Once confirmed as a global state transaction, this _transaction fee_ is paid to the network to help support the [economic design](#basic-economic-design) of the Solana blockchain. -- provide unit compensation to the validator network for the CPU/GPU resources necessary to process the state transaction, +> **NOTE:** Transaction fees are different from [account rent](./terminology.md#rent)! +> While transaction fees are paid to process instructions on the Solana network, rent is paid to store data on the blockchain. + + + +## Why pay transaction fees? + +Transaction fees offer many benefits in the Solana [economic design](#basic-economic-design) described below. Mainly: + +- they provide compensation to the validator network for the CPU/GPU resources necessary to process transactions, - reduce network spam by introducing real cost to transactions, -- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction, as described below. +- and provide potential long-term economic stability of the network through a protocol-captured minimum fee amount per transaction + +> **NOTE:** Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. + +## Basic economic design + +Many current blockchain economies \(e.g. Bitcoin, Ethereum\) rely on _protocol-based rewards_ to support the economy in the short term. And when the protocol-derived rewards expire, they predict that the revenue generated through _transaction fees_ will support the economy in the long term. + +In an attempt to create a sustainable economy on Solana through _protocol-based rewards_ and _transaction fees_: + +- a fixed portion (initially 50%) of each transaction fee is _burned_ (aka destroyed), +- with the remaining fee going to the current [leader](./terminology.md#leader) processing the transaction.
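In concrete numbers, the two bullets above split a fee like this (a toy example assuming the initial 50% burn rate):

```rust
fn main() {
    let fee: u64 = 10_000; // total transaction fee in lamports (example value)
    let burned = fee / 2; // destroyed -- removed from circulating supply
    let leader_reward = fee - burned; // paid to the current slot leader
    assert_eq!((burned, leader_reward), (5_000, 5_000));
}
```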
+ +A scheduled global inflation rate provides a source for [rewards](./implemented-proposals/staking-rewards.md) distributed to [Solana Validators](../src/running-validator.md). + +### Why burn some fees? + +As mentioned above, a fixed proportion of each transaction fee is _burned_ (aka destroyed). The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while still providing an inflation-limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). + +Burnt fees can also help prevent malicious validators from censoring transactions by being considered in [fork](./terminology.md#fork) selection. + +#### Example of an attack: + +In the case of a [Proof of History (PoH)](./terminology.md#proof-of-history-poh) fork with a malicious, censoring leader: + +- due to the fees lost from censoring, we would expect the total fees destroyed to be **_less than_** a comparable honest fork +- if the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves +- thus potentially reducing the incentive to censor in the first place + +## Calculating transaction fees + +Transaction fees are calculated based on two main parts: + +- a statically set base fee per signature, and +- the computational resources used during the transaction, measured in "[_compute units_](./terminology.md#compute-units)" + +Since each transaction may require a different amount of computational resources, they are allotted a maximum number of _compute units_ per transaction known as the "[_compute budget_](./terminology.md#compute-budget)". + +The execution of each instruction within a transaction consumes a different number of _compute units_. After the maximum number of _compute units_ has been consumed (aka compute budget exhaustion), the runtime will halt the transaction and return an error, resulting in a failed transaction. -Network consensus votes are sent as normal system transfers, which means that validators pay transaction fees to participate in consensus. +> **Learn more:** compute units and the [Compute Budget](./developing/programming-model/runtime#compute-budget) in the Runtime and [requesting a fee estimate](./developing/clients/jsonrpc-api.md#getfeeformessage) from the RPC. -Many current blockchain economies \(e.g. Bitcoin, Ethereum\), rely on protocol-based rewards to support the economy in the short term, with the assumption that the revenue generated through transaction fees will support the economy in the long term, when the protocol derived rewards expire. In an attempt to create a sustainable economy through protocol-based rewards and transaction fees, a fixed portion (initially 50%) of each transaction fee is destroyed, with the remaining fee going to the current leader processing the transaction. A scheduled global inflation rate provides a source for rewards distributed to validation-clients, through the process described above. -Transaction fees are set by the network cluster based on recent historical throughput, see [Congestion Driven Fees](implemented-proposals/transaction-fees.md#congestion-driven-fees). This minimum portion of each transaction fee can be dynamically adjusted depending on historical _signatures-per-slot_. In this way, the protocol can use the minimum fee to target a desired hardware utilization.
By monitoring a protocol specified _signatures-per-slot_ with respect to a desired, target usage amount, the minimum fee can be raised/lowered which should, in turn, lower/raise the actual _signature-per-slot_ per block until it reaches the target amount. This adjustment process can be thought of as similar to the difficulty adjustment algorithm in the Bitcoin protocol, however in this case it is adjusting the minimum transaction fee to guide the transaction processing hardware usage to a desired level. +Recently, Solana has introduced an optional fee called the "_[prioritization fee](./terminology.md#prioritization-fee)_". This additional fee can be paid to help boost how a transaction is prioritized against others, resulting in faster transaction execution times. -As mentioned, a fixed-proportion of each transaction fee is to be destroyed. The intent of this design is to retain leader incentive to include as many transactions as possible within the leader-slot time, while providing an inflation limiting mechanism that protects against "tax evasion" attacks \(i.e. side-channel fee payments\). +The prioritization fee is calculated by multiplying the requested maximum _compute units_ by the compute-unit price (specified in increments of 0.000001 lamports per compute unit) rounded up to the nearest lamport. -Additionally, the burnt fees can be a consideration in fork selection. In the case of a PoH fork with a malicious, censoring leader, we would expect the total fees destroyed to be less than a comparable honest fork, due to the fees lost from censoring. If the censoring leader is to compensate for these lost protocol fees, they would have to replace the burnt fees on their fork themselves, thus potentially reducing the incentive to censor in the first place. +You can read more about the [compute budget instruction](./developing/programming-model/runtime.md#compute-budget) here. 
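The prioritization-fee arithmetic described above can be written out as a short, self-contained Rust sketch (an illustration under the stated assumptions, not the actual runtime code; `prioritization_fee` is an invented name): multiply the requested compute-unit limit by the price in micro-lamports (0.000001 lamports) per compute unit, then round the product up to the nearest whole lamport.

```rust
/// Illustrative only: compute a prioritization fee in lamports from a
/// requested compute-unit limit and a compute-unit price expressed in
/// micro-lamports (0.000001 lamports) per compute unit.
fn prioritization_fee(compute_unit_limit: u64, micro_lamports_per_cu: u64) -> u64 {
    const MICRO_LAMPORTS_PER_LAMPORT: u128 = 1_000_000;
    let total = compute_unit_limit as u128 * micro_lamports_per_cu as u128;
    // Ceiling division: any fractional lamport rounds up.
    ((total + MICRO_LAMPORTS_PER_LAMPORT - 1) / MICRO_LAMPORTS_PER_LAMPORT) as u64
}

fn main() {
    // 200,000 compute units at 5 micro-lamports each is exactly 1 lamport...
    assert_eq!(prioritization_fee(200_000, 5), 1);
    // ...while 6 micro-lamports per unit rounds up to 2 lamports.
    assert_eq!(prioritization_fee(200_000, 6), 2);
}
```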
From 62e876531938bf1ac32bf62084d03deb5fd519dc Mon Sep 17 00:00:00 2001 From: Jon Cinque Date: Thu, 18 Aug 2022 04:52:54 +0200 Subject: [PATCH 25/67] sdk: Fix args after "--" in build-bpf and test-bpf (#27221) --- sdk/cargo-build-bpf/src/main.rs | 5 +++-- sdk/cargo-test-bpf/src/main.rs | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index 5de2742c2add32..0ea34f68c6cb74 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -26,8 +26,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-build-bpf child: {}", program.display()); for a in &args { print!(" {}", a); diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs index fee4dc73811fe5..af5a382fdd4ec9 100644 --- a/sdk/cargo-test-bpf/src/main.rs +++ b/sdk/cargo-test-bpf/src/main.rs @@ -32,8 +32,9 @@ fn main() { args.remove(0); } } - args.push("--arch".to_string()); - args.push("bpf".to_string()); + let index = args.iter().position(|x| x == "--").unwrap_or(args.len()); + args.insert(index, "bpf".to_string()); + args.insert(index, "--arch".to_string()); print!("cargo-test-bpf child: {}", program.display()); for a in &args { print!(" {}", a); From ac6ab905fbb67562c80714b18e0f4847a3936d58 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Wed, 17 Aug 2022 19:56:57 -0700 Subject: [PATCH 26/67] Flaky Unit Test test_rpc_subscriptions (#27214) Increase unit test timeout from 5 seconds to 10 seconds --- rpc-test/tests/rpc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 464560a309b214..5265e93f14f6fb 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -402,7 +402,7 @@ fn test_rpc_subscriptions() { } } - let deadline = Instant::now() + Duration::from_secs(5); + let deadline = Instant::now() + Duration::from_secs(10); let mut account_notifications = transactions.len(); while account_notifications > 0 { let timeout = deadline.saturating_duration_since(Instant::now()); From 27c9191df8c1c141f58e7745c72475d6ecdd2757 Mon Sep 17 00:00:00 2001 From: Yihau Chen Date: Thu, 18 Aug 2022 12:33:30 +0800 Subject: [PATCH 27/67] chore: only buildkite pipelines use sccache in docker-run.sh (#27204) chore: only buildkite ci use sccache --- ci/docker-run.sh | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/ci/docker-run.sh b/ci/docker-run.sh index e154de2eefd96d..a7b94f902e8303 100755 --- a/ci/docker-run.sh +++ b/ci/docker-run.sh @@ -45,14 +45,16 @@ if [[ -n $CI ]]; then # Share the real ~/.cargo between docker containers in CI for speed ARGS+=(--volume "$HOME:/home") - # sccache - ARGS+=( - --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" - --env AWS_ACCESS_KEY_ID - --env AWS_SECRET_ACCESS_KEY - --env SCCACHE_BUCKET - --env SCCACHE_REGION - ) + if [[ -n $BUILDKITE ]]; then + # sccache + ARGS+=( + --env "RUSTC_WRAPPER=/home/.cargo/bin/sccache" + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --env SCCACHE_BUCKET + --env SCCACHE_REGION + ) + fi else # Avoid sharing ~/.cargo when building locally to avoid a mixed macOS/Linux # ~/.cargo From 2f51fa312a4fa56e6eb3aa1c6c2c1e1fea32164d Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 18 Aug 2022 06:21:16 +0100 Subject: [PATCH 28/67] 
clean feature: `prevent_calling_precompiles_as_programs` (#27100) * clean feature: prevent_calling_precompiles_as_programs * fix tests * fix test * remove comment * fix test * feedback --- programs/bpf_loader/src/syscalls/cpi.rs | 10 ++---- programs/bpf_loader/src/syscalls/mod.rs | 3 +- .../tests/process_transaction.rs | 25 --------------- runtime/src/bank.rs | 12 +++---- runtime/src/builtins.rs | 32 ++----------------- runtime/src/genesis_utils.rs | 2 ++ runtime/src/message_processor.rs | 8 ++--- sdk/src/feature_set.rs | 2 -- sdk/src/precompiles.rs | 8 ++--- 9 files changed, 20 insertions(+), 82 deletions(-) diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index d465c506288349..215f7267fb6f7d 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -834,7 +834,6 @@ fn check_authorized_program( instruction_data: &[u8], invoke_context: &InvokeContext, ) -> Result<(), EbpfError> { - #[allow(clippy::blocks_in_if_conditions)] if native_loader::check_id(program_id) || bpf_loader::check_id(program_id) || bpf_loader_deprecated::check_id(program_id) @@ -842,12 +841,9 @@ fn check_authorized_program( && !(bpf_loader_upgradeable::is_upgrade_instruction(instruction_data) || bpf_loader_upgradeable::is_set_authority_instruction(instruction_data) || bpf_loader_upgradeable::is_close_instruction(instruction_data))) - || (invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |feature_id: &Pubkey| { - invoke_context.feature_set.is_active(feature_id) - })) + || is_precompile(program_id, |feature_id: &Pubkey| { + invoke_context.feature_set.is_active(feature_id) + }) { return Err(SyscallError::ProgramNotSupported(*program_id).into()); } diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 467cdd4f74dd7a..2175a1f2a12c27 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -36,8 +36,7 @@ use { self, blake3_syscall_enabled, check_physical_overlapping, check_slice_translation_size, curve25519_syscall_enabled, disable_cpi_setting_executable_and_rent_epoch, disable_fees_sysvar, enable_early_verification_of_account_modifications, - libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, - prevent_calling_precompiles_as_programs, syscall_saturated_math, + libsecp256k1_0_5_upgrade_enabled, limit_secp256k1_recovery_id, syscall_saturated_math, }, hash::{Hasher, HASH_BYTES}, instruction::{ diff --git a/programs/ed25519-tests/tests/process_transaction.rs b/programs/ed25519-tests/tests/process_transaction.rs index 0ef08e42fd9796..ac786a5e7094d1 100644 --- a/programs/ed25519-tests/tests/process_transaction.rs +++ b/programs/ed25519-tests/tests/process_transaction.rs @@ -4,7 +4,6 @@ use { solana_program_test::*, solana_sdk::{ ed25519_instruction::new_ed25519_instruction, - feature_set, signature::Signer, transaction::{Transaction, TransactionError}, }, @@ -60,27 +59,3 @@ async fn test_failure() { )) ); } - -#[tokio::test] -async fn test_success_call_builtin_program() { - let mut program_test = ProgramTest::default(); - program_test.deactivate_feature(feature_set::prevent_calling_precompiles_as_programs::id()); - let mut context = program_test.start_with_context().await; - - let client = &mut context.banks_client; - let payer = &context.payer; - let recent_blockhash = context.last_blockhash; - - let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); - let 
message_arr = b"hello"; - let instruction = new_ed25519_instruction(&privkey, message_arr); - - let transaction = Transaction::new_signed_with_payer( - &[instruction], - Some(&payer.pubkey()), - &[payer], - recent_blockhash, - ); - - assert_matches!(client.process_transaction(transaction).await, Ok(())); -} diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 855add35a1770a..44de16f157f8f3 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -14586,25 +14586,25 @@ pub(crate) mod tests { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "9tLrxkBoNE7zEUZ2g72ZwE4fTfhUQnhC8A4Xt4EmYhP1" + "5gY6TCgB9NymbbxgFgAjvYLpXjyXiVyyruS1aEwbWKLK" ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "AxphC8xDj9gmFosor5gyiovNvPVMydJCFRUTxn2wFiQf" + "6uJ5C4QDXWCN39EjJ5Frcz73nnS2jMJ55KgkQff12Fqp" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "4vZCSbBuL8xjE43rCy9Cm3dCh1BMj45heMiMb6n6qgzA" + "4u8bxZRLYdQBkWRBwmpcwcQVMCJoEpzY7hCuAzxr3kCe" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "46LUpeBdJuisnfwgYisvh4x7jnxzBaLfHF614GtcTs59" + "4c5F8UbcDD8FM7qXcfv6BPPo6nHNYJQmN5gHiCMTdEzX" ); break; } @@ -14832,7 +14832,7 @@ pub(crate) mod tests { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![9, 1, 7]); + assert_eq!(alive_counts, vec![11, 1, 7]); } #[test] @@ -14878,7 +14878,7 @@ pub(crate) mod tests { .map(|_| bank.process_stale_slot_with_budget(0, force_to_return_alive_account)) .sum(); // consumed_budgets represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(consumed_budgets, 10); + assert_eq!(consumed_budgets, 12); } #[test] diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 0e3f98843a18a7..d7ec37aed3337e 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -2,10 +2,8 @@ use solana_frozen_abi::abi_example::AbiExample; use { crate::system_instruction_processor, - solana_program_runtime::invoke_context::{InvokeContext, ProcessInstructionWithContext}, - solana_sdk::{ - feature_set, instruction::InstructionError, pubkey::Pubkey, stake, system_program, - }, + solana_program_runtime::invoke_context::ProcessInstructionWithContext, + solana_sdk::{feature_set, pubkey::Pubkey, stake, system_program}, std::fmt, }; @@ -141,14 +139,6 @@ fn genesis_builtins() -> Vec { ] } -/// place holder for precompile programs, remove when the precompile program is deactivated via feature activation -fn dummy_process_instruction( - _first_instruction_account: usize, - _invoke_context: &mut InvokeContext, -) -> Result<(), InstructionError> { - Ok(()) -} - /// Dynamic feature transitions for builtin programs fn builtin_feature_transitions() -> Vec { vec![ @@ -160,24 +150,6 @@ fn builtin_feature_transitions() -> Vec { ), feature_id: feature_set::add_compute_budget_program::id(), }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "secp256k1_program", - solana_sdk::secp256k1_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::secp256k1_program_enabled::id(), - removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, - BuiltinFeatureTransition::RemoveOrRetain { - previously_added_builtin: Builtin::new( - "ed25519_program", - solana_sdk::ed25519_program::id(), - dummy_process_instruction, - ), - addition_feature_id: feature_set::ed25519_program_enabled::id(), - 
removal_feature_id: feature_set::prevent_calling_precompiles_as_programs::id(), - }, BuiltinFeatureTransition::Add { builtin: Builtin::new( "address_lookup_table_program", diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 73ab5c105b0d1a..d5330df0031487 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -27,6 +27,7 @@ pub fn bootstrap_validator_stake_lamports() -> u64 { // Number of lamports automatically used for genesis accounts pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { const NUM_BUILTIN_PROGRAMS: u64 = 4; + const NUM_PRECOMPILES: u64 = 2; const FEES_SYSVAR_MIN_BALANCE: u64 = 946_560; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; @@ -41,6 +42,7 @@ pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { + EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE + RECENT_BLOCKHASHES_SYSVAR_MIN_BALANCE + NUM_BUILTIN_PROGRAMS + + NUM_PRECOMPILES } pub struct ValidatorVoteKeypairs { diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index c1b06c141dbc98..23eb1e800e9818 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -10,7 +10,7 @@ use { }, solana_sdk::{ account::WritableAccount, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, + feature_set::FeatureSet, hash::Hash, message::SanitizedMessage, precompiles::is_precompile, @@ -86,10 +86,8 @@ impl MessageProcessor { .zip(program_indices.iter()) .enumerate() { - let is_precompile = invoke_context - .feature_set - .is_active(&prevent_calling_precompiles_as_programs::id()) - && is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); + let is_precompile = + is_precompile(program_id, |id| invoke_context.feature_set.is_active(id)); // Fixup the special instructions key if present // before the account pre-values are taken care of diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 0c14aa18edb235..f3c86948079ac6 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -201,8 +201,6 @@ pub mod do_support_realloc { solana_sdk::declare_id!("75m6ysz33AfLA5DDEzWM1obBrnPQRSsdVQ2nRmc8Vuu1"); } -// Note: when this feature is cleaned up, also remove the secp256k1 program from -// the list of builtins and remove its files from /programs pub mod prevent_calling_precompiles_as_programs { solana_sdk::declare_id!("4ApgRX3ud6p7LNMJmsuaAcZY5HWctGPr5obAsjB3A54d"); } diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs index 1f6149772c88e8..e97474b25cc38d 100644 --- a/sdk/src/precompiles.rs +++ b/sdk/src/precompiles.rs @@ -4,9 +4,7 @@ use { crate::{ - decode_error::DecodeError, - feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, - instruction::CompiledInstruction, + decode_error::DecodeError, feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey, }, lazy_static::lazy_static, @@ -81,12 +79,12 @@ lazy_static! 
{ static ref PRECOMPILES: Vec = vec![ Precompile::new( crate::secp256k1_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::secp256k1_instruction::verify, ), Precompile::new( crate::ed25519_program::id(), - Some(prevent_calling_precompiles_as_programs::id()), + None, // always enabled crate::ed25519_instruction::verify, ), ]; From 5060e3a4fa7348fc5286af3af56130d25e69590e Mon Sep 17 00:00:00 2001 From: kirill lykov Date: Thu, 18 Aug 2022 10:17:32 +0200 Subject: [PATCH 29/67] Add get_account_with_commitment to BenchTpsClient (#27176) --- bench-tps/src/bench_tps_client.rs | 7 +++++++ bench-tps/src/bench_tps_client/bank_client.rs | 14 ++++++++++++++ bench-tps/src/bench_tps_client/rpc_client.rs | 17 ++++++++++++++++- bench-tps/src/bench_tps_client/thin_client.rs | 16 +++++++++++++++- bench-tps/src/bench_tps_client/tpu_client.rs | 18 +++++++++++++++++- 5 files changed, 69 insertions(+), 3 deletions(-) diff --git a/bench-tps/src/bench_tps_client.rs b/bench-tps/src/bench_tps_client.rs index 3d34a3a041a361..0ecca308ef647f 100644 --- a/bench-tps/src/bench_tps_client.rs +++ b/bench-tps/src/bench_tps_client.rs @@ -83,6 +83,13 @@ pub trait BenchTpsClient { /// Returns all information associated with the account of the provided pubkey fn get_account(&self, pubkey: &Pubkey) -> Result; + + /// Returns all information associated with the account of the provided pubkey, using explicit commitment + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result; } mod bank_client; diff --git a/bench-tps/src/bench_tps_client/bank_client.rs b/bench-tps/src/bench_tps_client/bank_client.rs index 9fae1f7a93c7f6..20323656a3b3b0 100644 --- a/bench-tps/src/bench_tps_client/bank_client.rs +++ b/bench-tps/src/bench_tps_client/bank_client.rs @@ -93,4 +93,18 @@ impl BenchTpsClient for BankClient { }) }) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + SyncClient::get_account_with_commitment(self, pubkey, commitment_config) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/rpc_client.rs b/bench-tps/src/bench_tps_client/rpc_client.rs index dd34a11f5820d1..158fddd0a4a6fb 100644 --- a/bench-tps/src/bench_tps_client/rpc_client.rs +++ b/bench-tps/src/bench_tps_client/rpc_client.rs @@ -1,5 +1,5 @@ use { - crate::bench_tps_client::{BenchTpsClient, Result}, + crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result}, solana_client::rpc_client::RpcClient, solana_sdk::{ account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash, @@ -84,4 +84,19 @@ impl BenchTpsClient for RpcClient { fn get_account(&self, pubkey: &Pubkey) -> Result { RpcClient::get_account(self, pubkey).map_err(|err| err.into()) } + + fn get_account_with_commitment( + &self, + pubkey: &Pubkey, + commitment_config: CommitmentConfig, + ) -> Result { + RpcClient::get_account_with_commitment(self, pubkey, commitment_config) + .map(|res| res.value) + .map_err(|err| err.into()) + .and_then(|account| { + account.ok_or_else(|| { + BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey)) + }) + }) + } } diff --git a/bench-tps/src/bench_tps_client/thin_client.rs b/bench-tps/src/bench_tps_client/thin_client.rs index 13d77078453c8a..16686b8186ecfb 100644 --- 
a/bench-tps/src/bench_tps_client/thin_client.rs
+++ b/bench-tps/src/bench_tps_client/thin_client.rs
@@ -1,5 +1,5 @@
 use {
-    crate::bench_tps_client::{BenchTpsClient, Result},
+    crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
     solana_client::thin_client::ThinClient,
     solana_sdk::{
         account::Account,
@@ -90,4 +90,18 @@ impl BenchTpsClient for ThinClient {
             .get_account(pubkey)
             .map_err(|err| err.into())
     }
+
+    fn get_account_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<Account> {
+        SyncClient::get_account_with_commitment(self, pubkey, commitment_config)
+            .map_err(|err| err.into())
+            .and_then(|account| {
+                account.ok_or_else(|| {
+                    BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey))
+                })
+            })
+    }
 }
diff --git a/bench-tps/src/bench_tps_client/tpu_client.rs b/bench-tps/src/bench_tps_client/tpu_client.rs
index 53b0102a00f11b..aa86e793a2a498 100644
--- a/bench-tps/src/bench_tps_client/tpu_client.rs
+++ b/bench-tps/src/bench_tps_client/tpu_client.rs
@@ -1,5 +1,5 @@
 use {
-    crate::bench_tps_client::{BenchTpsClient, Result},
+    crate::bench_tps_client::{BenchTpsClient, BenchTpsError, Result},
     solana_client::tpu_client::TpuClient,
     solana_sdk::{
         account::Account, commitment_config::CommitmentConfig, epoch_info::EpochInfo, hash::Hash,
@@ -102,4 +102,20 @@ impl BenchTpsClient for TpuClient {
             .get_account(pubkey)
             .map_err(|err| err.into())
     }
+
+    fn get_account_with_commitment(
+        &self,
+        pubkey: &Pubkey,
+        commitment_config: CommitmentConfig,
+    ) -> Result<Account> {
+        self.rpc_client()
+            .get_account_with_commitment(pubkey, commitment_config)
+            .map(|res| res.value)
+            .map_err(|err| err.into())
+            .and_then(|account| {
+                account.ok_or_else(|| {
+                    BenchTpsError::Custom(format!("AccountNotFound: pubkey={}", pubkey))
+                })
+            })
+    }
 }

From 1524aa7c6629c8ce21de24e7ee7e952f565d17d0 Mon Sep 17 00:00:00 2001
From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com>
Date: Thu, 18 Aug 2022 02:37:19 -0700
Subject: [PATCH 30/67] Fix a corner-case panic in get_entries_in_data_block() (#27195)

#### Problem
get_entries_in_data_block() panics when there's inconsistency between slot_meta and data_shred.

However, as we don't lock on reads, reading across multiple column families is not atomic (especially for older slots) and thus does not guarantee consistency as the background cleanup service could purge the slot in the middle. Such panic was reported in #26980 when the validator serves a high load of RPC calls.

#### Summary of Changes
This PR makes get_entries_in_data_block() panic only when the inconsistency between slot-meta and data-shred happens on a slot newer than lowest_cleanup_slot.
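The guard this patch introduces can be summarized with a simplified sketch (hypothetical names and signatures; the real logic is in the `blockstore.rs` diff below): a missing shred is only a genuine slot_meta/data-shred inconsistency, and therefore worth a panic, when the slot is newer than `lowest_cleanup_slot`, because such a slot cannot have been purged yet. For older slots the miss is reported as a recoverable error instead.

```rust
// Hypothetical, simplified version of the new guard; names stand in for the
// real Blockstore internals shown in the diff below.
fn on_missing_shred(slot: u64, index: u64, lowest_cleanup_slot: u64) -> Result<(), String> {
    if slot > lowest_cleanup_slot {
        // The cleanup service cannot have purged this slot yet, so slot_meta
        // really is inconsistent with the data shreds: fail loudly.
        panic!("shred for slot {slot} index {index} must exist per slot_meta");
    }
    // The slot may have been purged between the slot_meta read and the shred
    // read; surface a recoverable error instead of crashing the validator.
    Err(format!("Missing shred for slot {slot}, index {index}"))
}
```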
--- ledger/src/blockstore.rs | 43 ++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 66340b5cb00034..336dcd86bf930e 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -2872,28 +2872,29 @@ impl Blockstore { .and_then(|serialized_shred| { if serialized_shred.is_none() { if let Some(slot_meta) = slot_meta { - panic!( - "Shred with - slot: {}, - index: {}, - consumed: {}, - completed_indexes: {:?} - must exist if shred index was included in a range: {} {}", - slot, - i, - slot_meta.consumed, - slot_meta.completed_data_indexes, - start_index, - end_index - ); - } else { - return Err(BlockstoreError::InvalidShredData(Box::new( - bincode::ErrorKind::Custom(format!( - "Missing shred for slot {}, index {}", - slot, i - )), - ))); + if slot > self.lowest_cleanup_slot() { + panic!( + "Shred with + slot: {}, + index: {}, + consumed: {}, + completed_indexes: {:?} + must exist if shred index was included in a range: {} {}", + slot, + i, + slot_meta.consumed, + slot_meta.completed_data_indexes, + start_index, + end_index + ); + } } + return Err(BlockstoreError::InvalidShredData(Box::new( + bincode::ErrorKind::Custom(format!( + "Missing shred for slot {}, index {}", + slot, i + )), + ))); } Shred::new_from_serialized_shred(serialized_shred.unwrap()).map_err(|err| { From 76b7384b14390cdeacda94902261beb1de8bb434 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 09:48:58 -0400 Subject: [PATCH 31/67] Verify snapshot slot deltas (#26666) --- runtime/src/accounts_background_service.rs | 1 + runtime/src/snapshot_utils.rs | 293 ++++++++++++++++++++- 2 files changed, 292 insertions(+), 2 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index a0695e3373774e..c38203ab821e96 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -370,6 +370,7 @@ impl SnapshotRequestHandler { SnapshotError::MismatchedBaseSlot(..) => true, SnapshotError::NoSnapshotArchives => true, SnapshotError::MismatchedSlotHash(..) => true, + SnapshotError::VerifySlotDeltas(..) 
=> true, } } } diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 6018db95d3477b..81cdbcc37a6225 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -19,6 +19,7 @@ use { snapshot_package::{ AccountsPackage, PendingAccountsPackage, SnapshotPackage, SnapshotType, }, + status_cache, }, bincode::{config::Options, serialize_into}, bzip2::bufread::BzDecoder, @@ -28,7 +29,13 @@ use { rayon::prelude::*, regex::Regex, solana_measure::measure::Measure, - solana_sdk::{clock::Slot, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey}, + solana_sdk::{ + clock::Slot, + genesis_config::GenesisConfig, + hash::Hash, + pubkey::Pubkey, + slot_history::{Check, SlotHistory}, + }, std::{ cmp::Ordering, collections::{HashMap, HashSet}, @@ -223,9 +230,37 @@ pub enum SnapshotError { #[error("snapshot has mismatch: deserialized bank: {:?}, snapshot archive info: {:?}", .0, .1)] MismatchedSlotHash((Slot, Hash), (Slot, Hash)), + + #[error("snapshot slot deltas are invalid: {0}")] + VerifySlotDeltas(#[from] VerifySlotDeltasError), } pub type Result = std::result::Result; +/// Errors that can happen in `verify_slot_deltas()` +#[derive(Error, Debug, PartialEq, Eq)] +pub enum VerifySlotDeltasError { + #[error("too many entries: {0} (max: {1})")] + TooManyEntries(usize, usize), + + #[error("slot {0} is not a root")] + SlotIsNotRoot(Slot), + + #[error("slot {0} is greater than bank slot {1}")] + SlotGreaterThanMaxRoot(Slot, Slot), + + #[error("slot {0} has multiple entries")] + SlotHasMultipleEntries(Slot), + + #[error("slot {0} was not found in slot history")] + SlotNotFoundInHistory(Slot), + + #[error("slot {0} was in history but missing from slot deltas")] + SlotNotFoundInDeltas(Slot), + + #[error("slot history is bad and cannot be used to verify slot deltas")] + BadSlotHistory, +} + /// If the validator halts in the middle of `archive_snapshot_package()`, the temporary staging /// directory won't be cleaned up. Call this function to clean them up. pub fn remove_tmp_snapshot_archives(snapshot_archives_dir: impl AsRef) { @@ -1738,6 +1773,8 @@ fn rebuild_bank_from_snapshots( Ok(slot_deltas) })?; + verify_slot_deltas(slot_deltas.as_slice(), &bank)?; + bank.status_cache.write().unwrap().append(&slot_deltas); bank.prepare_rewrites_for_hash(); @@ -1746,6 +1783,106 @@ fn rebuild_bank_from_snapshots( Ok(bank) } +/// Verify that the snapshot's slot deltas are not corrupt/invalid +fn verify_slot_deltas( + slot_deltas: &[BankSlotDelta], + bank: &Bank, +) -> std::result::Result<(), VerifySlotDeltasError> { + let info = verify_slot_deltas_structural(slot_deltas, bank.slot())?; + verify_slot_deltas_with_history(&info.slots, &bank.get_slot_history(), bank.slot()) +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks are simple/structural +fn verify_slot_deltas_structural( + slot_deltas: &[BankSlotDelta], + bank_slot: Slot, +) -> std::result::Result { + // there should not be more entries than that status cache's max + let num_entries = slot_deltas.len(); + if num_entries > status_cache::MAX_CACHE_ENTRIES { + return Err(VerifySlotDeltasError::TooManyEntries( + num_entries, + status_cache::MAX_CACHE_ENTRIES, + )); + } + + let mut slots_seen_so_far = HashSet::new(); + for &(slot, is_root, ..) 
in slot_deltas { + // all entries should be roots + if !is_root { + return Err(VerifySlotDeltasError::SlotIsNotRoot(slot)); + } + + // all entries should be for slots less than or equal to the bank's slot + if slot > bank_slot { + return Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + slot, bank_slot, + )); + } + + // there should only be one entry per slot + let is_duplicate = !slots_seen_so_far.insert(slot); + if is_duplicate { + return Err(VerifySlotDeltasError::SlotHasMultipleEntries(slot)); + } + } + + // detect serious logic error for future careless changes. :) + assert_eq!(slots_seen_so_far.len(), slot_deltas.len()); + + Ok(VerifySlotDeltasStructuralInfo { + slots: slots_seen_so_far, + }) +} + +/// Computed information from `verify_slot_deltas_structural()`, that may be reused/useful later. +#[derive(Debug, PartialEq, Eq)] +struct VerifySlotDeltasStructuralInfo { + /// All the slots in the slot deltas + slots: HashSet, +} + +/// Verify that the snapshot's slot deltas are not corrupt/invalid +/// These checks use the slot history for verification +fn verify_slot_deltas_with_history( + slots_from_slot_deltas: &HashSet, + slot_history: &SlotHistory, + bank_slot: Slot, +) -> std::result::Result<(), VerifySlotDeltasError> { + // ensure the slot history is valid (as much as possible), since we're using it to verify the + // slot deltas + if slot_history.newest() != bank_slot { + return Err(VerifySlotDeltasError::BadSlotHistory); + } + + // all slots in the slot deltas should be in the bank's slot history + let slot_missing_from_history = slots_from_slot_deltas + .iter() + .find(|slot| slot_history.check(**slot) != Check::Found); + if let Some(slot) = slot_missing_from_history { + return Err(VerifySlotDeltasError::SlotNotFoundInHistory(*slot)); + } + + // all slots in the history should be in the slot deltas (up to MAX_CACHE_ENTRIES) + // this ensures nothing was removed from the status cache + // + // go through the slot history and make sure there's an entry for each slot + // note: it's important to go highest-to-lowest since the status cache removes + // older entries first + // note: we already checked above that `bank_slot == slot_history.newest()` + let slot_missing_from_deltas = (slot_history.oldest()..=slot_history.newest()) + .rev() + .filter(|slot| slot_history.check(*slot) == Check::Found) + .take(status_cache::MAX_CACHE_ENTRIES) + .find(|slot| !slots_from_slot_deltas.contains(slot)); + if let Some(slot) = slot_missing_from_deltas { + return Err(VerifySlotDeltasError::SlotNotFoundInDeltas(slot)); + } + + Ok(()) +} + pub(crate) fn get_snapshot_file_name(slot: Slot) -> String { slot.to_string() } @@ -2167,13 +2304,14 @@ fn can_submit_accounts_package( mod tests { use { super::*, - crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, + crate::{accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING, status_cache::Status}, assert_matches::assert_matches, bincode::{deserialize_from, serialize_into}, solana_sdk::{ genesis_config::create_genesis_config, native_token::sol_to_lamports, signature::{Keypair, Signer}, + slot_history::SlotHistory, system_transaction, transaction::SanitizedTransaction, }, @@ -3831,4 +3969,155 @@ mod tests { assert_eq!(expected_result, actual_result); } } + + #[test] + fn test_verify_slot_deltas_structural_good() { + // NOTE: slot deltas do not need to be sorted + let slot_deltas = vec![ + (222, true, Status::default()), + (333, true, Status::default()), + (111, true, Status::default()), + ]; + + let bank_slot = 333; + let result = 
verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Ok(VerifySlotDeltasStructuralInfo { + slots: HashSet::from([111, 222, 333]) + }) + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_too_many_entries() { + let bank_slot = status_cache::MAX_CACHE_ENTRIES as Slot + 1; + let slot_deltas: Vec<_> = (0..bank_slot) + .map(|slot| (slot, true, Status::default())) + .collect(); + + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::TooManyEntries( + status_cache::MAX_CACHE_ENTRIES + 1, + status_cache::MAX_CACHE_ENTRIES + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_not_root() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, false, Status::default()), // <-- slot is not a root + (333, true, Status::default()), + ]; + + let bank_slot = 333; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!(result, Err(VerifySlotDeltasError::SlotIsNotRoot(222))); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_greater_than_bank() { + let slot_deltas = vec![ + (222, true, Status::default()), + (111, true, Status::default()), + (555, true, Status::default()), // <-- slot is greater than the bank slot + ]; + + let bank_slot = 444; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotGreaterThanMaxRoot( + 555, bank_slot + )), + ); + } + + #[test] + fn test_verify_slot_deltas_structural_bad_slot_has_multiple_entries() { + let slot_deltas = vec![ + (111, true, Status::default()), + (222, true, Status::default()), + (111, true, Status::default()), // <-- slot is a duplicate + ]; + + let bank_slot = 222; + let result = verify_slot_deltas_structural(slot_deltas.as_slice(), bank_slot); + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotHasMultipleEntries(111)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_good() { + let mut slots_from_slot_deltas = HashSet::default(); + let mut slot_history = SlotHistory::default(); + // note: slot history expects slots to be added in numeric order + for slot in [0, 111, 222, 333, 444] { + slots_from_slot_deltas.insert(slot); + slot_history.add(slot); + } + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + assert_eq!(result, Ok(())); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_history() { + let bank_slot = 444; + let result = verify_slot_deltas_with_history( + &HashSet::default(), + &SlotHistory::default(), // <-- will only have an entry for slot 0 + bank_slot, + ); + assert_eq!(result, Err(VerifySlotDeltasError::BadSlotHistory)); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_history() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(444); // <-- slot history is missing slot 222 + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInHistory(222)), + ); + } + + #[test] + fn test_verify_slot_deltas_with_history_bad_slot_not_in_deltas() { + let slots_from_slot_deltas = HashSet::from([ + 0, // slot history has slot 0 added by default + 444, 222, + // <-- 
slot deltas is missing slot 333 + ]); + let mut slot_history = SlotHistory::default(); + slot_history.add(222); + slot_history.add(333); + slot_history.add(444); + + let bank_slot = 444; + let result = + verify_slot_deltas_with_history(&slots_from_slot_deltas, &slot_history, bank_slot); + + assert_eq!( + result, + Err(VerifySlotDeltasError::SlotNotFoundInDeltas(333)), + ); + } } From ad9d44a2784faa71fee85f477db2f94b43bb4bd5 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Thu, 18 Aug 2022 10:00:04 -0500 Subject: [PATCH 32/67] store-tool: log lamports for each account (#27168) log lamports for each account --- runtime/store-tool/src/main.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/store-tool/src/main.rs b/runtime/store-tool/src/main.rs index edfc00d8ee1323..8f7d2f2a402a05 100644 --- a/runtime/store-tool/src/main.rs +++ b/runtime/store-tool/src/main.rs @@ -40,8 +40,12 @@ fn main() { break; } info!( - " account: {:?} version: {} data: {} hash: {:?}", - account.meta.pubkey, account.meta.write_version, account.meta.data_len, account.hash + " account: {:?} version: {} lamports: {} data: {} hash: {:?}", + account.meta.pubkey, + account.meta.write_version, + account.account_meta.lamports, + account.meta.data_len, + account.hash ); num_accounts = num_accounts.saturating_add(1); stored_accounts_len = stored_accounts_len.saturating_add(account.stored_size); From 36fea44b6056d2692be22509c49619a28141e29d Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 10:35:22 -0500 Subject: [PATCH 33/67] add an assert for a debug feature to avoid wasted time (#27210) --- runtime/src/accounts_db.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 4c789751a2405d..03d343f0ac18a2 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6895,6 +6895,11 @@ impl AccountsDb { stats.oldest_root = storages.range().start; + assert!( + !(config.store_detailed_debug_info_on_failure && config.use_write_cache), + "cannot accurately capture all data for debugging if accounts cache is being used" + ); + self.mark_old_slots_as_dirty(storages, Some(config.epoch_schedule.slots_per_epoch)); let (num_hash_scan_passes, bins_per_pass) = Self::bins_per_pass(self.num_hash_scan_passes); From 19fa32b83ca1017ed9fe363c3690c3ad6b238dbb Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:01:05 -0500 Subject: [PATCH 34/67] remove redundant call that bumps age to future (#27215) --- runtime/src/in_mem_accounts_index.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index c04e0eed1080bf..9ab9da4e8099d2 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -315,7 +315,6 @@ impl InMemAccountsIndex { ) -> RT { self.get_only_in_mem(pubkey, |entry| { if let Some(entry) = entry { - entry.set_age(self.storage.future_age_to_flush()); callback(Some(entry)).1 } else { // not in cache, look on disk From 9c94ca33d815f017f1f63d532d4554c1f0527762 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Thu, 18 Aug 2022 11:06:52 -0500 Subject: [PATCH 35/67] Use from_secs api to create duration (#27222) use from_secs api to create duration --- gossip/tests/gossip.rs | 6 +++--- ledger/src/blockstore.rs | 2 +- runtime/src/bank.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/gossip/tests/gossip.rs b/gossip/tests/gossip.rs index 
f3e136cdba7f72..066bdb24f78b5b 100644 --- a/gossip/tests/gossip.rs +++ b/gossip/tests/gossip.rs @@ -109,7 +109,7 @@ where } else { trace!("not converged {} {} {}", i, total + num, num * num); } - sleep(Duration::new(1, 0)); + sleep(Duration::from_secs(1)); } exit.store(true, Ordering::Relaxed); for (_, dr, _) in listen { @@ -251,7 +251,7 @@ pub fn cluster_info_retransmit() { if done { break; } - sleep(Duration::new(1, 0)); + sleep(Duration::from_secs(1)); } assert!(done); let mut p = Packet::default(); @@ -269,7 +269,7 @@ pub fn cluster_info_retransmit() { .into_par_iter() .map(|s| { let mut p = Packet::default(); - s.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); + s.set_read_timeout(Some(Duration::from_secs(1))).unwrap(); let res = s.recv_from(p.buffer_mut()); res.is_err() //true if failed to receive the retransmit packet }) diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 336dcd86bf930e..a4158a1778e2f1 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -5005,7 +5005,7 @@ pub mod tests { blockstore .insert_shreds(vec![shreds.remove(1)], None, false) .unwrap(); - let timer = Duration::new(1, 0); + let timer = Duration::from_secs(1); assert!(recvr.recv_timeout(timer).is_err()); // Insert first shred, now we've made a consecutive block blockstore diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 44de16f157f8f3..cb339892f49748 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -12992,7 +12992,7 @@ pub(crate) mod tests { #[cfg(not(target_os = "linux"))] { error!("{} banks, sleeping for 5 sec", num_banks); - std::thread::sleep(Duration::new(5, 0)); + std::thread::sleep(Duration::from_secs(5)); } } } From 979da4e51d75c287330db95e3b3871ec5bc45a61 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:30:11 -0500 Subject: [PATCH 36/67] reorder slot # in debug hash data path (#27217) --- runtime/src/accounts_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 03d343f0ac18a2..702f70456b68be 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -6876,8 +6876,8 @@ impl AccountsDb { } else { // this path executes when we are failing with a hash mismatch let mut new = self.accounts_hash_cache_path.clone(); - new.push(slot.to_string()); new.push("failed_calculate_accounts_hash_cache"); + new.push(slot.to_string()); let _ = std::fs::remove_dir_all(&new); CacheHashData::new(&new) } From 83b31bd04ba55ab801483c65b14ed82d0ce0f455 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Thu, 18 Aug 2022 11:30:35 -0500 Subject: [PATCH 37/67] create helper fn for clarity (#27216) --- runtime/src/in_mem_accounts_index.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 9ab9da4e8099d2..864915399c04cf 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -279,7 +279,7 @@ impl InMemAccountsIndex { m.stop(); callback(if let Some(entry) = result { - entry.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(entry); Some(entry) } else { drop(map); @@ -305,6 +305,10 @@ impl InMemAccountsIndex { self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone))) } + fn set_age_to_future(&self, entry: &AccountMapEntry) { + entry.set_age(self.storage.future_age_to_flush()); + } + /// lookup 'pubkey' in index (in_mem or disk). 
/// call 'callback' whether found or not pub(crate) fn get_internal( @@ -473,7 +477,7 @@ impl InMemAccountsIndex { reclaims, reclaim, ); - current.set_age(self.storage.future_age_to_flush()); + self.set_age_to_future(current); } Entry::Vacant(vacant) => { // not in cache, look on disk From e09fa3927952d20e08546573d19420a02e0c3955 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 13:24:23 -0400 Subject: [PATCH 38/67] Verifying snapshot bank must always specify the snapshot slot (#27234) --- runtime/src/bank.rs | 10 +++++----- runtime/src/snapshot_utils.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cb339892f49748..077efd090266e3 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7165,19 +7165,19 @@ impl Bank { &self, test_hash_calculation: bool, accounts_db_skip_shrink: bool, - last_full_snapshot_slot: Option, + last_full_snapshot_slot: Slot, ) -> bool { let mut clean_time = Measure::start("clean"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("cleaning.."); - self.clean_accounts(true, true, last_full_snapshot_slot); + self.clean_accounts(true, true, Some(last_full_snapshot_slot)); } clean_time.stop(); let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("shrinking.."); - self.shrink_all_slots(true, last_full_snapshot_slot); + self.shrink_all_slots(true, Some(last_full_snapshot_slot)); } shrink_all_slots_time.stop(); @@ -11672,11 +11672,11 @@ pub(crate) mod tests { .unwrap(); bank.freeze(); bank.update_accounts_hash(); - assert!(bank.verify_snapshot_bank(true, false, None)); + assert!(bank.verify_snapshot_bank(true, false, bank.slot())); // tamper the bank after freeze! bank.increment_signature_count(1); - assert!(!bank.verify_snapshot_bank(true, false, None)); + assert!(!bank.verify_snapshot_bank(true, false, bank.slot())); } // Test that two bank forks with the same accounts should not hash to the same value. diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 81cdbcc37a6225..2273832c19e28a 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -986,7 +986,7 @@ pub fn bank_from_snapshot_archives( if !bank.verify_snapshot_bank( test_hash_calculation, accounts_db_skip_shrink || !full_snapshot_archive_info.is_remote(), - Some(full_snapshot_archive_info.slot()), + full_snapshot_archive_info.slot(), ) && limit_load_slot_count_from_snapshot.is_none() { panic!("Snapshot bank for slot {} failed to verify", bank.slot()); From cfab5c51083c1e5ad1105d4f4c211a8a9ac80251 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Thu, 18 Aug 2022 16:24:36 -0400 Subject: [PATCH 39/67] Remove `Bank::ensure_no_storage_rewards_pool()` (#26468) --- runtime/src/bank.rs | 82 --------------------------------------------- 1 file changed, 82 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 077efd090266e3..249d9eec43e926 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7636,7 +7636,6 @@ impl Bank { ); self.reconfigure_token2_native_mint(); } - self.ensure_no_storage_rewards_pool(); if new_feature_activations.contains(&feature_set::cap_accounts_data_len::id()) { const ACCOUNTS_DATA_LEN: u64 = 50_000_000_000; @@ -7813,36 +7812,6 @@ impl Bank { } } - fn ensure_no_storage_rewards_pool(&mut self) { - let purge_window_epoch = match self.cluster_type() { - ClusterType::Development => false, - // never do this for devnet; we're pristine here. 
:) - ClusterType::Devnet => false, - // schedule to remove at testnet/tds - ClusterType::Testnet => self.epoch() == 93, - // never do this for stable; we're pristine here. :) - ClusterType::MainnetBeta => false, - }; - - if purge_window_epoch { - for reward_pubkey in self.rewards_pool_pubkeys.iter() { - if let Some(mut reward_account) = self.get_account_with_fixed_root(reward_pubkey) { - if reward_account.lamports() == u64::MAX { - reward_account.set_lamports(0); - self.store_account(reward_pubkey, &reward_account); - // Adjust capitalization.... it has been wrapping, reducing the real capitalization by 1-lamport - self.capitalization.fetch_add(1, Relaxed); - info!( - "purged rewards pool account: {}, new capitalization: {}", - reward_pubkey, - self.capitalization() - ); - } - }; - } - } - } - /// Get all the accounts for this bank and calculate stats pub fn get_total_accounts_stats(&self) -> ScanResult { let accounts = self.get_all_accounts_with_modified_slots()?; @@ -15236,57 +15205,6 @@ pub(crate) mod tests { assert_eq!(native_mint_account.owner(), &inline_spl_token::id()); } - #[test] - fn test_ensure_no_storage_rewards_pool() { - solana_logger::setup(); - - let mut genesis_config = - create_genesis_config_with_leader(5, &solana_sdk::pubkey::new_rand(), 0).genesis_config; - - // Testnet - Storage rewards pool is purged at epoch 93 - // Also this is with bad capitalization - genesis_config.cluster_type = ClusterType::Testnet; - genesis_config.inflation = Inflation::default(); - let reward_pubkey = solana_sdk::pubkey::new_rand(); - genesis_config.rewards_pools.insert( - reward_pubkey, - Account::new(u64::MAX, 0, &solana_sdk::pubkey::new_rand()), - ); - let bank0 = Bank::new_for_tests(&genesis_config); - // because capitalization has been reset with bogus capitalization calculation allowing overflows, - // deliberately substract 1 lamport to simulate it - bank0.capitalization.fetch_sub(1, Relaxed); - let bank0 = Arc::new(bank0); - assert_eq!(bank0.get_balance(&reward_pubkey), u64::MAX,); - - let bank1 = Bank::new_from_parent( - &bank0, - &Pubkey::default(), - genesis_config.epoch_schedule.get_first_slot_in_epoch(93), - ); - - // assert that everything gets in order.... - assert!(bank1.get_account(&reward_pubkey).is_none()); - let sysvar_and_builtin_program_delta = 1; - assert_eq!( - bank0.capitalization() + 1 + 1_000_000_000 + sysvar_and_builtin_program_delta, - bank1.capitalization() - ); - assert_eq!(bank1.capitalization(), bank1.calculate_capitalization(true)); - - // Depending on RUSTFLAGS, this test exposes rust's checked math behavior or not... 
- // So do some convolted setup; anyway this test itself will just be temporary - let bank0 = std::panic::AssertUnwindSafe(bank0); - let overflowing_capitalization = - std::panic::catch_unwind(|| bank0.calculate_capitalization(true)); - if let Ok(overflowing_capitalization) = overflowing_capitalization { - info!("asserting overflowing capitalization for bank0"); - assert_eq!(overflowing_capitalization, bank0.capitalization()); - } else { - info!("NOT-asserting overflowing capitalization for bank0"); - } - } - #[derive(Debug)] struct TestExecutor {} impl Executor for TestExecutor { From 471ae901a32f72d7b33c68f3dec44a6a2f00d122 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Thu, 18 Aug 2022 22:12:53 +0100 Subject: [PATCH 40/67] cli: Add subcommands for address lookup tables (#27123) * cli: Add subcommand for creating address lookup tables * cli: Add additional subcommands for address lookup tables * short commands --- Cargo.lock | 1 + cli-output/src/cli_output.rs | 69 +++ cli/Cargo.toml | 1 + cli/src/address_lookup_table.rs | 832 ++++++++++++++++++++++++++++++ cli/src/clap_app.rs | 5 +- cli/src/cli.rs | 14 +- cli/src/lib.rs | 1 + cli/tests/address_lookup_table.rs | 216 ++++++++ 8 files changed, 1135 insertions(+), 4 deletions(-) create mode 100644 cli/src/address_lookup_table.rs create mode 100644 cli/tests/address_lookup_table.rs diff --git a/Cargo.lock b/Cargo.lock index 8616c2632fdfae..9348db394a09bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4874,6 +4874,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", + "solana-address-lookup-table-program", "solana-bpf-loader-program", "solana-clap-utils", "solana-cli-config", diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index f45c5713e4af29..645b7b66fbbb40 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -2111,6 +2111,75 @@ impl fmt::Display for CliUpgradeableBuffers { } } +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTable { + pub lookup_table_address: String, + pub authority: Option, + pub deactivation_slot: u64, + pub last_extended_slot: u64, + pub addresses: Vec, +} +impl QuietDisplay for CliAddressLookupTable {} +impl VerboseDisplay for CliAddressLookupTable {} +impl fmt::Display for CliAddressLookupTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + if let Some(authority) = &self.authority { + writeln_name_value(f, "Authority:", authority)?; + } else { + writeln_name_value(f, "Authority:", "None (frozen)")?; + } + if self.deactivation_slot == u64::MAX { + writeln_name_value(f, "Deactivation Slot:", "None (still active)")?; + } else { + writeln_name_value(f, "Deactivation Slot:", &self.deactivation_slot.to_string())?; + } + if self.last_extended_slot == 0 { + writeln_name_value(f, "Last Extended Slot:", "None (empty)")?; + } else { + writeln_name_value( + f, + "Last Extended Slot:", + &self.last_extended_slot.to_string(), + )?; + } + if self.addresses.is_empty() { + writeln_name_value(f, "Address Table Entries:", "None (empty)")?; + } else { + writeln!(f, "{}", style("Address Table Entries:".to_string()).bold())?; + writeln!(f)?; + writeln!( + f, + "{}", + style(format!(" {:<5} {}", "Index", "Address")).bold() + )?; + for (index, address) in self.addresses.iter().enumerate() { + writeln!(f, " {:<5} {}", index, address)?; + } + } + Ok(()) + } +} + +#[derive(Serialize, 
Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliAddressLookupTableCreated { + pub lookup_table_address: String, + pub signature: String, +} +impl QuietDisplay for CliAddressLookupTableCreated {} +impl VerboseDisplay for CliAddressLookupTableCreated {} +impl fmt::Display for CliAddressLookupTableCreated { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f)?; + writeln_name_value(f, "Signature:", &self.signature)?; + writeln_name_value(f, "Lookup Table Address:", &self.lookup_table_address)?; + Ok(()) + } +} + #[derive(Debug, Default)] pub struct ReturnSignersConfig { pub dump_transaction_message: bool, diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 1ed5ddaef613a6..6fdfa258c45b60 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -28,6 +28,7 @@ serde = "1.0.143" serde_derive = "1.0.103" serde_json = "1.0.83" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.12.0" } solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.12.0" } solana-clap-utils = { path = "../clap-utils", version = "=1.12.0" } solana-cli-config = { path = "../cli-config", version = "=1.12.0" } diff --git a/cli/src/address_lookup_table.rs b/cli/src/address_lookup_table.rs new file mode 100644 index 00000000000000..7f0fa9d3137897 --- /dev/null +++ b/cli/src/address_lookup_table.rs @@ -0,0 +1,832 @@ +use { + crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult}, + clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, + solana_address_lookup_table_program::{ + instruction::{ + close_lookup_table, create_lookup_table, deactivate_lookup_table, extend_lookup_table, + freeze_lookup_table, + }, + state::AddressLookupTable, + }, + solana_clap_utils::{self, input_parsers::*, input_validators::*, keypair::*}, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, CliSignature}, + solana_client::{rpc_client::RpcClient, rpc_config::RpcSendTransactionConfig}, + solana_remote_wallet::remote_wallet::RemoteWalletManager, + solana_sdk::{ + account::from_account, clock::Clock, commitment_config::CommitmentConfig, message::Message, + pubkey::Pubkey, sysvar, transaction::Transaction, + }, + std::sync::Arc, +}; + +#[derive(Debug, PartialEq, Eq)] +pub enum AddressLookupTableCliCommand { + CreateLookupTable { + authority_signer_index: SignerIndex, + payer_signer_index: SignerIndex, + }, + FreezeLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + ExtendLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + payer_signer_index: SignerIndex, + new_addresses: Vec, + }, + DeactivateLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + bypass_warning: bool, + }, + CloseLookupTable { + lookup_table_pubkey: Pubkey, + authority_signer_index: SignerIndex, + recipient_pubkey: Pubkey, + }, + ShowLookupTable { + lookup_table_pubkey: Pubkey, + }, +} + +pub trait AddressLookupTableSubCommands { + fn address_lookup_table_subcommands(self) -> Self; +} + +impl AddressLookupTableSubCommands for App<'_, '_> { + fn address_lookup_table_subcommands(self) -> Self { + self.subcommand( + SubCommand::with_name("address-lookup-table") + .about("Address lookup table management") + .setting(AppSettings::SubcommandRequiredElseHelp) + .subcommand( + SubCommand::with_name("create") + .about("Create a lookup table") + .arg( + 
Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the created lookup table [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("freeze") + .about("Permanently freezes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table freeze warning"), + ), + ) + .subcommand( + SubCommand::with_name("extend") + .about("Append more addresses to a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .validator(is_pubkey) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("payer") + .long("payer") + .value_name("PAYER_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Account that will pay rent fees for the extended lookup table [default: the default configured keypair]") + ) + .arg( + Arg::with_name("addresses") + .long("addresses") + .value_name("ADDRESS_1,ADDRESS_2") + .takes_value(true) + .use_delimiter(true) + .required(true) + .validator(is_pubkey) + .help("Comma separated list of addresses to append") + ) + ) + .subcommand( + SubCommand::with_name("deactivate") + .about("Permanently deactivates a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + .help("Lookup table authority [default: the default configured keypair]") + ) + .arg( + Arg::with_name("bypass_warning") + .long("bypass-warning") + .takes_value(false) + .help("Bypass the permanent lookup table deactivation warning"), + ), + ) + .subcommand( + SubCommand::with_name("close") + .about("Permanently closes a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .required(true) + .help("Address of the lookup table") + ) + .arg( + Arg::with_name("recipient") + .long("recipient") + .value_name("RECIPIENT_ADDRESS") + .takes_value(true) + .validator(is_pubkey) + .help("Address of the recipient account to deposit the closed account's lamports [default: the default configured keypair]") + ) + .arg( + Arg::with_name("authority") + .long("authority") + .value_name("AUTHORITY_SIGNER") + .takes_value(true) + .validator(is_valid_signer) + 
.help("Lookup table authority [default: the default configured keypair]") + ) + ) + .subcommand( + SubCommand::with_name("get") + .about("Display information about a lookup table") + .arg( + Arg::with_name("lookup_table_address") + .index(1) + .value_name("LOOKUP_TABLE_ADDRESS") + .takes_value(true) + .help("Address of the lookup table to show") + ) + ) + ) + } +} + +pub fn parse_address_lookup_table_subcommand( + matches: &ArgMatches<'_>, + default_signer: &DefaultSigner, + wallet_manager: &mut Option>, +) -> Result { + let (subcommand, sub_matches) = matches.subcommand(); + + let response = match (subcommand, sub_matches) { + ("create", Some(matches)) => { + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + }, + ), + signers: signer_info.signers, + } + } + ("freeze", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("extend", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let payer_pubkey = if let Ok((payer_signer, Some(payer_pubkey))) = + signer_of(matches, "payer", wallet_manager) + { + bulk_signers.push(payer_signer); + Some(payer_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? 
+ .pubkey(), + ) + }; + + let new_addresses: Vec<Pubkey> = values_of(matches, "addresses").unwrap(); + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + payer_signer_index: signer_info.index_of(payer_pubkey).unwrap(), + new_addresses, + }, + ), + signers: signer_info.signers, + } + } + ("deactivate", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + bypass_warning: matches.is_present("bypass_warning"), + }, + ), + signers: signer_info.signers, + } + } + ("close", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + let mut bulk_signers = vec![Some( + default_signer.signer_from_path(matches, wallet_manager)?, + )]; + + let authority_pubkey = if let Ok((authority_signer, Some(authority_pubkey))) = + signer_of(matches, "authority", wallet_manager) + { + bulk_signers.push(authority_signer); + Some(authority_pubkey) + } else { + Some( + default_signer + .signer_from_path(matches, wallet_manager)? + .pubkey(), + ) + }; + + let recipient_pubkey = if let Some(recipient_pubkey) = pubkey_of(matches, "recipient") { + recipient_pubkey + } else { + default_signer + .signer_from_path(matches, wallet_manager)?
+ .pubkey() }; + + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index: signer_info.index_of(authority_pubkey).unwrap(), + recipient_pubkey, + }, + ), + signers: signer_info.signers, + } + } + ("get", Some(matches)) => { + let lookup_table_pubkey = pubkey_of(matches, "lookup_table_address").unwrap(); + + CliCommandInfo { + command: CliCommand::AddressLookupTable( + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }, + ), + signers: vec![], + } + } + _ => unreachable!(), + }; + Ok(response) +} + +pub fn process_address_lookup_table_subcommand( + rpc_client: Arc<RpcClient>, + config: &CliConfig, + subcommand: &AddressLookupTableCliCommand, +) -> ProcessResult { + match subcommand { + AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index, + payer_signer_index, + } => process_create_lookup_table( + &rpc_client, + config, + *authority_signer_index, + *payer_signer_index, + ), + AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_freeze_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index, + payer_signer_index, + new_addresses, + } => process_extend_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *payer_signer_index, + new_addresses.to_vec(), + ), + AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index, + bypass_warning, + } => process_deactivate_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *bypass_warning, + ), + AddressLookupTableCliCommand::CloseLookupTable { + lookup_table_pubkey, + authority_signer_index, + recipient_pubkey, + } => process_close_lookup_table( + &rpc_client, + config, + *lookup_table_pubkey, + *authority_signer_index, + *recipient_pubkey, + ), + AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + } => process_show_lookup_table(&rpc_client, config, *lookup_table_pubkey), + } +} + +fn process_create_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + authority_signer_index: usize, + payer_signer_index: usize, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + let payer_signer = config.signers[payer_signer_index]; + + let get_clock_result = rpc_client + .get_account_with_commitment(&sysvar::clock::id(), CommitmentConfig::finalized())?; + let clock_account = get_clock_result.value.expect("Clock account doesn't exist"); + let clock: Clock = from_account(&clock_account).ok_or_else(|| { + CliError::RpcRequestError("Failed to deserialize clock sysvar".to_string()) + })?; + + let authority_address = authority_signer.pubkey(); + let payer_address = payer_signer.pubkey(); + let (create_lookup_table_ix, lookup_table_address) = + create_lookup_table(authority_address, payer_address, clock.slot); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[create_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign( + &[config.signers[0], authority_signer, payer_signer], + blockhash, + )?; + let result =
rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Create failed: {}", err).into()), + Ok(signature) => Ok(config + .output_format + .formatted_string(&CliAddressLookupTableCreated { + lookup_table_address: lookup_table_address.to_string(), + signature: signature.to_string(), + })), + } +} + +pub const FREEZE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is frozen, it can never be modified or unfrozen again. \ +To proceed with freezing, rerun the `freeze` command with the `--bypass-warning` flag"; + +fn process_freeze_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(FREEZE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let freeze_lookup_table_ix = freeze_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[freeze_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Freeze failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_extend_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + payer_signer_index: usize, + new_addresses: Vec, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + let payer_signer = config.signers[payer_signer_index]; + + if new_addresses.is_empty() { + return Err("Lookup tables must be extended by at least one address".into()); + } + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let payer_address = 
payer_signer.pubkey(); + let extend_lookup_table_ix = extend_lookup_table( + lookup_table_pubkey, + authority_address, + Some(payer_address), + new_addresses, + ); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[extend_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Extend failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +pub const DEACTIVATE_LOOKUP_TABLE_WARNING: &str = "WARNING! \ +Once a lookup table is deactivated, it is no longer usable by transactions. +Deactivated lookup tables may only be closed and cannot be recreated at the same address. \ +To proceed with deactivation, rerun the `deactivate` command with the `--bypass-warning` flag"; + +fn process_deactivate_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + bypass_warning: bool, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + if !bypass_warning { + return Err(String::from(DEACTIVATE_LOOKUP_TABLE_WARNING).into()); + } + + let authority_address = authority_signer.pubkey(); + let deactivate_lookup_table_ix = + deactivate_lookup_table(lookup_table_pubkey, authority_address); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[deactivate_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Deactivate failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_close_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, + authority_signer_index: usize, + recipient_pubkey: Pubkey, +) -> ProcessResult { + let authority_signer = config.signers[authority_signer_index]; + + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if 
!solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + if lookup_table_account.meta.deactivation_slot == u64::MAX { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not deactivated. Only deactivated lookup tables may be closed", + ) + .into()); + } + + let authority_address = authority_signer.pubkey(); + let close_lookup_table_ix = + close_lookup_table(lookup_table_pubkey, authority_address, recipient_pubkey); + + let blockhash = rpc_client.get_latest_blockhash()?; + let mut tx = Transaction::new_unsigned(Message::new( + &[close_lookup_table_ix], + Some(&config.signers[0].pubkey()), + )); + + tx.try_sign(&[config.signers[0], authority_signer], blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner_and_config( + &tx, + config.commitment, + RpcSendTransactionConfig { + skip_preflight: false, + preflight_commitment: Some(config.commitment.commitment), + ..RpcSendTransactionConfig::default() + }, + ); + match result { + Err(err) => Err(format!("Close failed: {}", err).into()), + Ok(signature) => Ok(config.output_format.formatted_string(&CliSignature { + signature: signature.to_string(), + })), + } +} + +fn process_show_lookup_table( + rpc_client: &RpcClient, + config: &CliConfig, + lookup_table_pubkey: Pubkey, +) -> ProcessResult { + let get_lookup_table_result = + rpc_client.get_account_with_commitment(&lookup_table_pubkey, config.commitment)?; + let lookup_table_account = get_lookup_table_result.value.ok_or_else(|| { + format!("Lookup table account {lookup_table_pubkey} not found, was it already closed?") + })?; + if !solana_address_lookup_table_program::check_id(&lookup_table_account.owner) { + return Err(format!( + "Lookup table account {lookup_table_pubkey} is not owned by the Address Lookup Table program", + ) + .into()); + } + + let lookup_table_account = AddressLookupTable::deserialize(&lookup_table_account.data)?; + Ok(config + .output_format + .formatted_string(&CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: lookup_table_account + .meta + .authority + .as_ref() + .map(ToString::to_string), + deactivation_slot: lookup_table_account.meta.deactivation_slot, + last_extended_slot: lookup_table_account.meta.last_extended_slot, + addresses: lookup_table_account + .addresses + .iter() + .map(ToString::to_string) + .collect(), + })) +} diff --git a/cli/src/clap_app.rs b/cli/src/clap_app.rs index 3d48ed37160f93..1760b5161783f2 100644 --- a/cli/src/clap_app.rs +++ b/cli/src/clap_app.rs @@ -1,7 +1,7 @@ use { crate::{ - cli::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, stake::*, - validator_info::*, vote::*, wallet::*, + address_lookup_table::AddressLookupTableSubCommands, cli::*, cluster_query::*, feature::*, + inflation::*, nonce::*, program::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{App, AppSettings, Arg, ArgGroup, SubCommand}, solana_clap_utils::{self, input_validators::*, keypair::*}, @@ -130,6 +130,7 @@ pub fn get_clap_app<'ab, 'v>(name: &str, about: &'ab str, version: &'v str) -> A .inflation_subcommands() .nonce_subcommands() .program_subcommands() + .address_lookup_table_subcommands() .stake_subcommands() .validator_info_subcommands() .vote_subcommands() diff --git a/cli/src/cli.rs b/cli/src/cli.rs 
index 2a2397efd3e170..d202a2a69f66c8 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -1,7 +1,7 @@ use { crate::{ - clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, program::*, - spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, + address_lookup_table::*, clap_app::*, cluster_query::*, feature::*, inflation::*, nonce::*, + program::*, spend_utils::*, stake::*, validator_info::*, vote::*, wallet::*, }, clap::{crate_description, crate_name, value_t_or_exit, ArgMatches, Shell}, log::*, @@ -440,6 +440,8 @@ pub enum CliCommand { StakeMinimumDelegation { use_lamports_unit: bool, }, + // Address lookup table commands + AddressLookupTable(AddressLookupTableCliCommand), } #[derive(Debug, PartialEq)] @@ -687,6 +689,9 @@ pub fn parse_command( ("program", Some(matches)) => { parse_program_subcommand(matches, default_signer, wallet_manager) } + ("address-lookup-table", Some(matches)) => { + parse_address_lookup_table_subcommand(matches, default_signer, wallet_manager) + } ("wait-for-max-stake", Some(matches)) => { let max_stake_percent = value_t_or_exit!(matches, "max_percent", f32); Ok(CliCommandInfo { @@ -1627,6 +1632,11 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { derived_address_program_id.as_ref(), compute_unit_price.as_ref(), ), + + // Address Lookup Table Commands + CliCommand::AddressLookupTable(subcommand) => { + process_address_lookup_table_subcommand(rpc_client, config, subcommand) + } } } diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 85d90869ff41b3..c271990b58b7ce 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -23,6 +23,7 @@ extern crate const_format; extern crate serde_derive; +pub mod address_lookup_table; pub mod checks; pub mod clap_app; pub mod cli; diff --git a/cli/tests/address_lookup_table.rs b/cli/tests/address_lookup_table.rs new file mode 100644 index 00000000000000..5d370d48c4eafd --- /dev/null +++ b/cli/tests/address_lookup_table.rs @@ -0,0 +1,216 @@ +use { + solana_cli::{ + address_lookup_table::{ + AddressLookupTableCliCommand, DEACTIVATE_LOOKUP_TABLE_WARNING, + FREEZE_LOOKUP_TABLE_WARNING, + }, + cli::{process_command, CliCommand, CliConfig}, + }, + solana_cli_output::{CliAddressLookupTable, CliAddressLookupTableCreated, OutputFormat}, + solana_faucet::faucet::run_local_faucet, + solana_sdk::{ + native_token::LAMPORTS_PER_SOL, + pubkey::Pubkey, + signature::{Keypair, Signer}, + }, + solana_streamer::socket::SocketAddrSpace, + solana_test_validator::TestValidator, + std::str::FromStr, +}; + +#[test] +fn test_cli_create_extend_and_freeze_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.output_format = OutputFormat::JsonCompact; + + // Airdrop SOL for transaction fees + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 10 * LAMPORTS_PER_SOL, + }; + process_command(&config).unwrap(); + + // Create lookup table + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: 0, + payer_signer_index: 0, + }); + let response: CliAddressLookupTableCreated = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + let 
lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap(); + + // Validate created lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let response: CliAddressLookupTable = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + response, + CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: Some(keypair.pubkey().to_string()), + deactivation_slot: u64::MAX, + last_extended_slot: 0, + addresses: vec![], + } + ); + } + + // Extend lookup table + let new_addresses: Vec<Pubkey> = (0..5).map(|_| Pubkey::new_unique()).collect(); + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ExtendLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + payer_signer_index: 0, + new_addresses: new_addresses.clone(), + }); + process_command(&config).unwrap(); + + // Validate extended lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { + addresses, + last_extended_slot, + .. + } = serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + addresses + .into_iter() + .map(|address| Pubkey::from_str(&address).unwrap()) + .collect::<Vec<Pubkey>>(), + new_addresses + ); + assert!(last_extended_slot > 0); + } + + // Freeze lookup table w/o bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: false, + }); + let process_err = process_command(&config).unwrap_err(); + assert_eq!(process_err.to_string(), FREEZE_LOOKUP_TABLE_WARNING); + + // Freeze lookup table w/ bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::FreezeLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: true, + }); + process_command(&config).unwrap(); + + // Validate frozen lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { authority, ..
} = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert!(authority.is_none()); + } +} + +#[test] +fn test_cli_create_and_deactivate_address_lookup_table() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let mut config = CliConfig::recent_for_tests(); + let keypair = Keypair::new(); + config.json_rpc_url = test_validator.rpc_url(); + config.signers = vec![&keypair]; + config.output_format = OutputFormat::JsonCompact; + + // Airdrop SOL for transaction fees + config.command = CliCommand::Airdrop { + pubkey: None, + lamports: 10 * LAMPORTS_PER_SOL, + }; + process_command(&config).unwrap(); + + // Create lookup table + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::CreateLookupTable { + authority_signer_index: 0, + payer_signer_index: 0, + }); + let response: CliAddressLookupTableCreated = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + let lookup_table_pubkey = Pubkey::from_str(&response.lookup_table_address).unwrap(); + + // Validate created lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let response: CliAddressLookupTable = + serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_eq!( + response, + CliAddressLookupTable { + lookup_table_address: lookup_table_pubkey.to_string(), + authority: Some(keypair.pubkey().to_string()), + deactivation_slot: u64::MAX, + last_extended_slot: 0, + addresses: vec![], + } + ); + } + + // Deactivate lookup table w/o bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: false, + }); + let process_err = process_command(&config).unwrap_err(); + assert_eq!(process_err.to_string(), DEACTIVATE_LOOKUP_TABLE_WARNING); + + // Deactivate lookup table w/ bypass + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::DeactivateLookupTable { + lookup_table_pubkey, + authority_signer_index: 0, + bypass_warning: true, + }); + process_command(&config).unwrap(); + + // Validate deactivated lookup table + { + config.command = + CliCommand::AddressLookupTable(AddressLookupTableCliCommand::ShowLookupTable { + lookup_table_pubkey, + }); + let CliAddressLookupTable { + deactivation_slot, .. + } = serde_json::from_str(&process_command(&config).unwrap()).unwrap(); + assert_ne!(deactivation_slot, u64::MAX); + } +} From da75405a7eb54a3142dc0989f5c0859c39a0d906 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Thu, 18 Aug 2022 22:39:31 +0000 Subject: [PATCH 41/67] adds hash domain to ping-pong protocol (#27193) In order to maintain backward compatibility, for now the responding node will hash the token both with and without domain so that the other node will accept the response regardless of its upgrade status. Once the cluster has upgraded to the new code, we will remove the legacy domain = false case. 
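To make the hashing scheme concrete, here is a minimal sketch of the two pong-hash modes (a sketch only, assuming the solana-sdk hashing API; the helper names pong_hash_legacy and pong_hash_with_domain are illustrative and do not appear in this patch):

    use solana_sdk::hash::{self, Hash};

    const PING_PONG_HASH_PREFIX: &[u8] = b"SOLANA_PING_PONG";

    // Legacy mode: hash only the serialized ping token.
    fn pong_hash_legacy(token: &[u8]) -> Hash {
        hash::hash(token)
    }

    // Domain-separated mode: prepend a fixed prefix so ping-pong hashes
    // cannot collide with hashes of the same bytes in other contexts.
    fn pong_hash_with_domain(token: &[u8]) -> Hash {
        hash::hashv(&[PING_PONG_HASH_PREFIX, token])
    }

During the transition a responding node sends a pong for each mode, and the ping cache records both digests, so a response verifies no matter which side has upgraded.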
--- core/src/ancestor_hashes_service.rs | 15 +++++++---- core/src/serve_repair.rs | 15 +++++++---- gossip/src/cluster_info.rs | 30 +++++++++++++--------- gossip/src/ping_pong.rs | 40 ++++++++++++++++++++++++----- 4 files changed, 72 insertions(+), 28 deletions(-) diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 330ebb072abc10..4813ed11685069 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -425,16 +425,21 @@ impl AncestorHashesService { stats.invalid_packets += 1; return None; } - if ping.verify() { - stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { + if !ping.verify() { + stats.ping_err_verify_count += 1; + return None; + } + stats.ping_count += 1; + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { let pong = RepairProtocol::Pong(pong); if let Ok(pong_bytes) = serialize(&pong) { let _ignore = ancestor_socket.send_to(&pong_bytes[..], from_addr); } } - } else { - stats.ping_err_verify_count += 1; } None } diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 2f755ebb17f4bb..47443bcd9acc2d 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -1044,11 +1044,16 @@ impl ServeRepair { } packet.meta.set_discard(true); stats.ping_count += 1; - if let Ok(pong) = Pong::new(&ping, keypair) { - let pong = RepairProtocol::Pong(pong); - if let Ok(pong_bytes) = serialize(&pong) { - let from_addr = packet.meta.socket_addr(); - pending_pongs.push((pong_bytes, from_addr)); + // Respond both with and without domain so that the other node + // will accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. + for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, keypair) { + let pong = RepairProtocol::Pong(pong); + if let Ok(pong_bytes) = serialize(&pong) { + let from_addr = packet.meta.socket_addr(); + pending_pongs.push((pong_bytes, from_addr)); + } } } } diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 40142f70e3d336..9d692b8a08aba6 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -2170,14 +2170,18 @@ impl ClusterInfo { I: IntoIterator, { let keypair = self.keypair(); - let pongs_and_dests: Vec<_> = pings - .into_iter() - .filter_map(|(addr, ping)| { - let pong = Pong::new(&ping, &keypair).ok()?; - let pong = Protocol::PongMessage(pong); - Some((addr, pong)) - }) - .collect(); + let mut pongs_and_dests = Vec::new(); + for (addr, ping) in pings { + // Respond both with and without domain so that the other node will + // accept the response regardless of its upgrade status. + // TODO: remove domain = false once cluster is upgraded. 
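+ // Note: one ping now yields two pongs (legacy and domain-separated),
+ // doubling pong traffic until the legacy branch is removed.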
+ for domain in [false, true] { + if let Ok(pong) = Pong::new(domain, &ping, &keypair) { + let pong = Protocol::PongMessage(pong); + pongs_and_dests.push((addr, pong)); + } + } + } if pongs_and_dests.is_empty() { None } else { @@ -3287,7 +3291,9 @@ RPC Enabled Nodes: 1"#; let pongs: Vec<(SocketAddr, Pong)> = pings .iter() .zip(&remote_nodes) - .map(|(ping, (keypair, socket))| (*socket, Pong::new(ping, keypair).unwrap())) + .map(|(ping, (keypair, socket))| { + (*socket, Pong::new(/*domain:*/ true, ping, keypair).unwrap()) + }) .collect(); let now = now + Duration::from_millis(1); cluster_info.handle_batch_pong_messages(pongs, now); @@ -3330,7 +3336,7 @@ RPC Enabled Nodes: 1"#; .collect(); let pongs: Vec<_> = pings .iter() - .map(|ping| Pong::new(ping, &this_node).unwrap()) + .map(|ping| Pong::new(/*domain:*/ false, ping, &this_node).unwrap()) .collect(); let recycler = PacketBatchRecycler::default(); let packets = cluster_info @@ -3342,9 +3348,9 @@ RPC Enabled Nodes: 1"#; &recycler, ) .unwrap(); - assert_eq!(remote_nodes.len(), packets.len()); + assert_eq!(remote_nodes.len() * 2, packets.len()); for (packet, (_, socket), pong) in izip!( - packets.into_iter(), + packets.into_iter().step_by(2), remote_nodes.into_iter(), pongs.into_iter() ) { diff --git a/gossip/src/ping_pong.rs b/gossip/src/ping_pong.rs index 6c3a219cfdb81b..16961f26f18388 100644 --- a/gossip/src/ping_pong.rs +++ b/gossip/src/ping_pong.rs @@ -16,6 +16,8 @@ use { }, }; +const PING_PONG_HASH_PREFIX: &[u8] = "SOLANA_PING_PONG".as_bytes(); + #[derive(AbiExample, Debug, Deserialize, Serialize)] pub struct Ping { from: Pubkey, @@ -100,8 +102,17 @@ impl Signable for Ping { } impl Pong { - pub fn new(ping: &Ping, keypair: &Keypair) -> Result { - let hash = hash::hash(&serialize(&ping.token)?); + pub fn new( + domain: bool, + ping: &Ping, + keypair: &Keypair, + ) -> Result { + let token = serialize(&ping.token)?; + let hash = if domain { + hash::hashv(&[PING_PONG_HASH_PREFIX, &token]) + } else { + hash::hash(&token) + }; let pong = Pong { from: keypair.pubkey(), hash, @@ -187,9 +198,15 @@ impl PingCache { Some(t) if now.saturating_duration_since(*t) < delay => None, _ => { let ping = pingf()?; - let hash = hash::hash(&serialize(&ping.token).ok()?); - self.pings.put(node, now); + let token = serialize(&ping.token).ok()?; + // For backward compatibility, for now responses both with and + // without domain are accepted. + // TODO: remove no domain case once cluster is upgraded. 
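+ // Record the token under both digests so a pong computed with either
+ // the legacy or the domain-separated hash matches this pending ping.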
+ let hash = hash::hash(&token); + self.pending_cache.put(hash, node); + let hash = hash::hashv(&[PING_PONG_HASH_PREFIX, &token]); self.pending_cache.put(hash, node); + self.pings.put(node, now); Some(ping) } } @@ -281,10 +298,18 @@ mod tests { assert!(ping.verify()); assert!(ping.sanitize().is_ok()); - let pong = Pong::new(&ping, &keypair).unwrap(); + let pong = Pong::new(/*domain:*/ false, &ping, &keypair).unwrap(); assert!(pong.verify()); assert!(pong.sanitize().is_ok()); assert_eq!(hash::hash(&ping.token), pong.hash); + + let pong = Pong::new(/*domain:*/ true, &ping, &keypair).unwrap(); + assert!(pong.verify()); + assert!(pong.sanitize().is_ok()); + assert_eq!( + hash::hashv(&[PING_PONG_HASH_PREFIX, &ping.token]), + pong.hash + ); } #[test] @@ -339,7 +364,10 @@ mod tests { assert!(ping.is_none()); } Some(ping) => { - let pong = Pong::new(ping, keypair).unwrap(); + let domain = rng.gen_ratio(1, 2); + let pong = Pong::new(domain, ping, keypair).unwrap(); + assert!(cache.add(&pong, *socket, now)); + let pong = Pong::new(!domain, ping, keypair).unwrap(); + assert!(cache.add(&pong, *socket, now)); } } From f7276534dd4f848756290ecfbc10b21fd08f5785 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Fri, 19 Aug 2022 01:19:44 -0700 Subject: [PATCH 42/67] Revert "Rust v1.63.0 (#27148)" (#27245) This reverts commit a2e7bdf50ac5e1d4c633f64f6362028b4164c003. --- account-decoder/src/parse_address_lookup_table.rs | 2 +- banks-server/src/banks_server.rs | 10 +++++++--- ci/docker-rust-nightly/Dockerfile | 2 +- ci/docker-rust/Dockerfile | 2 +- ci/rust-version.sh | 4 ++-- ci/test-checks.sh | 14 -------------- client/tests/quic_client.rs | 2 +- core/src/banking_stage.rs | 2 +- core/src/sigverify_shreds.rs | 2 +- frozen-abi/src/abi_example.rs | 2 +- gossip/src/crds_gossip_pull.rs | 2 +- ledger/src/bigtable_upload.rs | 2 +- ledger/src/blockstore.rs | 2 +- ledger/src/blockstore_meta.rs | 2 +- ledger/src/shred.rs | 2 +- ledger/src/shred/shred_code.rs | 2 +- local-cluster/src/local_cluster.rs | 2 +- perf/src/sigverify.rs | 7 ++++++- poh/src/poh_recorder.rs | 2 +- rpc/src/rpc.rs | 6 ++++-- rpc/src/rpc_subscriptions.rs | 5 ++++- runtime/src/account_rent_state.rs | 2 +- runtime/src/accounts.rs | 2 +- runtime/src/accounts_db.rs | 8 ++++---- runtime/src/bank.rs | 12 ++++++------ runtime/src/expected_rent_collection.rs | 8 ++++---- runtime/src/hardened_unpack.rs | 2 +- runtime/src/in_mem_accounts_index.rs | 2 -- runtime/src/serde_snapshot.rs | 4 ++-- runtime/src/serde_snapshot/newer.rs | 4 ++-- runtime/src/serde_snapshot/tests.rs | 2 +- runtime/src/snapshot_minimizer.rs | 2 +- runtime/src/snapshot_utils.rs | 2 +- runtime/src/storable_accounts.rs | 2 +- runtime/src/system_instruction_processor.rs | 2 +- sdk/program/src/message/compiled_keys.rs | 10 +++++----- sdk/program/src/nonce/state/mod.rs | 2 +- sdk/program/src/stake/tools.rs | 2 +- streamer/src/streamer.rs | 2 +- validator/src/bootstrap.rs | 6 ++++-- zk-token-sdk/src/instruction/close_account.rs | 2 +- zk-token-sdk/src/instruction/withdraw.rs | 2 +- 42 files changed, 78 insertions(+), 78 deletions(-) diff --git a/account-decoder/src/parse_address_lookup_table.rs b/account-decoder/src/parse_address_lookup_table.rs index ca461f2636e92a..26955d74a74242 100644 --- a/account-decoder/src/parse_address_lookup_table.rs +++ b/account-decoder/src/parse_address_lookup_table.rs @@ -19,7 +19,7 @@ pub fn parse_address_lookup_table( }) } -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase",
tag = "type", content = "info")] pub enum LookupTableAccountType { Uninitialized, diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index a4b65601c389b3..c73844d2571560 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -153,9 +153,13 @@ fn verify_transaction( transaction: &Transaction, feature_set: &Arc, ) -> transaction::Result<()> { - transaction.verify()?; - transaction.verify_precompiles(feature_set)?; - Ok(()) + if let Err(err) = transaction.verify() { + Err(err) + } else if let Err(err) = transaction.verify_precompiles(feature_set) { + Err(err) + } else { + Ok(()) + } } fn simulate_transaction( diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 12aeff7e5e0b81..fff0f366d32f29 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.63.0 +FROM solanalabs/rust:1.60.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index a256d308d9b27a..6805f85fcd85df 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.63.0 +FROM rust:1.60.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 792863c3280fa1..dc3570fa939e79 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.63.0 + stable_version=1.60.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2022-08-12 + nightly_version=2022-04-01 fi diff --git a/ci/test-checks.sh b/ci/test-checks.sh index 65e5e6271aa4bf..72c174395bd1d9 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -65,25 +65,11 @@ fi _ ci/order-crates-for-publishing.py -nightly_clippy_allows=( - # This lint occurs all over the code base - "--allow=clippy::significant_drop_in_scrutinee" - - # The prost crate, used by solana-storage-proto, generates Rust source that - # triggers this lint. Need to resolve upstream in prost - "--allow=clippy::derive_partial_eq_without_eq" - - # This link seems to incorrectly trigger in - # `programs/bpf_loader/src/syscalls/{lib,cpi}.rs` - "--allow=clippy::explicit_auto_deref" -) - # -Z... 
is needed because of clippy bug: https://github.com/rust-lang/rust-clippy/issues/4612 # run nightly clippy for `sdk/` as there's a moderate amount of nightly-only code there _ scripts/cargo-for-all-lock-files.sh -- nightly clippy -Zunstable-options --all-targets -- \ --deny=warnings \ --deny=clippy::integer_arithmetic \ - "${nightly_clippy_allows[@]}" _ scripts/cargo-for-all-lock-files.sh -- nightly sort --workspace --check _ scripts/cargo-for-all-lock-files.sh -- nightly fmt --all -- --check diff --git a/client/tests/quic_client.rs b/client/tests/quic_client.rs index 1c5348177dd644..980476aee7b2c6 100644 --- a/client/tests/quic_client.rs +++ b/client/tests/quic_client.rs @@ -27,7 +27,7 @@ mod tests { let mut all_packets = vec![]; let now = Instant::now(); let mut total_packets: usize = 0; - while now.elapsed().as_secs() < 10 { + while now.elapsed().as_secs() < 5 { if let Ok(packets) = receiver.recv_timeout(Duration::from_secs(1)) { total_packets = total_packets.saturating_add(packets.len()); all_packets.push(packets) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 1c3e95e2bd6c5d..2547c00f94e5ca 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -1335,7 +1335,7 @@ impl BankingStage { ); retryable_transaction_indexes.extend(execution_results.iter().enumerate().filter_map( - |(index, execution_result)| execution_result.was_executed().then_some(index), + |(index, execution_result)| execution_result.was_executed().then(|| index), )); return ExecuteAndCommitTransactionsOutput { diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f1f08ec671d2f3..f9a50ab8b2a954 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -151,7 +151,7 @@ fn get_slot_leaders( let leader = leaders.entry(slot).or_insert_with(|| { let leader = leader_schedule_cache.slot_leader_at(slot, Some(bank))?; // Discard the shred if the slot leader is the node itself. - (&leader != self_pubkey).then_some(leader) + (&leader != self_pubkey).then(|| leader) }); if leader.is_none() { packet.meta.set_discard(true); diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 2e1bdbcac16d0d..e0dfa50b8acea6 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -411,7 +411,7 @@ lazy_static! { impl AbiExample for &Vec { fn example() -> Self { info!("AbiExample for (&Vec): {}", type_name::()); - &VEC_U8 + &*VEC_U8 } } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 04df91227b971c..2780bf7dabf56b 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -256,7 +256,7 @@ impl CrdsGossipPull { if let Some(ping) = ping { pings.push((peer.gossip, ping)); } - check.then_some((weight, peer)) + check.then(|| (weight, peer)) }) .unzip() }; diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index c8cdef587b1fc7..f43b07db12592a 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -60,7 +60,7 @@ pub async fn upload_confirmed_blocks( starting_slot, err ) })? 
- .map_while(|slot| (slot <= ending_slot).then_some(slot)) + .map_while(|slot| (slot <= ending_slot).then(|| slot)) .collect(); if blockstore_slots.is_empty() { diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index a4158a1778e2f1..2c0913a5ab8cfb 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -3146,7 +3146,7 @@ impl Blockstore { } .expect("fetch from DuplicateSlots column family failed")?; let new_shred = Shred::new_from_serialized_shred(payload).unwrap(); - (existing_shred != *new_shred.payload()).then_some(existing_shred) + (existing_shred != *new_shred.payload()).then(|| existing_shred) } pub fn has_duplicate_shreds_in_slot(&self, slot: Slot) -> bool { diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 5cacf78198dafb..65101fe98348ba 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -61,7 +61,7 @@ mod serde_compat { D: Deserializer<'de>, { let val = u64::deserialize(deserializer)?; - Ok((val != u64::MAX).then_some(val)) + Ok((val != u64::MAX).then(|| val)) } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index bef3df72515640..e17055b1e7d9a9 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -613,7 +613,7 @@ pub mod layout { merkle::ShredData::get_signed_message_range(proof_size)? } }; - (shred.len() <= range.end).then_some(range) + (shred.len() <= range.end).then(|| range) } pub(crate) fn get_reference_tick(shred: &[u8]) -> Result { diff --git a/ledger/src/shred/shred_code.rs b/ledger/src/shred/shred_code.rs index 1fe3fef026ff18..538bb25427f38f 100644 --- a/ledger/src/shred/shred_code.rs +++ b/ledger/src/shred/shred_code.rs @@ -119,7 +119,7 @@ pub(super) fn erasure_shard_index(shred: &T) -> Option let position = usize::from(coding_header.position); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = position.checked_add(num_data_shreds)?; - (index < fec_set_size).then_some(index) + (index < fec_set_size).then(|| index) } pub(super) fn sanitize(shred: &T) -> Result<(), Error> { diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 0f1ca19f876aff..f7b68647053eaf 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -319,7 +319,7 @@ impl LocalCluster { }) .collect(); for (stake, validator_config, (key, _)) in izip!( - config.node_stakes[1..].iter(), + (&config.node_stakes[1..]).iter(), config.validator_configs[1..].iter(), validator_keys[1..].iter(), ) { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 1e40d29adcf13d..aee1b310dd59d9 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -830,7 +830,12 @@ mod tests { pub fn memfind(a: &[A], b: &[A]) -> Option { assert!(a.len() >= b.len()); let end = a.len() - b.len() + 1; - (0..end).find(|&i| a[i..i + b.len()] == b[..]) + for i in 0..end { + if a[i..i + b.len()] == b[..] 
{ + return Some(i); + } + } + None } #[test] diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index d6c85c3fdf7f3f..aef2d7393e9f51 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -505,7 +505,7 @@ impl PohRecorder { start: Arc::new(Instant::now()), min_tick_height: bank.tick_height(), max_tick_height: bank.max_tick_height(), - transaction_index: track_transaction_indexes.then_some(0), + transaction_index: track_transaction_indexes.then(|| 0), }; trace!("new working bank"); assert_eq!(working_bank.bank.ticks_per_slot(), self.ticks_per_slot()); diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index 9cad136b581927..fdf72d8f5d7299 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -129,7 +129,7 @@ fn new_response(bank: &Bank, value: T) -> RpcResponse { /// Wrapper for rpc return types of methods that provide responses both with and without context. /// Main purpose of this is to fix methods that lack context information in their return type, /// without breaking backwards compatibility. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(untagged)] pub enum OptionalContext { Context(RpcResponse), @@ -3646,7 +3646,9 @@ pub mod rpc_full { } if !skip_preflight { - verify_transaction(&transaction, &preflight_bank.feature_set)?; + if let Err(e) = verify_transaction(&transaction, &preflight_bank.feature_set) { + return Err(e); + } match meta.health.check() { RpcHealthStatus::Ok => (), diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 896b6a9ad5f453..bd9fe337460279 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1001,7 +1001,10 @@ impl RpcSubscriptions { let mut slots_to_notify: Vec<_> = (*w_last_unnotified_slot..slot).collect(); let ancestors = bank.proper_ancestors_set(); - slots_to_notify.retain(|slot| ancestors.contains(slot)); + slots_to_notify = slots_to_notify + .into_iter() + .filter(|slot| ancestors.contains(slot)) + .collect(); slots_to_notify.push(slot); for s in slots_to_notify { // To avoid skipping a slot that fails this condition, diff --git a/runtime/src/account_rent_state.rs b/runtime/src/account_rent_state.rs index 74cbc5b81af5f1..629502caf475fe 100644 --- a/runtime/src/account_rent_state.rs +++ b/runtime/src/account_rent_state.rs @@ -104,7 +104,7 @@ pub(crate) fn check_rent_state( .get_account_at_index(index) .expect(expect_msg) .borrow(), - include_account_index_in_err.then_some(index), + include_account_index_in_err.then(|| index), prevent_crediting_accounts_that_end_rent_paying, )?; } diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index ade9d327ba1046..86d14aaf7b681c 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -440,7 +440,7 @@ impl Accounts { payer_account, feature_set .is_active(&feature_set::include_account_index_in_rent_error::ID) - .then_some(payer_index), + .then(|| payer_index), feature_set .is_active(&feature_set::prevent_crediting_accounts_that_end_rent_paying::id()), ) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 702f70456b68be..c84f45501faacf 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2174,7 +2174,7 @@ impl AccountsDb { // figure out how many ancient accounts have been reclaimed let old_reclaims = reclaims .iter() - .filter_map(|(slot, _)| (slot < &one_epoch_old).then_some(1)) + .filter_map(|(slot, _)| (slot < &one_epoch_old).then(|| 1)) .sum(); ancient_account_cleans.fetch_add(old_reclaims, 
Ordering::Relaxed); reclaims @@ -2392,7 +2392,7 @@ impl AccountsDb { .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then_some(slot) + (slot <= max_slot).then(|| slot) }) .collect() } @@ -3676,7 +3676,7 @@ impl AccountsDb { ) -> Option { self.get_storages_for_slot(slot).and_then(|all_storages| { self.should_move_to_ancient_append_vec(&all_storages, current_ancient, slot) - .then_some(all_storages) + .then(|| all_storages) }) } @@ -5309,7 +5309,7 @@ impl AccountsDb { // with the same slot. let is_being_flushed = !currently_contended_slots.insert(*remove_slot); // If the cache is currently flushing this slot, add it to the list - is_being_flushed.then_some(remove_slot) + is_being_flushed.then(|| remove_slot) }) .cloned() .collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 249d9eec43e926..b22e7baa83dcd6 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2285,7 +2285,7 @@ impl Bank { hash: *self.hash.read().unwrap(), parent_hash: self.parent_hash, parent_slot: self.parent_slot, - hard_forks: &self.hard_forks, + hard_forks: &*self.hard_forks, transaction_count: self.transaction_count.load(Relaxed), tick_height: self.tick_height.load(Relaxed), signature_count: self.signature_count.load(Relaxed), @@ -3308,7 +3308,7 @@ impl Bank { let vote_state = account.vote_state(); let vote_state = vote_state.as_ref().ok()?; let slot_delta = self.slot().checked_sub(vote_state.last_timestamp.slot)?; - (slot_delta <= slots_per_epoch).then_some({ + (slot_delta <= slots_per_epoch).then(|| { ( *pubkey, ( @@ -3978,10 +3978,10 @@ impl Bank { } /// Prepare a transaction batch without locking accounts for transaction simulation. - pub(crate) fn prepare_simulation_batch( - &self, + pub(crate) fn prepare_simulation_batch<'a>( + &'a self, transaction: SanitizedTransaction, - ) -> TransactionBatch<'_, '_> { + ) -> TransactionBatch<'a, '_> { let tx_account_lock_limit = self.get_transaction_account_lock_limit(); let lock_result = transaction .get_account_locks(tx_account_lock_limit) @@ -4382,7 +4382,7 @@ impl Bank { self.feature_set.clone(), compute_budget, timings, - &self.sysvar_cache.read().unwrap(), + &*self.sysvar_cache.read().unwrap(), blockhash, lamports_per_signature, prev_accounts_data_len, diff --git a/runtime/src/expected_rent_collection.rs b/runtime/src/expected_rent_collection.rs index bd6a6bb4842a85..d049430933db33 100644 --- a/runtime/src/expected_rent_collection.rs +++ b/runtime/src/expected_rent_collection.rs @@ -684,7 +684,7 @@ pub mod tests { ); assert_eq!( result, - (!leave_alone).then_some(ExpectedRentCollection { + (!leave_alone).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -712,7 +712,7 @@ pub mod tests { ); assert_eq!( result, - (!greater).then_some(ExpectedRentCollection { + (!greater).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot: partition_index_max_inclusive, @@ -909,7 +909,7 @@ pub mod tests { ); assert_eq!( result, - (account_rent_epoch != 0).then_some(ExpectedRentCollection { + (account_rent_epoch != 0).then(|| ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch + 1, partition_index_from_max_slot: partition_index_max_inclusive, @@ -1084,7 +1084,7 @@ pub mod tests { }; assert_eq!( result, - some_expected.then_some(ExpectedRentCollection { + some_expected.then(|| 
ExpectedRentCollection { partition_from_pubkey, epoch_of_max_storage_slot: rent_collector.epoch, partition_index_from_max_slot, diff --git a/runtime/src/hardened_unpack.rs b/runtime/src/hardened_unpack.rs index ac1c23167343fb..e3af855216e409 100644 --- a/runtime/src/hardened_unpack.rs +++ b/runtime/src/hardened_unpack.rs @@ -384,7 +384,7 @@ where .map(|path_buf| path_buf.as_path()) { Some(path) => { - accounts_path_processor(file, path); + accounts_path_processor(*file, path); UnpackPath::Valid(path) } None => UnpackPath::Invalid, diff --git a/runtime/src/in_mem_accounts_index.rs b/runtime/src/in_mem_accounts_index.rs index 864915399c04cf..82312327bca621 100644 --- a/runtime/src/in_mem_accounts_index.rs +++ b/runtime/src/in_mem_accounts_index.rs @@ -1421,8 +1421,6 @@ impl<'a> FlushGuard<'a> { #[must_use = "if unused, the `flushing` flag will immediately clear"] fn lock(flushing: &'a AtomicBool) -> Option { let already_flushing = flushing.swap(true, Ordering::AcqRel); - // Eager evaluation here would result in dropping Self and clearing flushing flag - #[allow(clippy::unnecessary_lazy_evaluations)] (!already_flushing).then(|| Self { flushing }) } } diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 90d0c6db2e3220..5b42208d042e7c 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -65,7 +65,7 @@ pub(crate) enum SerdeStyle { const MAX_STREAM_SIZE: u64 = 32 * 1024 * 1024 * 1024; -#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq, Eq)] +#[derive(Clone, Debug, Default, Deserialize, Serialize, AbiExample, PartialEq)] pub struct AccountsDbFields( HashMap>, StoredMetaWriteVersion, @@ -120,7 +120,7 @@ impl SnapshotAccountsDbFields { // There must not be any overlap in the slots of storages between the full snapshot and the incremental snapshot incremental_snapshot_storages .iter() - .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then_some(()).ok_or_else(|| { + .all(|storage_entry| !full_snapshot_storages.contains_key(storage_entry.0)).then(|| ()).ok_or_else(|| { io::Error::new(io::ErrorKind::InvalidData, "Snapshots are incompatible: There are storages for the same slot in both the full snapshot and the incremental snapshot!") })?; diff --git a/runtime/src/serde_snapshot/newer.rs b/runtime/src/serde_snapshot/newer.rs index ab27961bf2a49c..512737106aebc9 100644 --- a/runtime/src/serde_snapshot/newer.rs +++ b/runtime/src/serde_snapshot/newer.rs @@ -201,7 +201,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), @@ -228,7 +228,7 @@ impl<'a> TypeContext<'a> for Context { ( SerializableVersionedBank::from(fields), SerializableAccountsDb::<'a, Self> { - accounts_db: &serializable_bank.bank.rc.accounts.accounts_db, + accounts_db: &*serializable_bank.bank.rc.accounts.accounts_db, slot: serializable_bank.bank.rc.slot, account_storage_entries: serializable_bank.snapshot_storages, phantom: std::marker::PhantomData::default(), diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 5834a23f969116..1de6ee2a5d54c6 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -155,7 +155,7 @@ 
fn test_accounts_serialize_style(serde_style: SerdeStyle) { accountsdb_to_stream( serde_style, &mut writer, - &accounts.accounts_db, + &*accounts.accounts_db, 0, &accounts.accounts_db.get_snapshot_storages(0, None, None).0, ) diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 94a82e1d482458..69e7a99e8e7601 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -543,7 +543,7 @@ mod tests { .accounts .iter() .filter_map(|(pubkey, account)| { - stake::program::check_id(account.owner()).then_some(*pubkey) + stake::program::check_id(account.owner()).then(|| *pubkey) }) .collect(); expected_stake_accounts.push(bootstrap_validator_pubkey); diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 2273832c19e28a..93cdbc0f33fc0c 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -1216,7 +1216,7 @@ fn check_are_snapshots_compatible( let incremental_snapshot_archive_info = incremental_snapshot_archive_info.unwrap(); (full_snapshot_archive_info.slot() == incremental_snapshot_archive_info.base_slot()) - .then_some(()) + .then(|| ()) .ok_or_else(|| { SnapshotError::MismatchedBaseSlot( full_snapshot_archive_info.slot(), diff --git a/runtime/src/storable_accounts.rs b/runtime/src/storable_accounts.rs index bfa35cf71c3e6b..8d79c0f78c5fe4 100644 --- a/runtime/src/storable_accounts.rs +++ b/runtime/src/storable_accounts.rs @@ -143,7 +143,7 @@ pub mod tests { slot, &vec![(&pk, &account, slot), (&pk, &account, slot)][..], ); - assert!(!test3.contains_multiple_slots()); + assert!(!(&test3).contains_multiple_slots()); let test3 = ( slot, &vec![(&pk, &account, slot), (&pk, &account, slot + 1)][..], diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs index 3b738df1d8a0e4..67f1f931147cef 100644 --- a/runtime/src/system_instruction_processor.rs +++ b/runtime/src/system_instruction_processor.rs @@ -1626,7 +1626,7 @@ mod tests { .unwrap(); // super fun time; callback chooses to .clean_accounts(None) or not - callback(&bank); + callback(&*bank); // create a normal account at the same pubkey as the zero-lamports account let lamports = genesis_config.rent.minimum_balance(len2); diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs index c689d08f39ae81..d56c7aca2c4159 100644 --- a/sdk/program/src/message/compiled_keys.rs +++ b/sdk/program/src/message/compiled_keys.rs @@ -80,20 +80,20 @@ impl CompiledKeys { .chain( key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then_some(*key)), + .filter_map(|(key, meta)| (meta.is_signer && meta.is_writable).then(|| *key)), ) .collect(); let readonly_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (meta.is_signer && !meta.is_writable).then(|| *key)) .collect(); let writable_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (!meta.is_signer && meta.is_writable).then(|| *key)) .collect(); let readonly_non_signer_keys: Vec = key_meta_map .iter() - .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then_some(*key)) + .filter_map(|(key, meta)| (!meta.is_signer && !meta.is_writable).then(|| *key)) .collect(); let signers_len = writable_signer_keys @@ -160,7 +160,7 @@ impl CompiledKeys { for search_key in self 
.key_meta_map .iter() - .filter_map(|(key, meta)| key_meta_filter(meta).then_some(key)) + .filter_map(|(key, meta)| key_meta_filter(meta).then(|| key)) { for (key_index, key) in lookup_table_addresses.iter().enumerate() { if key == search_key { diff --git a/sdk/program/src/nonce/state/mod.rs b/sdk/program/src/nonce/state/mod.rs index d55bc9063afcff..a4a850b93c1cdc 100644 --- a/sdk/program/src/nonce/state/mod.rs +++ b/sdk/program/src/nonce/state/mod.rs @@ -46,7 +46,7 @@ impl Versions { Self::Current(state) => match **state { State::Uninitialized => None, State::Initialized(ref data) => { - (recent_blockhash == &data.blockhash()).then_some(data) + (recent_blockhash == &data.blockhash()).then(|| data) } }, } diff --git a/sdk/program/src/stake/tools.rs b/sdk/program/src/stake/tools.rs index e0447f49fc69c9..842a822b0ea329 100644 --- a/sdk/program/src/stake/tools.rs +++ b/sdk/program/src/stake/tools.rs @@ -28,7 +28,7 @@ fn get_minimum_delegation_return_data() -> Result { .ok_or(ProgramError::InvalidInstructionData) .and_then(|(program_id, return_data)| { (program_id == super::program::id()) - .then_some(return_data) + .then(|| return_data) .ok_or(ProgramError::IncorrectProgramId) }) .and_then(|return_data| { diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 1ef9b989304ebb..3492f60c8933a8 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -307,7 +307,7 @@ fn recv_send( let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); let data = pkt.data(..)?; - socket_addr_space.check(&addr).then_some((data, addr)) + socket_addr_space.check(&addr).then(|| (data, addr)) }); batch_send(sock, &packets.collect::>())?; Ok(()) diff --git a/validator/src/bootstrap.rs b/validator/src/bootstrap.rs index c5a4b65d4b1229..fec9f6d409709c 100644 --- a/validator/src/bootstrap.rs +++ b/validator/src/bootstrap.rs @@ -409,7 +409,7 @@ pub fn attempt_download_genesis_and_snapshot( .map_err(|err| format!("Failed to get RPC node slot: {}", err))?; info!("RPC node root slot: {}", rpc_client_slot); - download_snapshots( + if let Err(err) = download_snapshots( full_snapshot_archives_dir, incremental_snapshot_archives_dir, validator_config, @@ -422,7 +422,9 @@ pub fn attempt_download_genesis_and_snapshot( download_abort_count, snapshot_hash, rpc_contact_info, - )?; + ) { + return Err(err); + }; if let Some(url) = bootstrap_config.check_vote_account.as_ref() { let rpc_client = RpcClient::new(url); diff --git a/zk-token-sdk/src/instruction/close_account.rs b/zk-token-sdk/src/instruction/close_account.rs index b6702e3051f168..4525f87901cd71 100644 --- a/zk-token-sdk/src/instruction/close_account.rs +++ b/zk-token-sdk/src/instruction/close_account.rs @@ -41,7 +41,7 @@ impl CloseAccountData { keypair: &ElGamalKeypair, ciphertext: &ElGamalCiphertext, ) -> Result { - let pod_pubkey = pod::ElGamalPubkey(keypair.public.to_bytes()); + let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); let pod_ciphertext = pod::ElGamalCiphertext(ciphertext.to_bytes()); let mut transcript = CloseAccountProof::transcript_new(&pod_pubkey, &pod_ciphertext); diff --git a/zk-token-sdk/src/instruction/withdraw.rs b/zk-token-sdk/src/instruction/withdraw.rs index 64f540a591804e..9aa606e8ca4203 100644 --- a/zk-token-sdk/src/instruction/withdraw.rs +++ b/zk-token-sdk/src/instruction/withdraw.rs @@ -62,7 +62,7 @@ impl WithdrawData { // current source balance let final_ciphertext = current_ciphertext - &ElGamal::encode(amount); - let pod_pubkey = 
pod::ElGamalPubkey(keypair.public.to_bytes()); + let pod_pubkey = pod::ElGamalPubkey((&keypair.public).to_bytes()); let pod_final_ciphertext: pod::ElGamalCiphertext = final_ciphertext.into(); let mut transcript = WithdrawProof::transcript_new(&pod_pubkey, &pod_final_ciphertext); let proof = WithdrawProof::new(keypair, final_balance, &final_ciphertext, &mut transcript); From 6436a6879868944e8ba2dc2a723c7dcf60c9c51a Mon Sep 17 00:00:00 2001 From: leonardkulms <42893075+leonardkulms@users.noreply.github.com> Date: Fri, 19 Aug 2022 12:18:19 +0200 Subject: [PATCH 43/67] correct double negation (#27240) --- sdk/src/transaction/error.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs index 2fe4e0e3756adf..e061b3ffebf951 100644 --- a/sdk/src/transaction/error.rs +++ b/sdk/src/transaction/error.rs @@ -142,9 +142,9 @@ pub enum TransactionError { #[error("Transaction contains a duplicate instruction ({0}) that is not allowed")] DuplicateInstruction(u8), - /// Transaction results in an account without insufficient funds for rent + /// Transaction results in an account with insufficient funds for rent #[error( - "Transaction results in an account ({account_index}) without insufficient funds for rent" + "Transaction results in an account ({account_index}) with insufficient funds for rent" )] InsufficientFundsForRent { account_index: u8 }, } From 58cd2d36bd4c6d12d3f6a5ee1b3a655c3ebf245b Mon Sep 17 00:00:00 2001 From: Will Hickey Date: Fri, 19 Aug 2022 09:15:15 -0500 Subject: [PATCH 44/67] Enable QUIC client by default. Add arg to disable QUIC client. (Forward port #26927) (#27194) Enable QUIC client by default. Add arg to disable QUIC client. * Enable QUIC client by default. Add arg to disable QUIC client. 
* Deprecate --disable-quic-servers arg * Add #[ignore] annotation to failing tests --- banking-bench/src/main.rs | 6 +- bench-tps/src/cli.rs | 10 +-- bench-tps/tests/bench_tps.rs | 1 + client/src/connection_cache.rs | 30 ++++++--- core/src/banking_stage.rs | 2 + core/src/tpu.rs | 69 +++++++++------------ core/src/validator.rs | 3 - dos/src/main.rs | 2 + local-cluster/src/validator_configs.rs | 1 - local-cluster/tests/local_cluster.rs | 4 ++ local-cluster/tests/local_cluster_slow_1.rs | 2 + local-cluster/tests/local_cluster_slow_2.rs | 1 + multinode-demo/bootstrap-validator.sh | 2 +- validator/src/main.rs | 17 +++-- 14 files changed, 87 insertions(+), 63 deletions(-) diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 51b0042abed374..2806a8a9e05a7a 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -214,10 +214,10 @@ fn main() { .help("Number of threads to use in the banking stage"), ) .arg( - Arg::new("tpu_use_quic") - .long("tpu-use-quic") + Arg::new("tpu_disable_quic") + .long("tpu-disable-quic") .takes_value(false) - .help("Forward messages to TPU using QUIC"), + .help("Disable forwarding messages to TPU using QUIC"), ) .get_matches(); diff --git a/bench-tps/src/cli.rs b/bench-tps/src/cli.rs index a1b5c28329ea76..9c583642d78d44 100644 --- a/bench-tps/src/cli.rs +++ b/bench-tps/src/cli.rs @@ -294,10 +294,10 @@ pub fn build_args<'a, 'b>(version: &'b str) -> App<'a, 'b> { .help("Submit transactions with a TpuClient") ) .arg( - Arg::with_name("tpu_use_quic") - .long("tpu-use-quic") + Arg::with_name("tpu_disable_quic") + .long("tpu-disable-quic") .takes_value(false) - .help("Submit transactions via QUIC; only affects ThinClient (default) \ + .help("Do not submit transactions via QUIC; only affects ThinClient (default) \ or TpuClient sends"), ) .arg( @@ -358,8 +358,8 @@ pub fn extract_args(matches: &ArgMatches) -> Config { args.external_client_type = ExternalClientType::RpcClient; } - if matches.is_present("tpu_use_quic") { - args.use_quic = true; + if matches.is_present("tpu_disable_quic") { + args.use_quic = false; } if let Some(v) = matches.value_of("tpu_connection_pool_size") { diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 220980f9b0bf90..ec12c8b7aaabe1 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -136,6 +136,7 @@ fn test_bench_tps_test_validator(config: Config) { #[test] #[serial] +#[ignore] fn test_bench_tps_local_cluster_solana() { test_bench_tps_local_cluster(Config { tx_count: 100, diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index f0628d3e32b9de..9e5efff3f3e061 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -32,7 +32,7 @@ static MAX_CONNECTIONS: usize = 1024; /// Used to decide whether the TPU and underlying connection cache should use /// QUIC connections. 
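// Illustrative aside (not part of this diff): with QUIC on by default, the
// connection cache addresses peers at their QUIC port, i.e. the advertised
// TPU port shifted by QUIC_PORT_OFFSET, falling back to the plain port if the
// add would overflow u16. The offset value and helper below are assumptions
// for this sketch only, not taken from the patch.
const QUIC_PORT_OFFSET: u16 = 6; // assumed value, for illustration

fn quic_tpu_addr(addr: &std::net::SocketAddr) -> std::net::SocketAddr {
    let port = addr
        .port()
        .checked_add(QUIC_PORT_OFFSET)
        .unwrap_or_else(|| addr.port());
    std::net::SocketAddr::new(addr.ip(), port)
}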
-pub const DEFAULT_TPU_USE_QUIC: bool = false; +pub const DEFAULT_TPU_USE_QUIC: bool = true; /// Default TPU connection pool size per remote address pub const DEFAULT_TPU_CONNECTION_POOL_SIZE: usize = 4; @@ -683,6 +683,11 @@ mod tests { // be lazy and not connect until first use or handle connection errors somehow // (without crashing, as would be required in a real practical validator) let connection_cache = ConnectionCache::default(); + let port_offset = if connection_cache.use_quic() { + QUIC_PORT_OFFSET + } else { + 0 + }; let addrs = (0..MAX_CONNECTIONS) .into_iter() .map(|_| { @@ -695,18 +700,29 @@ mod tests { let map = connection_cache.map.read().unwrap(); assert!(map.len() == MAX_CONNECTIONS); addrs.iter().for_each(|a| { - let conn = &map.get(a).expect("Address not found").connections[0]; - let conn = conn.new_blocking_connection(*a, connection_cache.stats.clone()); - assert!(a.ip() == conn.tpu_addr().ip()); + let port = a + .port() + .checked_add(port_offset) + .unwrap_or_else(|| a.port()); + let addr = &SocketAddr::new(a.ip(), port); + + let conn = &map.get(addr).expect("Address not found").connections[0]; + let conn = conn.new_blocking_connection(*addr, connection_cache.stats.clone()); + assert!(addr.ip() == conn.tpu_addr().ip()); }); } - let addr = get_addr(&mut rng); - connection_cache.get_connection(&addr); + let addr = &get_addr(&mut rng); + connection_cache.get_connection(addr); + let port = addr + .port() + .checked_add(port_offset) + .unwrap_or_else(|| addr.port()); + let addr_with_quic_port = SocketAddr::new(addr.ip(), port); let map = connection_cache.map.read().unwrap(); assert!(map.len() == MAX_CONNECTIONS); - let _conn = map.get(&addr).expect("Address not found"); + let _conn = map.get(&addr_with_quic_port).expect("Address not found"); } #[test] diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2547c00f94e5ca..83eae7330de1e4 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -4122,6 +4122,7 @@ mod tests { } #[test] + #[ignore] fn test_forwarder_budget() { solana_logger::setup(); // Create `PacketBatch` with 1 unprocessed packet @@ -4209,6 +4210,7 @@ mod tests { } #[test] + #[ignore] fn test_handle_forwarding() { solana_logger::setup(); // packets are deserialized upon receiving, failed packets will not be diff --git a/core/src/tpu.rs b/core/src/tpu.rs index e969ba90eed03a..606fee5cb3cded 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -63,8 +63,8 @@ pub struct Tpu { banking_stage: BankingStage, cluster_info_vote_listener: ClusterInfoVoteListener, broadcast_stage: BroadcastStage, - tpu_quic_t: Option>, - tpu_forwards_quic_t: Option>, + tpu_quic_t: thread::JoinHandle<()>, + tpu_forwards_quic_t: thread::JoinHandle<()>, find_packet_sender_stake_stage: FindPacketSenderStakeStage, vote_find_packet_sender_stake_stage: FindPacketSenderStakeStage, staked_nodes_updater_service: StakedNodesUpdaterService, @@ -97,7 +97,6 @@ impl Tpu { connection_cache: &Arc, keypair: &Keypair, log_messages_bytes_limit: Option, - enable_quic_servers: bool, staked_nodes: &Arc>, shared_staked_nodes_overrides: Arc>>, ) -> Self { @@ -157,37 +156,33 @@ impl Tpu { let (verified_sender, verified_receiver) = unbounded(); let stats = Arc::new(StreamStats::default()); - let tpu_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu.ip(), - packet_sender, - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS, - MAX_UNSTAKED_CONNECTIONS, 
- stats.clone(), - ) - .unwrap() - }); + let tpu_quic_t = spawn_server( + transactions_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu.ip(), + packet_sender, + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS, + MAX_UNSTAKED_CONNECTIONS, + stats.clone(), + ) + .unwrap(); - let tpu_forwards_quic_t = enable_quic_servers.then(|| { - spawn_server( - transactions_forwards_quic_sockets, - keypair, - cluster_info.my_contact_info().tpu_forwards.ip(), - forwarded_packet_sender, - exit.clone(), - MAX_QUIC_CONNECTIONS_PER_PEER, - staked_nodes.clone(), - MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), - 0, // Prevent unstaked nodes from forwarding transactions - stats, - ) - .unwrap() - }); + let tpu_forwards_quic_t = spawn_server( + transactions_forwards_quic_sockets, + keypair, + cluster_info.my_contact_info().tpu_forwards.ip(), + forwarded_packet_sender, + exit.clone(), + MAX_QUIC_CONNECTIONS_PER_PEER, + staked_nodes.clone(), + MAX_STAKED_CONNECTIONS.saturating_add(MAX_UNSTAKED_CONNECTIONS), + 0, // Prevent unstaked nodes from forwarding transactions + stats, + ) + .unwrap(); let sigverify_stage = { let verifier = TransactionSigVerifier::new(verified_sender); @@ -274,13 +269,9 @@ impl Tpu { self.find_packet_sender_stake_stage.join(), self.vote_find_packet_sender_stake_stage.join(), self.staked_nodes_updater_service.join(), + self.tpu_quic_t.join(), + self.tpu_forwards_quic_t.join(), ]; - if let Some(tpu_quic_t) = self.tpu_quic_t { - tpu_quic_t.join()?; - } - if let Some(tpu_forwards_quic_t) = self.tpu_forwards_quic_t { - tpu_forwards_quic_t.join()?; - } let broadcast_result = self.broadcast_stage.join(); for result in results { result?; diff --git a/core/src/validator.rs b/core/src/validator.rs index 533cabab67e4c6..5477985c27ad66 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -175,7 +175,6 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, - pub enable_quic_servers: bool, } impl Default for ValidatorConfig { @@ -239,7 +238,6 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), - enable_quic_servers: true, } } } @@ -1025,7 +1023,6 @@ impl Validator { &connection_cache, &identity_keypair, config.runtime_config.log_messages_bytes_limit, - config.enable_quic_servers, &staked_nodes, config.staked_nodes_overrides.clone(), ); diff --git a/dos/src/main.rs b/dos/src/main.rs index fa75fe90b79435..baadc5c0016388 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -1185,11 +1185,13 @@ pub mod test { } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ false) } #[test] + #[ignore] fn test_dos_with_blockhash_and_payer_and_quic() { run_dos_with_blockhash_and_payer(/*tpu_use_quic*/ true) } diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 4c3b281cb991bb..5d678319a3d636 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -65,7 +65,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), - enable_quic_servers: config.enable_quic_servers, } } diff --git a/local-cluster/tests/local_cluster.rs 
b/local-cluster/tests/local_cluster.rs index 072239f5d951ab..6fad4c541c47c8 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -173,6 +173,7 @@ fn test_spend_and_verify_all_nodes_3() { #[test] #[serial] +#[ignore] fn test_local_cluster_signature_subscribe() { solana_logger::setup_with_default(RUST_LOG_FILTER); let num_nodes = 2; @@ -311,6 +312,7 @@ fn test_two_unbalanced_stakes() { #[test] #[serial] +#[ignore] fn test_forwarding() { solana_logger::setup_with_default(RUST_LOG_FILTER); // Set up a cluster where one node is never the leader, so all txs sent to this node @@ -1228,6 +1230,7 @@ fn test_incremental_snapshot_download_with_crossing_full_snapshot_interval_at_st #[allow(unused_attributes)] #[test] #[serial] +#[ignore] fn test_snapshot_restart_tower() { solana_logger::setup_with_default(RUST_LOG_FILTER); // First set up the cluster with 2 nodes @@ -2520,6 +2523,7 @@ fn run_test_load_program_accounts_partition(scan_commitment: CommitmentConfig) { #[test] #[serial] +#[ignore] fn test_votes_land_in_fork_during_long_partition() { let total_stake = 3 * DEFAULT_NODE_STAKE; // Make `lighter_stake` insufficient for switching threshold diff --git a/local-cluster/tests/local_cluster_slow_1.rs b/local-cluster/tests/local_cluster_slow_1.rs index 29a5f314c41dd2..2faf69f1e503cb 100644 --- a/local-cluster/tests/local_cluster_slow_1.rs +++ b/local-cluster/tests/local_cluster_slow_1.rs @@ -50,6 +50,7 @@ mod common; #[test] #[serial] +#[ignore] // Steps in this test: // We want to create a situation like: /* @@ -588,6 +589,7 @@ fn test_duplicate_shreds_broadcast_leader() { #[test] #[serial] +#[ignore] fn test_switch_threshold_uses_gossip_votes() { solana_logger::setup_with_default(RUST_LOG_FILTER); let total_stake = 100 * DEFAULT_NODE_STAKE; diff --git a/local-cluster/tests/local_cluster_slow_2.rs b/local-cluster/tests/local_cluster_slow_2.rs index 6488ddea1e0e57..d6d315ed0d46d8 100644 --- a/local-cluster/tests/local_cluster_slow_2.rs +++ b/local-cluster/tests/local_cluster_slow_2.rs @@ -201,6 +201,7 @@ fn test_leader_failure_4() { #[test] #[serial] +#[ignore] fn test_ledger_cleanup_service() { solana_logger::setup_with_default(RUST_LOG_FILTER); error!("test_ledger_cleanup_service"); diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 9245f507c394e2..deb82f106fae04 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -61,7 +61,7 @@ while [[ -n $1 ]]; do elif [[ $1 = --enable-rpc-bigtable-ledger-storage ]]; then args+=("$1") shift - elif [[ $1 = --tpu-use-quic ]]; then + elif [[ $1 = --tpu-disable-quic ]]; then args+=("$1") shift elif [[ $1 = --rpc-send-batch-ms ]]; then diff --git a/validator/src/main.rs b/validator/src/main.rs index 5381155c6591a6..5d0a824feac4a0 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1218,13 +1218,21 @@ pub fn main() { Arg::with_name("tpu_use_quic") .long("tpu-use-quic") .takes_value(false) + .hidden(true) + .conflicts_with("tpu_disable_quic") .help("Use QUIC to send transactions."), ) + .arg( + Arg::with_name("tpu_disable_quic") + .long("tpu-disable-quic") + .takes_value(false) + .help("Do not use QUIC to send transactions."), + ) .arg( Arg::with_name("disable_quic_servers") .long("disable-quic-servers") .takes_value(false) - .help("Disable QUIC TPU servers"), + .hidden(true) ) .arg( Arg::with_name("enable_quic_servers") @@ -2394,8 +2402,7 @@ pub fn main() { let restricted_repair_only_mode = 
matches.is_present("restricted_repair_only_mode"); let accounts_shrink_optimize_total_space = value_t_or_exit!(matches, "accounts_shrink_optimize_total_space", bool); - let tpu_use_quic = matches.is_present("tpu_use_quic"); - let enable_quic_servers = !matches.is_present("disable_quic_servers"); + let tpu_use_quic = !matches.is_present("tpu_disable_quic"); let tpu_connection_pool_size = value_t_or_exit!(matches, "tpu_connection_pool_size", usize); let shrink_ratio = value_t_or_exit!(matches, "accounts_shrink_ratio", f64); @@ -2565,6 +2572,9 @@ pub fn main() { if matches.is_present("enable_quic_servers") { warn!("--enable-quic-servers is now the default behavior. This flag is deprecated and can be removed from the launch args"); } + if matches.is_present("disable_quic_servers") { + warn!("--disable-quic-servers is deprecated. The quic server cannot be disabled."); + } let rpc_bigtable_config = if matches.is_present("enable_rpc_bigtable_ledger_storage") || matches.is_present("enable_bigtable_ledger_upload") @@ -2749,7 +2759,6 @@ pub fn main() { log_messages_bytes_limit: value_of(&matches, "log_messages_bytes_limit"), ..RuntimeConfig::default() }, - enable_quic_servers, staked_nodes_overrides: staked_nodes_overrides.clone(), ..ValidatorConfig::default() }; From d940c6ede0477cd38d559c546a75d1e2437f4761 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 09:33:50 -0500 Subject: [PATCH 45/67] slots_connected: check if the range is connected (>= ending_slot) (#27152) --- ledger-tool/src/main.rs | 4 ++-- ledger/src/blockstore.rs | 27 +++++++++++++-------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index e2b6fdc2e7609a..ce3d42a06a0a9a 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -955,9 +955,9 @@ fn load_bank_forks( }; if let Some(halt_slot) = process_options.halt_at_slot { - // Check if we have the slot data necessary to replay from starting_slot to halt_slot. + // Check if we have the slot data necessary to replay from starting_slot to >= halt_slot. // - This will not catch the case when loading from genesis without a full slot 0. 
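// Illustrative aside (not part of this diff): a minimal sketch of the walk
// behind the renamed slot_range_connected, using a stand-in SlotMeta type.
// Children of full slots are followed starting from starting_slot (itself
// exempt from the is_full check), and the range counts as connected once any
// full slot at or beyond ending_slot is reached.
struct SlotMetaSketch {
    is_full: bool,
    next_slots: Vec<u64>,
}

fn slot_range_connected_sketch(
    meta: impl Fn(u64) -> Option<SlotMetaSketch>,
    starting_slot: u64,
    ending_slot: u64,
) -> bool {
    if starting_slot == ending_slot {
        return true;
    }
    // Seed the worklist with the children of the starting slot.
    let mut pending = match meta(starting_slot) {
        Some(m) => m.next_slots,
        None => return false,
    };
    while let Some(slot) = pending.pop() {
        if let Some(m) = meta(slot) {
            if m.is_full {
                if slot >= ending_slot {
                    return true;
                }
                pending.extend(m.next_slots);
            }
        }
    }
    false
}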
- if !blockstore.slots_connected(starting_slot, halt_slot) { + if !blockstore.slot_range_connected(starting_slot, halt_slot) { eprintln!( "Unable to load bank forks at slot {} due to disconnected blocks.", halt_slot, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 2c0913a5ab8cfb..5c246b5ab8045c 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -546,9 +546,9 @@ impl Blockstore { self.prepare_rooted_slot_iterator(slot, IteratorDirection::Reverse) } - /// Determines if `starting_slot` and `ending_slot` are connected by full slots + /// Determines if we can iterate from `starting_slot` to >= `ending_slot` by full slots /// `starting_slot` is excluded from the `is_full()` check - pub fn slots_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { + pub fn slot_range_connected(&self, starting_slot: Slot, ending_slot: Slot) -> bool { if starting_slot == ending_slot { return true; } @@ -562,8 +562,7 @@ impl Blockstore { if slot_meta.is_full() { match slot.cmp(&ending_slot) { cmp::Ordering::Less => next_slots.extend(slot_meta.next_slots), - cmp::Ordering::Equal => return true, - cmp::Ordering::Greater => {} // slot is greater than the ending slot, so all its children would be as well + _ => return true, } } } @@ -5502,7 +5501,7 @@ pub mod tests { } */ #[test] - fn test_slots_connected_chain() { + fn test_slot_range_connected_chain() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5511,12 +5510,12 @@ pub mod tests { make_and_insert_slot(&blockstore, slot, slot.saturating_sub(1)); } - assert!(blockstore.slots_connected(1, 3)); - assert!(!blockstore.slots_connected(1, 4)); // slot 4 does not exist + assert!(blockstore.slot_range_connected(1, 3)); + assert!(!blockstore.slot_range_connected(1, 4)); // slot 4 does not exist } #[test] - fn test_slots_connected_disconnected() { + fn test_slot_range_connected_disconnected() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5524,20 +5523,20 @@ pub mod tests { make_and_insert_slot(&blockstore, 2, 1); make_and_insert_slot(&blockstore, 4, 2); - assert!(!blockstore.slots_connected(1, 3)); // Slot 3 does not exit - assert!(blockstore.slots_connected(1, 4)); + assert!(blockstore.slot_range_connected(1, 3)); // Slot 3 does not exist, but we can still replay this range to slot 4 + assert!(blockstore.slot_range_connected(1, 4)); } #[test] - fn test_slots_connected_same_slot() { + fn test_slot_range_connected_same_slot() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - assert!(blockstore.slots_connected(54, 54)); + assert!(blockstore.slot_range_connected(54, 54)); } #[test] - fn test_slots_connected_starting_slot_not_full() { + fn test_slot_range_connected_starting_slot_not_full() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -5545,7 +5544,7 @@ pub mod tests { make_and_insert_slot(&blockstore, 6, 5); assert!(!blockstore.meta(4).unwrap().unwrap().is_full()); - assert!(blockstore.slots_connected(4, 6)); + assert!(blockstore.slot_range_connected(4, 6)); } #[test] From e12955e13693514e1730f40b2b423159531a11d5 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 09:34:35 -0500 Subject: [PATCH 46/67] create-snapshot check if snapshot slot exists (#27153) --- ledger-tool/src/main.rs | 5 +++++ 1 file changed, 5 
insertions(+) diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index ce3d42a06a0a9a..9484e1b691fa8a 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -2817,6 +2817,11 @@ fn main() { value_t_or_exit!(arg_matches, "snapshot_slot", Slot) }; + assert!( + blockstore.meta(snapshot_slot).unwrap().is_some(), + "snapshot slot doesn't exist" + ); + let ending_slot = if is_minimized { let ending_slot = value_t_or_exit!(arg_matches, "ending_slot", Slot); if ending_slot <= snapshot_slot { From c9d7ad099e975d61fdc4a0785993745dd623c268 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 12:09:35 -0400 Subject: [PATCH 47/67] Add Bank::clean_accounts_for_tests() (#27209) --- runtime/src/bank.rs | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b22e7baa83dcd6..43de49847d5d47 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -8100,6 +8100,12 @@ pub(crate) mod tests { } } + impl Bank { + fn clean_accounts_for_tests(&self) { + self.rc.accounts.accounts_db.clean_accounts_for_tests() + } + } + #[test] fn test_nonce_info() { let lamports_per_signature = 42; @@ -10418,7 +10424,7 @@ pub(crate) mod tests { bank.squash(); bank.force_flush_accounts_cache(); let hash = bank.update_accounts_hash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); assert_eq!(bank.update_accounts_hash(), hash); let bank0 = Arc::new(new_from_parent(&bank)); @@ -10441,7 +10447,7 @@ pub(crate) mod tests { info!("bank0 purge"); let hash = bank0.update_accounts_hash(); - bank0.clean_accounts(false, false, None); + bank0.clean_accounts_for_tests(); assert_eq!(bank0.update_accounts_hash(), hash); assert_eq!( @@ -10451,7 +10457,7 @@ pub(crate) mod tests { assert_eq!(bank1.get_account(&keypair.pubkey()), None); info!("bank1 purge"); - bank1.clean_accounts(false, false, None); + bank1.clean_accounts_for_tests(); assert_eq!( bank0.get_account(&keypair.pubkey()).unwrap().lamports(), @@ -10475,7 +10481,7 @@ pub(crate) mod tests { assert_eq!(bank0.get_account(&keypair.pubkey()), None); assert_eq!(bank1.get_account(&keypair.pubkey()), None); bank1.force_flush_accounts_cache(); - bank1.clean_accounts(false, false, None); + bank1.clean_accounts_for_tests(); assert!(bank1.verify_bank_hash(VerifyBankHash::default_for_test())); } @@ -14704,7 +14710,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let mut bank3 = Arc::new(Bank::new_from_parent(&bank2, &Pubkey::default(), 3)); bank3.deposit(&pubkey1, some_lamports + 1).unwrap(); @@ -14713,7 +14719,7 @@ pub(crate) mod tests { bank3.squash(); bank3.force_flush_accounts_cache(); - bank3.clean_accounts(false, false, None); + bank3.clean_accounts_for_tests(); assert_eq!( bank3.rc.accounts.accounts_db.ref_count_for_pubkey(&pubkey0), 2 @@ -14782,7 +14788,7 @@ pub(crate) mod tests { // Clean accounts, which should add earlier slots to the shrink // candidate set - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); // Slots 0 and 1 should be candidates for shrinking, but slot 2 // shouldn't because none of its accounts are outdated by a later @@ -14836,7 +14842,7 @@ pub(crate) mod tests { goto_end_of_slot(Arc::::get_mut(&mut bank).unwrap()); bank.squash(); - bank.clean_accounts(false, false, None); + bank.clean_accounts_for_tests(); let force_to_return_alive_account = 0; 
assert_eq!( bank.process_stale_slot_with_budget(22, force_to_return_alive_account), @@ -16221,7 +16227,7 @@ pub(crate) mod tests { current_major_fork_bank.squash(); // Try to get cache flush/clean to overlap with the scan current_major_fork_bank.force_flush_accounts_cache(); - current_major_fork_bank.clean_accounts(false, false, None); + current_major_fork_bank.clean_accounts_for_tests(); // Move purge here so that Bank::drop()->purge_slots() doesn't race // with clean. Simulates the call from AccountsBackgroundService abs_request_handler.handle_pruned_banks(¤t_major_fork_bank, true); @@ -17350,7 +17356,7 @@ pub(crate) mod tests { bank2.squash(); drop(bank1); - bank2.clean_accounts(false, false, None); + bank2.clean_accounts_for_tests(); let expected_ref_count_for_cleaned_up_keys = 0; let expected_ref_count_for_keys_in_both_slot1_and_slot2 = 1; From 27e2d512d96bba13dc02e41629f7abf385877387 Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 12:09:47 -0400 Subject: [PATCH 48/67] Call `AccountsDb::shrink_all_slots()` directly (#27235) --- runtime/src/bank.rs | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 43de49847d5d47..b087dcc2774951 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7177,7 +7177,10 @@ impl Bank { let mut shrink_all_slots_time = Measure::start("shrink_all_slots"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("shrinking.."); - self.shrink_all_slots(true, Some(last_full_snapshot_slot)); + self.rc + .accounts + .accounts_db + .shrink_all_slots(true, Some(last_full_snapshot_slot)); } shrink_all_slots_time.stop(); @@ -7473,13 +7476,6 @@ impl Bank { ); } - pub fn shrink_all_slots(&self, is_startup: bool, last_full_snapshot_slot: Option) { - self.rc - .accounts - .accounts_db - .shrink_all_slots(is_startup, last_full_snapshot_slot); - } - pub fn print_accounts_stats(&self) { self.rc.accounts.accounts_db.print_accounts_stats(""); } From 44b8eac82fb641c8c0275bafe60531c408d466fb Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+taozhu-chicago@users.noreply.github.com> Date: Fri, 19 Aug 2022 12:14:37 -0500 Subject: [PATCH 49/67] add ed25519_program to built-in instruction cost list (#27199) * add ed25519_program to built-in instruction cost list * Remove unnecessary and stale comment --- runtime/src/block_cost_limits.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index 31964f88cdaee3..cc279b4dd7549e 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -3,7 +3,8 @@ use { lazy_static::lazy_static, solana_sdk::{ - feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, system_program, + ed25519_program, feature, incinerator, native_loader, pubkey::Pubkey, secp256k1_program, + system_program, }, std::collections::HashMap, }; @@ -38,8 +39,8 @@ lazy_static! 
{ (solana_sdk::stake::program::id(), COMPUTE_UNIT_TO_US_RATIO * 25), (solana_config_program::id(), COMPUTE_UNIT_TO_US_RATIO * 15), (solana_vote_program::id(), COMPUTE_UNIT_TO_US_RATIO * 70), - // secp256k1 is executed in banking stage, it should cost similar to sigverify (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), + (ed25519_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), (system_program::id(), COMPUTE_UNIT_TO_US_RATIO * 5), ] .iter() From 1cebb1f8a9b9ffabd14198779c83d44ce68d8145 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 12:40:42 -0500 Subject: [PATCH 50/67] simple refactorings to disk idx (#27238) --- runtime/src/accounts_index.rs | 3 ++- runtime/src/bucket_map_holder.rs | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index a2bd30d35d4bda..386367e00520c1 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -412,7 +412,8 @@ impl PreAllocatedAccountMapEntry { account_info: T, storage: &Arc>, ) -> AccountMapEntry { - let ref_count = if account_info.is_cached() { 0 } else { 1 }; + let is_cached = account_info.is_cached(); + let ref_count = if is_cached { 0 } else { 1 }; let meta = AccountMapEntryMeta::new_dirty(storage); Arc::new(AccountMapEntryInner::new( vec![(slot, account_info)], diff --git a/runtime/src/bucket_map_holder.rs b/runtime/src/bucket_map_holder.rs index b0237c59101cb9..aea586dca659a1 100644 --- a/runtime/src/bucket_map_holder.rs +++ b/runtime/src/bucket_map_holder.rs @@ -31,6 +31,7 @@ pub struct BucketMapHolder { pub disk: Option>, pub count_buckets_flushed: AtomicUsize, + /// rolling 'current' age pub age: AtomicU8, pub stats: BucketMapHolderStats, From 437387ddab970b394f5155868a80d40636d3e3c8 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:17:32 -0500 Subject: [PATCH 51/67] add _inclusive for clarity (#27239) --- runtime/src/accounts_db.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index c84f45501faacf..838f9bfb649aaf 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2387,12 +2387,12 @@ impl AccountsDb { /// Collect all the uncleaned slots, up to a max slot /// /// Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot. 
- fn collect_uncleaned_slots_up_to_slot(&self, max_slot: Slot) -> Vec { + fn collect_uncleaned_slots_up_to_slot(&self, max_slot_inclusive: Slot) -> Vec { self.uncleaned_pubkeys .iter() .filter_map(|entry| { let slot = *entry.key(); - (slot <= max_slot).then(|| slot) + (slot <= max_slot_inclusive).then(|| slot) }) .collect() } @@ -2419,9 +2419,9 @@ impl AccountsDb { /// fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot( &self, - max_slot: Slot, + max_slot_inclusive: Slot, ) -> Vec> { - let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot); + let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot_inclusive); self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots) } @@ -2435,10 +2435,11 @@ impl AccountsDb { timings: &mut CleanKeyTimings, ) -> Vec { let mut dirty_store_processing_time = Measure::start("dirty_store_processing"); - let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); + let max_slot_inclusive = + max_clean_root.unwrap_or_else(|| self.accounts_index.max_root_inclusive()); let mut dirty_stores = Vec::with_capacity(self.dirty_stores.len()); self.dirty_stores.retain(|(slot, _store_id), store| { - if *slot > max_slot { + if *slot > max_slot_inclusive { true } else { dirty_stores.push((*slot, store.clone())); @@ -2447,7 +2448,7 @@ impl AccountsDb { }); let dirty_stores_len = dirty_stores.len(); let pubkeys = DashSet::new(); - timings.oldest_dirty_slot = max_slot.saturating_add(1); + timings.oldest_dirty_slot = max_slot_inclusive.saturating_add(1); for (slot, store) in dirty_stores { timings.oldest_dirty_slot = std::cmp::min(timings.oldest_dirty_slot, slot); store.accounts.account_iter().for_each(|account| { @@ -2464,7 +2465,8 @@ impl AccountsDb { timings.dirty_store_processing_us += dirty_store_processing_time.as_us(); let mut collect_delta_keys = Measure::start("key_create"); - let delta_keys = self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot); + let delta_keys = + self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot_inclusive); collect_delta_keys.stop(); timings.collect_delta_keys_us += collect_delta_keys.as_us(); @@ -2496,7 +2498,7 @@ impl AccountsDb { self.zero_lamport_accounts_to_purge_after_full_snapshot .retain(|(slot, pubkey)| { let is_candidate_for_clean = - max_slot >= *slot && last_full_snapshot_slot >= *slot; + max_slot_inclusive >= *slot && last_full_snapshot_slot >= *slot; if is_candidate_for_clean { pubkeys.push(*pubkey); } From bbf622c5cc4e6f82b73ec917015de2e6f3c69e86 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:18:00 -0500 Subject: [PATCH 52/67] eliminate unnecessary ZERO_RAW_LAMPORTS_SENTINEL (#27218) --- runtime/src/accounts_db.rs | 9 +-------- runtime/src/accounts_hash.rs | 16 +++++++++------- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 838f9bfb649aaf..2d83547095b790 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1820,14 +1820,7 @@ impl<'a, T: Fn(Slot) -> Option + Sync + Send + Clone> AppendVecScan for Sc // when we are scanning with bin ranges, we don't need to use exact bin numbers. Subtract to make first bin we care about at index 0. 
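// Illustrative aside (not part of this diff): once the sentinel is gone, a
// zero-lamport entry simply carries lamports == 0 and is skipped when the
// de-dup pass sums capitalization and collects hashes. Stand-in types only.
fn de_dup_accumulate_sketch(items: &[(u64, [u8; 32])]) -> (u64, Vec<[u8; 32]>) {
    let mut total_lamports: u64 = 0;
    let mut hashes = Vec::new();
    for &(lamports, hash) in items {
        // previously lamports == 0 was encoded as u64::MAX and filtered here
        if lamports != 0 {
            total_lamports = total_lamports
                .checked_add(lamports)
                .expect("capitalization add overflowed");
            hashes.push(hash);
        }
    }
    (total_lamports, hashes)
}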
self.pubkey_to_bin_index -= self.bin_range.start; - let raw_lamports = loaded_account.lamports(); - let zero_raw_lamports = raw_lamports == 0; - let balance = if zero_raw_lamports { - crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL - } else { - raw_lamports - }; - + let balance = loaded_account.lamports(); let loaded_hash = loaded_account.loaded_hash(); let new_hash = ExpectedRentCollection::maybe_rehash_skipped_rewrite( loaded_account, diff --git a/runtime/src/accounts_hash.rs b/runtime/src/accounts_hash.rs index c36a95d3e02640..251050816a0a24 100644 --- a/runtime/src/accounts_hash.rs +++ b/runtime/src/accounts_hash.rs @@ -18,7 +18,6 @@ use { }, }, }; -pub const ZERO_RAW_LAMPORTS_SENTINEL: u64 = std::u64::MAX; pub const MERKLE_FANOUT: usize = 16; #[derive(Default, Debug)] @@ -844,7 +843,7 @@ impl AccountsHash { ); // add lamports, get hash as long as the lamports are > 0 - if item.lamports != ZERO_RAW_LAMPORTS_SENTINEL + if item.lamports != 0 && (!filler_accounts_enabled || !self.is_filler_account(&item.pubkey)) { overall_sum = Self::checked_cast_for_capitalization( @@ -1042,7 +1041,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let accounts_hash = AccountsHash::default(); @@ -1116,7 +1115,7 @@ pub mod tests { // 2nd key - zero lamports, so will be removed let key = Pubkey::new(&[12u8; 32]); let hash = Hash::new(&[2u8; 32]); - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); let mut previous_pass = PreviousPass::default(); @@ -1395,10 +1394,13 @@ pub mod tests { #[test] fn test_accountsdb_de_dup_accounts_zero_chunks() { - let vec = [vec![vec![CalculateHashIntermediate::default()]]]; + let vec = [vec![vec![CalculateHashIntermediate { + lamports: 1, + ..CalculateHashIntermediate::default() + }]]]; let (hashes, lamports, _) = AccountsHash::default().de_dup_accounts_in_parallel(&vec, 0); assert_eq!(vec![&Hash::default()], hashes); - assert_eq!(lamports, 0); + assert_eq!(lamports, 1); } #[test] @@ -1653,7 +1655,7 @@ pub mod tests { assert_eq!(result, (vec![&val.hash], val.lamports as u64, 1)); // zero original lamports, higher version - let val = CalculateHashIntermediate::new(hash, ZERO_RAW_LAMPORTS_SENTINEL, key); + let val = CalculateHashIntermediate::new(hash, 0, key); account_maps.push(val); // has to be after previous entry since account_maps are in slot order let vecs = vec![vec![account_maps.to_vec()]]; From f94f12a96849b0a0df6a103da7c202ec81d53599 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Fri, 19 Aug 2022 14:39:20 -0500 Subject: [PATCH 53/67] make test code more clear (#27260) --- runtime/src/accounts_db.rs | 3 ++- runtime/src/bank.rs | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 2d83547095b790..adef621aac3afd 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -5536,7 +5536,8 @@ impl AccountsDb { .fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed); } - pub fn flush_accounts_cache_slot(&self, slot: Slot) { + #[cfg(test)] + pub(crate) fn flush_accounts_cache_slot_for_tests(&self, slot: Slot) { self.flush_slot_cache(slot); } diff --git a/runtime/src/bank.rs 
b/runtime/src/bank.rs index b087dcc2774951..bd82888b932cf0 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -6436,11 +6436,11 @@ impl Bank { } #[cfg(test)] - pub fn flush_accounts_cache_slot(&self) { + pub fn flush_accounts_cache_slot_for_tests(&self) { self.rc .accounts .accounts_db - .flush_accounts_cache_slot(self.slot()) + .flush_accounts_cache_slot_for_tests(self.slot()) } pub fn expire_old_recycle_stores(&self) { @@ -14687,7 +14687,7 @@ pub(crate) mod tests { bank1.deposit(&pubkey0, some_lamports).unwrap(); goto_end_of_slot(Arc::::get_mut(&mut bank1).unwrap()); bank1.freeze(); - bank1.flush_accounts_cache_slot(); + bank1.flush_accounts_cache_slot_for_tests(); bank1.print_accounts_stats(); From 0721fe678ce4053b24e30ef5dba360daa0423d65 Mon Sep 17 00:00:00 2001 From: apfitzge Date: Fri, 19 Aug 2022 15:16:56 -0500 Subject: [PATCH 54/67] banking stage: actually aggregate tracer packet stats (#27118) * aggregated_tracer_packet_stats_option was always None * Actually accumulate tracer packet stats --- core/src/banking_stage.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 83eae7330de1e4..2ec79c951675d7 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -2000,26 +2000,26 @@ impl BankingStage { packet_count_upperbound: usize, ) -> Result<(Vec, Option), RecvTimeoutError> { let start = Instant::now(); - let mut aggregated_tracer_packet_stats_option: Option = None; - let (mut packet_batches, new_tracer_packet_stats_option) = + let (mut packet_batches, mut aggregated_tracer_packet_stats_option) = verified_receiver.recv_timeout(recv_timeout)?; - if let Some(new_tracer_packet_stats) = &new_tracer_packet_stats_option { - if let Some(aggregated_tracer_packet_stats) = &mut aggregated_tracer_packet_stats_option - { - aggregated_tracer_packet_stats.aggregate(new_tracer_packet_stats); - } else { - aggregated_tracer_packet_stats_option = new_tracer_packet_stats_option; - } - } - let mut num_packets_received: usize = packet_batches.iter().map(|batch| batch.len()).sum(); - while let Ok((packet_batch, _tracer_packet_stats_option)) = verified_receiver.try_recv() { + while let Ok((packet_batch, tracer_packet_stats_option)) = verified_receiver.try_recv() { trace!("got more packet batches in banking stage"); let (packets_received, packet_count_overflowed) = num_packets_received .overflowing_add(packet_batch.iter().map(|batch| batch.len()).sum()); packet_batches.extend(packet_batch); + if let Some(tracer_packet_stats) = &tracer_packet_stats_option { + if let Some(aggregated_tracer_packet_stats) = + &mut aggregated_tracer_packet_stats_option + { + aggregated_tracer_packet_stats.aggregate(tracer_packet_stats); + } else { + aggregated_tracer_packet_stats_option = tracer_packet_stats_option; + } + } + // Spend any leftover receive time budget to greedily receive more packet batches, // until the upperbound of the packet count is reached.
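// Illustrative aside (not part of this diff): the accumulate-into-Option
// pattern the fix now applies to every received batch, with a stand-in
// stats type.
struct StatsSketch {
    total: u64,
}

impl StatsSketch {
    fn aggregate(&mut self, other: &StatsSketch) {
        self.total += other.total;
    }
}

fn fold_stats(aggregated: &mut Option<StatsSketch>, new: Option<StatsSketch>) {
    if let Some(new_stats) = new {
        match aggregated {
            Some(agg) => agg.aggregate(&new_stats),
            None => *aggregated = Some(new_stats),
        }
    }
}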
if start.elapsed() >= recv_timeout From 14aaf79acd3ca6b5fd33d4481546d70a0277ce85 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Fri, 19 Aug 2022 16:04:24 -0500 Subject: [PATCH 55/67] Refactor epoch reward 1 (#27253) * refactor: extract store_stake_accounts fn * clippy: slice Co-authored-by: haoran --- runtime/src/bank.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bd82888b932cf0..dcffc6cb987b7f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3280,6 +3280,17 @@ impl Bank { .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); } + fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) { + // store stake account even if stakers_reward is 0 + // because credits observed has changed + let mut m = Measure::start("store_stake_account"); + self.store_accounts((self.slot(), stake_rewards)); + m.stop(); + metrics + .store_stake_accounts_us + .fetch_add(m.as_us(), Relaxed); + } + fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { From 94adb7723df9c2d11a938459c98499e0ffbc3a83 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Fri, 19 Aug 2022 21:07:32 +0000 Subject: [PATCH 56/67] recovers merkle shreds from erasure codes (#27136) The commit * Identifies Merkle shreds when recovering from erasure codes and dispatches specialized code to reconstruct shreds. * Coding shred headers are added to recovered erasure shards. * Merkle tree is reconstructed for the erasure batch and added to recovered shreds. * The common signature (for the root of Merkle tree) is attached to all recovered shreds. --- Cargo.lock | 1 + ledger/Cargo.toml | 1 + ledger/src/blockstore.rs | 31 ++- ledger/src/shred.rs | 53 +++- ledger/src/shred/merkle.rs | 489 ++++++++++++++++++++++++++++++++++++- ledger/src/shredder.rs | 2 +- 6 files changed, 552 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9348db394a09bb..8e0d13a53004f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5506,6 +5506,7 @@ dependencies = [ "spl-token-2022", "static_assertions", "tempfile", + "test-case", "thiserror", "tokio", "tokio-stream", diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index c8f16585eef955..915bf2038de3c8 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -73,6 +73,7 @@ bs58 = "0.4.0" matches = "0.1.9" solana-account-decoder = { path = "../account-decoder", version = "=1.12.0" } solana-logger = { path = "../logger", version = "=1.12.0" } +test-case = "2.1.0" [build-dependencies] rustc_version = "0.4" diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 5c246b5ab8045c..5bddc02bb90a4e 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -625,7 +625,7 @@ impl Blockstore { index: &mut Index, erasure_meta: &ErasureMeta, prev_inserted_shreds: &HashMap, - recovered_data_shreds: &mut Vec, + recovered_shreds: &mut Vec, data_cf: &LedgerColumn, code_cf: &LedgerColumn, ) { @@ -646,9 +646,9 @@ impl Blockstore { code_cf, )) .collect(); - if let Ok(mut result) = Shredder::try_recovery(available_shreds) { + if let Ok(mut result) = shred::recover(available_shreds) { Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len()); - recovered_data_shreds.append(&mut result); + recovered_shreds.append(&mut result); } else { Self::submit_metrics(slot, erasure_meta, true, "incomplete".into(), 0); } @@ -709,7 +709,7 @@ impl 
Blockstore { ) -> Vec { let data_cf = db.column::(); let code_cf = db.column::(); - let mut recovered_data_shreds = vec![]; + let mut recovered_shreds = vec![]; // Recovery rules: // 1. Only try recovery around indexes for which new data or coding shreds are received // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery @@ -725,7 +725,7 @@ impl Blockstore { index, erasure_meta, prev_inserted_shreds, - &mut recovered_data_shreds, + &mut recovered_shreds, &data_cf, &code_cf, ); @@ -744,7 +744,7 @@ impl Blockstore { } }; } - recovered_data_shreds + recovered_shreds } /// The main helper function that performs the shred insertion logic @@ -888,15 +888,18 @@ impl Blockstore { metrics.insert_shreds_elapsed_us += start.as_us(); let mut start = Measure::start("Shred recovery"); if let Some(leader_schedule_cache) = leader_schedule { - let recovered_data_shreds = Self::try_shred_recovery( + let recovered_shreds = Self::try_shred_recovery( db, &erasure_metas, &mut index_working_set, &just_inserted_shreds, ); - metrics.num_recovered += recovered_data_shreds.len(); - let recovered_data_shreds: Vec<_> = recovered_data_shreds + metrics.num_recovered += recovered_shreds + .iter() + .filter(|shred| shred.is_data()) + .count(); + let recovered_shreds: Vec<_> = recovered_shreds .into_iter() .filter_map(|shred| { let leader = @@ -905,6 +908,12 @@ impl Blockstore { metrics.num_recovered_failed_sig += 1; return None; } + // Since the data shreds are fully recovered from the + // erasure batch, no need to store coding shreds in + // blockstore. + if shred.is_code() { + return Some(shred); + } match self.check_insert_data_shred( shred.clone(), &mut erasure_metas, @@ -941,10 +950,10 @@ impl Blockstore { // Always collect recovered-shreds so that above insert code is // executed even if retransmit-sender is None. .collect(); - if !recovered_data_shreds.is_empty() { + if !recovered_shreds.is_empty() { if let Some(retransmit_sender) = retransmit_sender { let _ = retransmit_sender.send( - recovered_data_shreds + recovered_shreds .into_iter() .map(Shred::into_payload) .collect(), diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index e17055b1e7d9a9..cee63cb45df57d 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -61,6 +61,7 @@ use { crate::blockstore::{self, MAX_DATA_SHREDS_PER_SLOT}, bitflags::bitflags, num_enum::{IntoPrimitive, TryFromPrimitive}, + reed_solomon_erasure::Error::TooFewShardsPresent, serde::{Deserialize, Serialize}, solana_entry::entry::{create_ticks, Entry}, solana_perf::packet::Packet, @@ -144,6 +145,10 @@ pub enum Error { InvalidPayloadSize(/*payload size:*/ usize), #[error("Invalid proof size: {0}")] InvalidProofSize(/*proof_size:*/ u8), + #[error("Invalid recovered shred")] + InvalidRecoveredShred, + #[error("Invalid shard size: {0}")] + InvalidShardSize(/*shard_size:*/ usize), #[error("Invalid shred flags: {0}")] InvalidShredFlags(u8), #[error("Invalid {0:?} shred index: {1}")] @@ -211,7 +216,7 @@ struct DataShredHeader { struct CodingShredHeader { num_data_shreds: u16, num_coding_shreds: u16, - position: u16, + position: u16, // [0..num_coding_shreds) } #[derive(Clone, Debug, PartialEq, Eq)] @@ -294,6 +299,8 @@ macro_rules! 
dispatch { } } +use dispatch; + impl Shred { dispatch!(fn common_header(&self) -> &ShredCommonHeader); dispatch!(fn set_signature(&mut self, signature: Signature)); @@ -494,6 +501,7 @@ impl Shred { } } + #[must_use] pub fn verify(&self, pubkey: &Pubkey) -> bool { let message = self.signed_message(); self.signature().verify(pubkey.as_ref(), message) @@ -642,6 +650,28 @@ impl From for Shred { } } +impl From for Shred { + fn from(shred: merkle::Shred) -> Self { + match shred { + merkle::Shred::ShredCode(shred) => Self::ShredCode(ShredCode::Merkle(shred)), + merkle::Shred::ShredData(shred) => Self::ShredData(ShredData::Merkle(shred)), + } + } +} + +impl TryFrom for merkle::Shred { + type Error = Error; + + fn try_from(shred: Shred) -> Result { + match shred { + Shred::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredCode(ShredCode::Merkle(shred)) => Ok(Self::ShredCode(shred)), + Shred::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant), + Shred::ShredData(ShredData::Merkle(shred)) => Ok(Self::ShredData(shred)), + } + } +} + impl From for ShredType { #[inline] fn from(shred_variant: ShredVariant) -> Self { @@ -682,6 +712,27 @@ impl TryFrom for ShredVariant { } } +pub(crate) fn recover(shreds: Vec) -> Result, Error> { + match shreds + .first() + .ok_or(TooFewShardsPresent)? + .common_header() + .shred_variant + { + ShredVariant::LegacyData | ShredVariant::LegacyCode => Shredder::try_recovery(shreds), + ShredVariant::MerkleCode(_) | ShredVariant::MerkleData(_) => { + let shreds = shreds + .into_iter() + .map(merkle::Shred::try_from) + .collect::>()?; + Ok(merkle::recover(shreds)? + .into_iter() + .map(Shred::from) + .collect()) + } + } +} + // Accepts shreds in the slot range [root + 1, max_slot]. #[must_use] pub fn should_discard_shred( diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs index 5b224632a2a880..9d0482b95354a5 100644 --- a/ledger/src/shred/merkle.rs +++ b/ledger/src/shred/merkle.rs @@ -1,12 +1,20 @@ +#[cfg(test)] +use {crate::shred::ShredType, solana_sdk::pubkey::Pubkey}; use { - crate::shred::{ - common::impl_shred_common, - shred_code, shred_data, - traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, - CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, - SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, - SIZE_OF_SIGNATURE, + crate::{ + shred::{ + common::impl_shred_common, + dispatch, shred_code, shred_data, + traits::{ + Shred as ShredTrait, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait, + }, + CodingShredHeader, DataShredHeader, Error, ShredCommonHeader, ShredFlags, ShredVariant, + SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, + SIZE_OF_SIGNATURE, + }, + shredder::ReedSolomon, }, + reed_solomon_erasure::Error::{InvalidIndex, TooFewParityShards, TooFewShards}, solana_perf::packet::deserialize_from_with_limit, solana_sdk::{ clock::Slot, @@ -58,12 +66,58 @@ pub struct ShredCode { payload: Vec, } +#[derive(Clone, Debug, Eq, PartialEq)] +pub(super) enum Shred { + ShredCode(ShredCode), + ShredData(ShredData), +} + #[derive(Clone, Debug, Eq, PartialEq)] struct MerkleBranch { root: MerkleRoot, proof: Vec, } +impl Shred { + dispatch!(fn common_header(&self) -> &ShredCommonHeader); + dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>); + dispatch!(fn erasure_shard_index(&self) -> Result); + dispatch!(fn merkle_tree_node(&self) -> Result); + dispatch!(fn 
sanitize(&self) -> Result<(), Error>); + dispatch!(fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error>); + + fn merkle_root(&self) -> &MerkleRoot { + match self { + Self::ShredCode(shred) => &shred.merkle_branch.root, + Self::ShredData(shred) => &shred.merkle_branch.root, + } + } +} + +#[cfg(test)] +impl Shred { + dispatch!(fn set_signature(&mut self, signature: Signature)); + dispatch!(fn signed_message(&self) -> &[u8]); + + fn index(&self) -> u32 { + self.common_header().index + } + + fn shred_type(&self) -> ShredType { + ShredType::from(self.common_header().shred_variant) + } + + fn signature(&self) -> Signature { + self.common_header().signature + } + + #[must_use] + fn verify(&self, pubkey: &Pubkey) -> bool { + let message = self.signed_message(); + self.signature().verify(pubkey.as_ref(), message) + } +} + impl ShredData { // proof_size is the number of proof entries in the merkle tree branch. fn proof_size(&self) -> Result { @@ -104,6 +158,52 @@ impl ShredData { let index = self.erasure_shard_index()?; Ok(verify_merkle_proof(index, node, &self.merkle_branch)) } + + fn from_recovered_shard(signature: &Signature, mut shard: Vec) -> Result { + let shard_size = shard.len(); + if shard_size + SIZE_OF_SIGNATURE > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, SIZE_OF_SIGNATURE); + shard[0..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); + // Deserialize headers. + let mut cursor = Cursor::new(&shard[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleData(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + if ShredCode::capacity(proof_size)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + Ok(Self { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } impl ShredCode { @@ -154,9 +254,66 @@ impl ShredCode { || self.merkle_branch.root != other.merkle_branch.root || self.common_header.signature != other.common_header.signature } + + fn from_recovered_shard( + common_header: ShredCommonHeader, + coding_header: CodingShredHeader, + mut shard: Vec, + ) -> Result { + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + _ => return Err(Error::InvalidShredVariant), + }; + let shard_size = shard.len(); + if Self::capacity(proof_size)? 
!= shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, Self::SIZE_OF_HEADERS); + let mut cursor = Cursor::new(&mut shard[..]); + bincode::serialize_into(&mut cursor, &common_header)?; + bincode::serialize_into(&mut cursor, &coding_header)?; + Ok(Self { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload: shard, + }) + } + + fn set_merkle_branch(&mut self, merkle_branch: MerkleBranch) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size)?; + let mut cursor = Cursor::new( + self.payload + .get_mut(offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + bincode::serialize_into(&mut cursor, &merkle_branch.root)?; + for entry in &merkle_branch.proof { + bincode::serialize_into(&mut cursor, entry)?; + } + self.merkle_branch = merkle_branch; + Ok(()) + } } -impl Shred for ShredData { +impl MerkleBranch { + fn new_zeroed(proof_size: u8) -> Self { + Self { + root: MerkleRoot::default(), + proof: vec![MerkleProofEntry::default(); usize::from(proof_size)], + } + } +} + +impl ShredTrait for ShredData { impl_shred_common!(); // Also equal to: @@ -249,7 +406,7 @@ impl Shred for ShredData { } } -impl Shred for ShredCode { +impl ShredTrait for ShredCode { impl_shred_common!(); const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; @@ -391,7 +548,6 @@ fn verify_merkle_proof(index: usize, node: Hash, merkle_branch: &MerkleBranch) - (index, root) == (0usize, &merkle_branch.root[..]) } -#[cfg(test)] fn make_merkle_tree(mut nodes: Vec) -> Vec { let mut size = nodes.len(); while size > 1 { @@ -407,7 +563,6 @@ fn make_merkle_tree(mut nodes: Vec) -> Vec { nodes } -#[cfg(test)] fn make_merkle_branch( mut index: usize, // leaf index ~ shred's erasure shard index. mut size: usize, // number of leaves ~ erasure batch size. @@ -434,9 +589,170 @@ fn make_merkle_branch( Some(MerkleBranch { root, proof }) } +pub(super) fn recover(mut shreds: Vec) -> Result, Error> { + // Grab {common, coding} headers from first coding shred. + let headers = shreds.iter().find_map(|shred| { + let shred = match shred { + Shred::ShredCode(shred) => shred, + Shred::ShredData(_) => return None, + }; + let position = u32::from(shred.coding_header.position); + let common_header = ShredCommonHeader { + index: shred.common_header.index.checked_sub(position)?, + ..shred.common_header + }; + let coding_header = CodingShredHeader { + position: 0u16, + ..shred.coding_header + }; + Some((common_header, coding_header)) + }); + let (common_header, coding_header) = headers.ok_or(TooFewParityShards)?; + debug_assert!(matches!( + common_header.shred_variant, + ShredVariant::MerkleCode(_) + )); + let proof_size = match common_header.shred_variant { + ShredVariant::MerkleCode(proof_size) => proof_size, + ShredVariant::MerkleData(_) | ShredVariant::LegacyCode | ShredVariant::LegacyData => { + return Err(Error::InvalidShredVariant); + } + }; + // Verify that shreds belong to the same erasure batch + // and have consistent headers. 
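+    // The shred `index` is deliberately excluded from the check below: data
+    // and coding shreds are numbered in separate index spaces, so only the
+    // fields shared by the whole erasure batch are compared.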
+ debug_assert!(shreds.iter().all(|shred| { + let ShredCommonHeader { + signature, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header(); + signature == &common_header.signature + && slot == &common_header.slot + && version == &common_header.version + && fec_set_index == &common_header.fec_set_index + && match shred { + Shred::ShredData(_) => shred_variant == &ShredVariant::MerkleData(proof_size), + Shred::ShredCode(shred) => { + let CodingShredHeader { + num_data_shreds, + num_coding_shreds, + position: _, + } = shred.coding_header; + shred_variant == &ShredVariant::MerkleCode(proof_size) + && num_data_shreds == coding_header.num_data_shreds + && num_coding_shreds == coding_header.num_coding_shreds + } + } + })); + let num_data_shreds = usize::from(coding_header.num_data_shreds); + let num_coding_shreds = usize::from(coding_header.num_coding_shreds); + let num_shards = num_data_shreds + num_coding_shreds; + // Obtain erasure encoded shards from shreds. + let shreds = { + let mut batch = vec![None; num_shards]; + while let Some(shred) = shreds.pop() { + let index = match shred.erasure_shard_index() { + Ok(index) if index < batch.len() => index, + _ => return Err(Error::from(InvalidIndex)), + }; + batch[index] = Some(shred); + } + batch + }; + let mut shards: Vec>> = shreds + .iter() + .map(|shred| Some(shred.as_ref()?.erasure_shard_as_slice().ok()?.to_vec())) + .collect(); + ReedSolomon::new(num_data_shreds, num_coding_shreds)?.reconstruct(&mut shards)?; + let mask: Vec<_> = shreds.iter().map(Option::is_some).collect(); + // Reconstruct code and data shreds from erasure encoded shards. + let mut shreds: Vec<_> = shreds + .into_iter() + .zip(shards) + .enumerate() + .map(|(index, (shred, shard))| { + if let Some(shred) = shred { + return Ok(shred); + } + let shard = shard.ok_or(TooFewShards)?; + if index < num_data_shreds { + let shred = ShredData::from_recovered_shard(&common_header.signature, shard)?; + let ShredCommonHeader { + signature: _, + shred_variant, + slot, + index: _, + version, + fec_set_index, + } = shred.common_header; + if shred_variant != ShredVariant::MerkleData(proof_size) + || common_header.slot != slot + || common_header.version != version + || common_header.fec_set_index != fec_set_index + { + return Err(Error::InvalidRecoveredShred); + } + Ok(Shred::ShredData(shred)) + } else { + let offset = index - num_data_shreds; + let coding_header = CodingShredHeader { + position: offset as u16, + ..coding_header + }; + let common_header = ShredCommonHeader { + index: common_header.index + offset as u32, + ..common_header + }; + let shred = ShredCode::from_recovered_shard(common_header, coding_header, shard)?; + Ok(Shred::ShredCode(shred)) + } + }) + .collect::>()?; + // Compute merkle tree and set the merkle branch on the recovered shreds. 
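+    // Every shred contributes its merkle_tree_node() hash as a leaf. Shreds
+    // that were already present keep their embedded root and are only checked
+    // against the recomputed root below; recovered shreds get a freshly built
+    // branch (root plus sibling hashes) serialized back into their payloads.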
+ let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>()?; + let tree = make_merkle_tree(nodes); + let merkle_root = &tree.last().unwrap().as_ref()[..SIZE_OF_MERKLE_ROOT]; + let merkle_root = MerkleRoot::try_from(merkle_root).unwrap(); + for (index, (shred, mask)) in shreds.iter_mut().zip(&mask).enumerate() { + if *mask { + if shred.merkle_root() != &merkle_root { + return Err(Error::InvalidMerkleProof); + } + } else { + let merkle_branch = + make_merkle_branch(index, num_shards, &tree).ok_or(Error::InvalidMerkleProof)?; + if merkle_branch.proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + shred.set_merkle_branch(merkle_branch)?; + } + } + // TODO: No need to verify merkle proof in sanitize here. + shreds + .into_iter() + .zip(mask) + .filter(|(_, mask)| !mask) + .map(|(shred, _)| shred.sanitize().map(|_| shred)) + .collect() +} + #[cfg(test)] mod test { - use {super::*, rand::Rng, std::iter::repeat_with}; + use { + super::*, + itertools::Itertools, + matches::assert_matches, + rand::{seq::SliceRandom, CryptoRng, Rng}, + solana_sdk::signature::{Keypair, Signer}, + std::{cmp::Ordering, iter::repeat_with}, + test_case::test_case, + }; // Total size of a data shred including headers and merkle branch. fn shred_data_size_of_payload(proof_size: u8) -> usize { @@ -525,4 +841,153 @@ mod test { run_merkle_tree_round_trip(size); } } + + #[test_case(37)] + #[test_case(64)] + #[test_case(73)] + fn test_recover_merkle_shreds(num_shreds: usize) { + let mut rng = rand::thread_rng(); + for num_data_shreds in 1..num_shreds { + let num_coding_shreds = num_shreds - num_data_shreds; + run_recover_merkle_shreds(&mut rng, num_data_shreds, num_coding_shreds); + } + } + + fn run_recover_merkle_shreds( + rng: &mut R, + num_data_shreds: usize, + num_coding_shreds: usize, + ) { + let keypair = Keypair::generate(rng); + let num_shreds = num_data_shreds + num_coding_shreds; + let proof_size = (num_shreds as f64).log2().ceil() as u8; + let capacity = ShredData::capacity(proof_size).unwrap(); + let common_header = ShredCommonHeader { + signature: Signature::default(), + shred_variant: ShredVariant::MerkleData(proof_size), + slot: 145865705, + index: 1835, + version: 4978, + fec_set_index: 1835, + }; + let data_header = DataShredHeader { + parent_offset: 25, + flags: unsafe { ShredFlags::from_bits_unchecked(0b0010_1010) }, + size: 0, + }; + let coding_header = CodingShredHeader { + num_data_shreds: num_data_shreds as u16, + num_coding_shreds: num_coding_shreds as u16, + position: 0, + }; + let mut shreds = Vec::with_capacity(num_shreds); + for i in 0..num_data_shreds { + let common_header = ShredCommonHeader { + index: common_header.index + i as u32, + ..common_header + }; + let size = ShredData::SIZE_OF_HEADERS + rng.gen_range(0, capacity); + let data_header = DataShredHeader { + size: size as u16, + ..data_header + }; + let mut payload = vec![0u8; ShredData::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &data_header).unwrap(); + rng.fill(&mut payload[ShredData::SIZE_OF_HEADERS..size]); + let shred = ShredData { + common_header, + data_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredData(shred)); + } + let data: Vec<_> = shreds + .iter() + .map(Shred::erasure_shard_as_slice) + .collect::>() + .unwrap(); + let mut parity = vec![vec![0u8; data[0].len()]; num_coding_shreds]; + 
ReedSolomon::new(num_data_shreds, num_coding_shreds) + .unwrap() + .encode_sep(&data, &mut parity[..]) + .unwrap(); + for (i, code) in parity.into_iter().enumerate() { + let common_header = ShredCommonHeader { + shred_variant: ShredVariant::MerkleCode(proof_size), + index: common_header.index + i as u32 + 7, + ..common_header + }; + let coding_header = CodingShredHeader { + position: i as u16, + ..coding_header + }; + let mut payload = vec![0u8; ShredCode::SIZE_OF_PAYLOAD]; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &coding_header).unwrap(); + payload[ShredCode::SIZE_OF_HEADERS..ShredCode::SIZE_OF_HEADERS + code.len()] + .copy_from_slice(&code); + let shred = ShredCode { + common_header, + coding_header, + merkle_branch: MerkleBranch::new_zeroed(proof_size), + payload, + }; + shreds.push(Shred::ShredCode(shred)); + } + let nodes: Vec<_> = shreds + .iter() + .map(Shred::merkle_tree_node) + .collect::>() + .unwrap(); + let tree = make_merkle_tree(nodes); + for (index, shred) in shreds.iter_mut().enumerate() { + let merkle_branch = make_merkle_branch(index, num_shreds, &tree).unwrap(); + assert_eq!(merkle_branch.proof.len(), usize::from(proof_size)); + shred.set_merkle_branch(merkle_branch).unwrap(); + let signature = keypair.sign_message(shred.signed_message()); + shred.set_signature(signature); + assert!(shred.verify(&keypair.pubkey())); + assert_matches!(shred.sanitize(), Ok(())); + } + assert_eq!(shreds.iter().map(Shred::signature).dedup().count(), 1); + for size in num_data_shreds..num_shreds { + let mut shreds = shreds.clone(); + let mut removed_shreds = Vec::new(); + while shreds.len() > size { + let index = rng.gen_range(0, shreds.len()); + removed_shreds.push(shreds.swap_remove(index)); + } + shreds.shuffle(rng); + // Should at least contain one coding shred. 
+ if shreds.iter().all(|shred| { + matches!( + shred.common_header().shred_variant, + ShredVariant::MerkleData(_) + ) + }) { + assert_matches!( + recover(shreds), + Err(Error::ErasureError(TooFewParityShards)) + ); + continue; + } + let recovered_shreds = recover(shreds).unwrap(); + assert_eq!(size + recovered_shreds.len(), num_shreds); + assert_eq!(recovered_shreds.len(), removed_shreds.len()); + removed_shreds.sort_by(|a, b| { + if a.shred_type() == b.shred_type() { + a.index().cmp(&b.index()) + } else if a.shred_type() == ShredType::Data { + Ordering::Less + } else { + Ordering::Greater + } + }); + assert_eq!(recovered_shreds, removed_shreds); + } + } } diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index d3a50cb82dc1ca..671cc0b7c44c47 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -33,7 +33,7 @@ const ERASURE_BATCH_SIZE: [usize; 33] = [ 55, 56, 58, 59, 60, 62, 63, 64, // 32 ]; -type ReedSolomon = reed_solomon_erasure::ReedSolomon; +pub(crate) type ReedSolomon = reed_solomon_erasure::ReedSolomon; #[derive(Debug)] pub struct Shredder { From aa2a28616b78208cb27a3129c1bdad3a1f15374e Mon Sep 17 00:00:00 2001 From: Brooks Prumo Date: Fri, 19 Aug 2022 18:15:04 -0400 Subject: [PATCH 57/67] Simplify `Bank::clean_accounts()` by removing params (#27254) --- runtime/src/accounts_background_service.rs | 8 ++----- runtime/src/bank.rs | 24 ++++++++++++--------- runtime/src/snapshot_utils.rs | 6 +++--- runtime/src/system_instruction_processor.rs | 5 +++-- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index c38203ab821e96..8d21fed9c7c939 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -254,11 +254,7 @@ impl SnapshotRequestHandler { }; let mut clean_time = Measure::start("clean_time"); - // Don't clean the slot we're snapshotting because it may have zero-lamport - // accounts that were included in the bank delta hash when the bank was frozen, - // and if we clean them here, the newly created snapshot's hash may not match - // the frozen hash. 
- snapshot_root_bank.clean_accounts(true, false, *last_full_snapshot_slot); + snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); clean_time.stop(); if accounts_db_caching_enabled { @@ -564,7 +560,7 @@ impl AccountsBackgroundService { // slots >= bank.slot() bank.force_flush_accounts_cache(); } - bank.clean_accounts(true, false, last_full_snapshot_slot); + bank.clean_accounts(last_full_snapshot_slot); last_cleaned_block_height = bank.block_height(); } } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index dcffc6cb987b7f..7ab7b710c807ef 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -7181,7 +7181,7 @@ impl Bank { let mut clean_time = Measure::start("clean"); if !accounts_db_skip_shrink && self.slot() > 0 { info!("cleaning.."); - self.clean_accounts(true, true, Some(last_full_snapshot_slot)); + self._clean_accounts(true, true, Some(last_full_snapshot_slot)); } clean_time.stop(); @@ -7465,12 +7465,7 @@ impl Bank { debug!("Added precompiled program {:?}", program_id); } - pub fn clean_accounts( - &self, - skip_last: bool, - is_startup: bool, - last_full_snapshot_slot: Option, - ) { + pub(crate) fn clean_accounts(&self, last_full_snapshot_slot: Option) { // Don't clean the slot we're snapshotting because it may have zero-lamport // accounts that were included in the bank delta hash when the bank was frozen, // and if we clean them here, any newly created snapshot's hash for this bank @@ -7478,10 +7473,19 @@ impl Bank { // // So when we're snapshotting, set `skip_last` to true so the highest slot to clean is // lowered by one. - let highest_slot_to_clean = skip_last.then(|| self.slot().saturating_sub(1)); + self._clean_accounts(true, false, last_full_snapshot_slot) + } + + fn _clean_accounts( + &self, + skip_last: bool, + is_startup: bool, + last_full_snapshot_slot: Option, + ) { + let max_clean_root = skip_last.then(|| self.slot().saturating_sub(1)); self.rc.accounts.accounts_db.clean_accounts( - highest_slot_to_clean, + max_clean_root, is_startup, last_full_snapshot_slot, ); @@ -16282,7 +16286,7 @@ pub(crate) mod tests { current_bank.squash(); if current_bank.slot() % 2 == 0 { current_bank.force_flush_accounts_cache(); - current_bank.clean_accounts(true, false, None); + current_bank.clean_accounts(None); } prev_bank = current_bank.clone(); current_bank = Arc::new(Bank::new_from_parent( diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index 93cdbc0f33fc0c..4717bb1ab4f356 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -2078,7 +2078,7 @@ pub fn bank_to_full_snapshot_archive( assert!(bank.is_complete()); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(bank.slot())); + bank.clean_accounts(Some(bank.slot())); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -2125,7 +2125,7 @@ pub fn bank_to_incremental_snapshot_archive( assert!(bank.slot() > full_snapshot_slot); bank.squash(); // Bank may not be a root bank.force_flush_accounts_cache(); - bank.clean_accounts(true, false, Some(full_snapshot_slot)); + bank.clean_accounts(Some(full_snapshot_slot)); bank.update_accounts_hash(); bank.rehash(); // Bank accounts may have been manually modified by the caller @@ -3771,7 +3771,7 @@ mod tests { // Ensure account1 has been cleaned/purged from everywhere bank4.squash(); - bank4.clean_accounts(true, false, Some(full_snapshot_slot)); + bank4.clean_accounts(Some(full_snapshot_slot)); assert!( 
bank4.get_account_modified_slot(&key1.pubkey()).is_none(),
            "Ensure Account1 has been cleaned and purged from AccountsDb"
diff --git a/runtime/src/system_instruction_processor.rs b/runtime/src/system_instruction_processor.rs
index 67f1f931147cef..c0b588c25c42c4 100644
--- a/runtime/src/system_instruction_processor.rs
+++ b/runtime/src/system_instruction_processor.rs
@@ -1626,6 +1626,7 @@ mod tests {
             .unwrap();

         // super fun time; callback chooses to .clean_accounts(None) or not
+        let bank = Arc::new(Bank::new_from_parent(&bank, &collector, bank.slot() + 1));
         callback(&*bank);

         // create a normal account at the same pubkey as the zero-lamports account
@@ -1651,9 +1652,9 @@ mod tests {
         bank.squash();
         bank.force_flush_accounts_cache();
         // do clean and assert that it actually did its job
+        assert_eq!(4, bank.get_snapshot_storages(None).len());
+        bank.clean_accounts(None);
         assert_eq!(3, bank.get_snapshot_storages(None).len());
-        bank.clean_accounts(false, false, None);
-        assert_eq!(2, bank.get_snapshot_storages(None).len());
     });
 }

From 5c856b3962b2eef30e8a1585e322dc6f373c3406 Mon Sep 17 00:00:00 2001
From: Xiang Zhu
Date: Fri, 19 Aug 2022 23:56:52 -0700
Subject: [PATCH 58/67] Account files remove (#26910)

* Create a new function cleanup_accounts_paths, a trivial change

* Remove account files asynchronously

* Update and simplify the implementation after the validator test runs.

* Fixes after testing on the dev device

* Discard tokio. Use thread instead

* Fix comments format

* Fix config type to pass the github test

* Fix failed tests. Handle the case of non-existing path

* Final cleanup, addressing the review comments
  Avoided OsString. Made the function more generic with "impl AsRef<Path>"

Co-authored-by: Jeff Washington
---
 core/src/validator.rs | 64 +++++++++++++++++++++++++++++++++----------
 1 file changed, 50 insertions(+), 14 deletions(-)

diff --git a/core/src/validator.rs b/core/src/validator.rs
index 5477985c27ad66..ca8ba125fd3029 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -454,14 +454,7 @@ impl Validator {
         info!("Cleaning accounts paths..");
         *start_progress.write().unwrap() = ValidatorStartProgress::CleaningAccounts;
         let mut start = Measure::start("clean_accounts_paths");
-        for accounts_path in &config.account_paths {
-            cleanup_accounts_path(accounts_path);
-        }
-        if let Some(ref shrink_paths) = config.account_shrink_paths {
-            for accounts_path in shrink_paths {
-                cleanup_accounts_path(accounts_path);
-            }
-        }
+        cleanup_accounts_paths(config);
         start.stop();
         info!("done. {}", start);
@@ -2058,13 +2051,56 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo
     online_stake_percentage as u64
 }

-// Cleanup anything that looks like an accounts append-vec
-fn cleanup_accounts_path(account_path: &std::path::Path) {
-    if let Err(e) = std::fs::remove_dir_all(account_path) {
-        warn!(
-            "encountered error removing accounts path: {:?}: {}",
-            account_path, e
+/// Delete directories/files asynchronously to avoid blocking on them.
+/// First, in a sync context, rename the original path to *_to_be_deleted,
+/// then spawn a thread to delete the renamed path.
+/// If the process is killed before the deletion is done, the leftover
+/// path will be deleted in the next process life, so no disk space is
+/// leaked.
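+/// Renaming within the same parent directory only rewrites filesystem
+/// metadata, so the synchronous part stays cheap even for very large
+/// account directories; the expensive recursive delete runs entirely on
+/// the spawned thread.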
+fn move_and_async_delete_path(path: impl AsRef + Copy) { + let mut path_delete = PathBuf::new(); + path_delete.push(path); + path_delete.set_file_name(format!( + "{}{}", + path_delete.file_name().unwrap().to_str().unwrap(), + "_to_be_deleted" + )); + + if path_delete.exists() { + debug!("{} exists, delete it first.", path_delete.display()); + std::fs::remove_dir_all(&path_delete).unwrap(); + } + + if !path.as_ref().exists() { + info!( + "move_and_async_delete_path: path {} does not exist", + path.as_ref().display() ); + return; + } + + std::fs::rename(&path, &path_delete).unwrap(); + + Builder::new() + .name("delete_path".to_string()) + .spawn(move || { + std::fs::remove_dir_all(&path_delete).unwrap(); + info!( + "Cleaning path {} done asynchronously in a spawned thread", + path_delete.display() + ); + }) + .unwrap(); +} + +fn cleanup_accounts_paths(config: &ValidatorConfig) { + for accounts_path in &config.account_paths { + move_and_async_delete_path(accounts_path); + } + if let Some(ref shrink_paths) = config.account_shrink_paths { + for accounts_path in shrink_paths { + move_and_async_delete_path(accounts_path); + } } } From 9e769fb105190486d37560ac135241ca38ac8975 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Sat, 20 Aug 2022 11:20:47 +0200 Subject: [PATCH 59/67] Refactor: Flattens `TransactionContext::instruction_trace` (#27109) * Flattens TransactionContext::instruction_trace. * Stop the search at transaction level. * Renames get_instruction_context_at => get_instruction_context_at_nesting_level. * Removes TransactionContext::get_instruction_trace(). Adds TransactionContext::get_instruction_trace_length() and TransactionContext::get_instruction_context_at_index(). * Have TransactionContext::instruction_accounts_lamport_sum() accept an iterator instead of a slice. * Removes instruction_trace from ExecutionRecord. 
* make InstructionContext::new() private --- program-runtime/src/invoke_context.rs | 2 +- programs/bpf_loader/src/serialization.rs | 19 ++- programs/bpf_loader/src/syscalls/mod.rs | 55 +++---- runtime/src/bank.rs | 127 +++++++-------- runtime/src/message_processor.rs | 2 +- sdk/src/transaction_context.rs | 188 ++++++++++------------- 6 files changed, 188 insertions(+), 205 deletions(-) diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index b54e6f987561e5..ca3582c7ae1dc2 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -325,7 +325,7 @@ impl<'a> InvokeContext<'a> { .get_instruction_context_stack_height()) .any(|level| { self.transaction_context - .get_instruction_context_at(level) + .get_instruction_context_at_nesting_level(level) .and_then(|instruction_context| { instruction_context .try_borrow_last_program_account(self.transaction_context) diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index 6c046b2c0f2f0b..cb02226953420c 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -497,21 +497,20 @@ mod tests { &program_indices, ) .instruction_accounts; + let instruction_data = vec![]; - let transaction_context = + let mut transaction_context = TransactionContext::new(transaction_accounts, Some(Rent::default()), 1, 1); - let instruction_data = vec![]; - let instruction_context = InstructionContext::new( - 0, - 0, - &program_indices, - &instruction_accounts, - &instruction_data, - ); + transaction_context + .push(&program_indices, &instruction_accounts, &instruction_data) + .unwrap(); + let instruction_context = transaction_context + .get_instruction_context_at_index_in_trace(0) + .unwrap(); let serialization_result = serialize_parameters( &transaction_context, - &instruction_context, + instruction_context, should_cap_ix_accounts, ); assert_eq!( diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 2175a1f2a12c27..2893849a9f0582 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -1709,34 +1709,37 @@ declare_syscall!( result ); + // Reverse iterate through the instruction trace, + // ignoring anything except instructions on the same level let stack_height = invoke_context.get_stack_height(); - let instruction_trace = invoke_context.transaction_context.get_instruction_trace(); - let instruction_context = if stack_height == TRANSACTION_LEVEL_STACK_HEIGHT { - // pick one of the top-level instructions - instruction_trace - .len() - .checked_sub(2) - .and_then(|result| result.checked_sub(index as usize)) - .and_then(|index| instruction_trace.get(index)) - .and_then(|instruction_list| instruction_list.first()) - } else { - // Walk the last list of inner instructions - instruction_trace.last().and_then(|inners| { - let mut current_index = 0; - inners.iter().rev().skip(1).find(|instruction_context| { - if stack_height == instruction_context.get_stack_height() { - if index == current_index { - return true; - } else { - current_index = current_index.saturating_add(1); - } - } - false - }) - }) - }; + let instruction_trace_length = invoke_context + .transaction_context + .get_instruction_trace_length(); + let mut reverse_index_at_stack_height = 0; + let mut found_instruction_context = None; + for index_in_trace in (0..instruction_trace_length).rev() { + let instruction_context = question_mark!( + invoke_context + 
.transaction_context + .get_instruction_context_at_index_in_trace(index_in_trace) + .map_err(SyscallError::InstructionError), + result + ); + if instruction_context.get_stack_height() == TRANSACTION_LEVEL_STACK_HEIGHT + && stack_height > TRANSACTION_LEVEL_STACK_HEIGHT + { + break; + } + if instruction_context.get_stack_height() == stack_height { + if index.saturating_add(1) == reverse_index_at_stack_height { + found_instruction_context = Some(instruction_context); + break; + } + reverse_index_at_stack_height = reverse_index_at_stack_height.saturating_add(1); + } + } - if let Some(instruction_context) = instruction_context { + if let Some(instruction_context) = found_instruction_context { let ProcessedSiblingInstruction { data_len, accounts_len, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 7ab7b710c807ef..ba192c7af4d937 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -120,7 +120,7 @@ use { hash::{extend_and_hash, hashv, Hash}, incinerator, inflation::Inflation, - instruction::CompiledInstruction, + instruction::{CompiledInstruction, TRANSACTION_LEVEL_STACK_HEIGHT}, lamports::LamportsError, message::{AccountKeys, SanitizedMessage}, native_loader, @@ -143,8 +143,7 @@ use { TransactionVerificationMode, VersionedTransaction, MAX_TX_ACCOUNT_LOCKS, }, transaction_context::{ - ExecutionRecord, InstructionTrace, TransactionAccount, TransactionContext, - TransactionReturnData, + ExecutionRecord, TransactionAccount, TransactionContext, TransactionReturnData, }, }, solana_stake_program::stake_state::{ @@ -762,40 +761,50 @@ pub type InnerInstructions = Vec; /// a transaction pub type InnerInstructionsList = Vec; -/// Convert from an InstructionTrace to InnerInstructionsList +/// Extract the InnerInstructionsList from a TransactionContext pub fn inner_instructions_list_from_instruction_trace( - instruction_trace: &InstructionTrace, + transaction_context: &TransactionContext, ) -> InnerInstructionsList { - instruction_trace - .iter() - .map(|inner_instructions_trace| { - inner_instructions_trace - .iter() - .skip(1) - .map(|instruction_context| { - CompiledInstruction::new_from_raw_parts( - instruction_context - .get_index_of_program_account_in_transaction( - instruction_context - .get_number_of_program_accounts() - .saturating_sub(1), - ) - .unwrap_or_default() as u8, - instruction_context.get_instruction_data().to_vec(), - (0..instruction_context.get_number_of_instruction_accounts()) - .map(|instruction_account_index| { - instruction_context - .get_index_of_instruction_account_in_transaction( - instruction_account_index, - ) - .unwrap_or_default() as u8 - }) - .collect(), - ) - }) - .collect() - }) - .collect() + debug_assert!(transaction_context + .get_instruction_context_at_index_in_trace(0) + .map(|instruction_context| instruction_context.get_stack_height() + == TRANSACTION_LEVEL_STACK_HEIGHT) + .unwrap_or(true)); + let mut outer_instructions = Vec::new(); + for index_in_trace in 0..transaction_context.get_instruction_trace_length() { + if let Ok(instruction_context) = + transaction_context.get_instruction_context_at_index_in_trace(index_in_trace) + { + if instruction_context.get_stack_height() == TRANSACTION_LEVEL_STACK_HEIGHT { + outer_instructions.push(Vec::new()); + } else if let Some(inner_instructions) = outer_instructions.last_mut() { + inner_instructions.push(CompiledInstruction::new_from_raw_parts( + instruction_context + .get_index_of_program_account_in_transaction( + instruction_context + .get_number_of_program_accounts() + .saturating_sub(1), + ) + 
.unwrap_or_default() as u8, + instruction_context.get_instruction_data().to_vec(), + (0..instruction_context.get_number_of_instruction_accounts()) + .map(|instruction_account_index| { + instruction_context + .get_index_of_instruction_account_in_transaction( + instruction_account_index, + ) + .unwrap_or_default() as u8 + }) + .collect(), + )); + } else { + debug_assert!(false); + } + } else { + debug_assert!(false); + } + } + outer_instructions } /// A list of log messages emitted during a transaction @@ -4452,9 +4461,16 @@ impl Bank { .ok() }); + let inner_instructions = if enable_cpi_recording { + Some(inner_instructions_list_from_instruction_trace( + &transaction_context, + )) + } else { + None + }; + let ExecutionRecord { accounts, - instruction_trace, mut return_data, changed_account_count, total_size_of_all_accounts, @@ -4482,14 +4498,6 @@ impl Bank { accounts_data_len_delta = status.as_ref().map_or(0, |_| accounts_resize_delta); } - let inner_instructions = if enable_cpi_recording { - Some(inner_instructions_list_from_instruction_trace( - &instruction_trace, - )) - } else { - None - }; - let return_data = if enable_return_data_recording { if let Some(end_index) = return_data.data.iter().rposition(|&x| x != 0) { let end_index = end_index.saturating_add(1); @@ -8071,7 +8079,6 @@ pub(crate) mod tests { system_program, timing::duration_as_s, transaction::MAX_TX_ACCOUNT_LOCKS, - transaction_context::InstructionContext, }, solana_vote_program::{ vote_instruction, @@ -18863,26 +18870,24 @@ pub(crate) mod tests { #[test] fn test_inner_instructions_list_from_instruction_trace() { - let instruction_trace = vec![ - vec![ - InstructionContext::new(0, 0, &[], &[], &[1]), - InstructionContext::new(1, 0, &[], &[], &[2]), - ], - vec![], - vec![ - InstructionContext::new(0, 0, &[], &[], &[3]), - InstructionContext::new(1, 0, &[], &[], &[4]), - InstructionContext::new(2, 0, &[], &[], &[5]), - InstructionContext::new(1, 0, &[], &[], &[6]), - ], - ]; - - let inner_instructions = inner_instructions_list_from_instruction_trace(&instruction_trace); + let mut transaction_context = TransactionContext::new(vec![], None, 3, 3); + for (index_in_trace, stack_height) in [1, 2, 1, 1, 2, 3, 2].into_iter().enumerate() { + while stack_height <= transaction_context.get_instruction_context_stack_height() { + transaction_context.pop().unwrap(); + } + if stack_height > transaction_context.get_instruction_context_stack_height() { + transaction_context + .push(&[], &[], &[index_in_trace as u8]) + .unwrap(); + } + } + let inner_instructions = + inner_instructions_list_from_instruction_trace(&transaction_context); assert_eq!( inner_instructions, vec![ - vec![CompiledInstruction::new_from_raw_parts(0, vec![2], vec![])], + vec![CompiledInstruction::new_from_raw_parts(0, vec![1], vec![])], vec![], vec![ CompiledInstruction::new_from_raw_parts(0, vec![4], vec![]), diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 23eb1e800e9818..c95bfc0b72eae2 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -680,6 +680,6 @@ mod tests { InstructionError::Custom(0xbabb1e) )) ); - assert_eq!(transaction_context.get_instruction_trace().len(), 2); + assert_eq!(transaction_context.get_instruction_trace_length(), 2); } } diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 25e67523335ce4..d22a91372b7745 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -48,8 +48,7 @@ pub struct TransactionContext { 
account_touched_flags: RefCell>>, instruction_context_capacity: usize, instruction_stack: Vec, - number_of_instructions_at_transaction_level: usize, - instruction_trace: InstructionTrace, + instruction_trace: Vec, return_data: TransactionReturnData, accounts_resize_delta: RefCell, rent: Option, @@ -61,7 +60,7 @@ impl TransactionContext { transaction_accounts: Vec, rent: Option, instruction_context_capacity: usize, - number_of_instructions_at_transaction_level: usize, + _number_of_instructions_at_transaction_level: usize, ) -> Self { let (account_keys, accounts): (Vec, Vec>) = transaction_accounts @@ -75,8 +74,7 @@ impl TransactionContext { account_touched_flags: RefCell::new(Pin::new(account_touched_flags.into_boxed_slice())), instruction_context_capacity, instruction_stack: Vec::with_capacity(instruction_context_capacity), - number_of_instructions_at_transaction_level, - instruction_trace: Vec::with_capacity(number_of_instructions_at_transaction_level), + instruction_trace: Vec::new(), return_data: TransactionReturnData::default(), accounts_resize_delta: RefCell::new(0), rent, @@ -139,29 +137,32 @@ impl TransactionContext { self.account_keys.iter().rposition(|key| key == pubkey) } + /// Returns instruction trace length + pub fn get_instruction_trace_length(&self) -> usize { + self.instruction_trace.len() + } + + /// Gets an InstructionContext by its index in the trace + pub fn get_instruction_context_at_index_in_trace( + &self, + index_in_trace: usize, + ) -> Result<&InstructionContext, InstructionError> { + self.instruction_trace + .get(index_in_trace) + .ok_or(InstructionError::CallDepth) + } + /// Gets an InstructionContext by its nesting level in the stack - pub fn get_instruction_context_at( + pub fn get_instruction_context_at_nesting_level( &self, - level: usize, + nesting_level: usize, ) -> Result<&InstructionContext, InstructionError> { - let top_level_index = *self + let index_in_trace = *self .instruction_stack - .first() - .ok_or(InstructionError::CallDepth)?; - let cpi_index = if level == 0 { - 0 - } else { - *self - .instruction_stack - .get(level) - .ok_or(InstructionError::CallDepth)? 
- }; - let instruction_context = self - .instruction_trace - .get(top_level_index) - .and_then(|instruction_trace| instruction_trace.get(cpi_index)) + .get(nesting_level) .ok_or(InstructionError::CallDepth)?; - debug_assert_eq!(instruction_context.nesting_level, level); + let instruction_context = self.get_instruction_context_at_index_in_trace(index_in_trace)?; + debug_assert_eq!(instruction_context.nesting_level, nesting_level); Ok(instruction_context) } @@ -182,7 +183,7 @@ impl TransactionContext { .get_instruction_context_stack_height() .checked_sub(1) .ok_or(InstructionError::CallDepth)?; - self.get_instruction_context_at(level) + self.get_instruction_context_at_nesting_level(level) } /// Pushes a new InstructionContext @@ -193,49 +194,32 @@ impl TransactionContext { instruction_data: &[u8], ) -> Result<(), InstructionError> { let callee_instruction_accounts_lamport_sum = - self.instruction_accounts_lamport_sum(instruction_accounts)?; - let index_in_trace = if self.instruction_stack.is_empty() { - debug_assert!( - self.instruction_trace.len() < self.number_of_instructions_at_transaction_level - ); - let instruction_context = InstructionContext { - nesting_level: self.instruction_stack.len(), - instruction_accounts_lamport_sum: callee_instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), - }; - self.instruction_trace.push(vec![instruction_context]); - self.instruction_trace.len().saturating_sub(1) - } else { - if self.is_early_verification_of_account_modifications_enabled() { - let caller_instruction_context = self.get_current_instruction_context()?; - let original_caller_instruction_accounts_lamport_sum = - caller_instruction_context.instruction_accounts_lamport_sum; - let current_caller_instruction_accounts_lamport_sum = self - .instruction_accounts_lamport_sum( - &caller_instruction_context.instruction_accounts, - )?; - if original_caller_instruction_accounts_lamport_sum - != current_caller_instruction_accounts_lamport_sum - { - return Err(InstructionError::UnbalancedInstruction); - } - } - if let Some(instruction_trace) = self.instruction_trace.last_mut() { - let instruction_context = InstructionContext { - nesting_level: self.instruction_stack.len(), - instruction_accounts_lamport_sum: callee_instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), - }; - instruction_trace.push(instruction_context); - instruction_trace.len().saturating_sub(1) - } else { - return Err(InstructionError::CallDepth); + self.instruction_accounts_lamport_sum(instruction_accounts.iter())?; + if !self.instruction_stack.is_empty() + && self.is_early_verification_of_account_modifications_enabled() + { + let caller_instruction_context = self.get_current_instruction_context()?; + let original_caller_instruction_accounts_lamport_sum = + caller_instruction_context.instruction_accounts_lamport_sum; + let current_caller_instruction_accounts_lamport_sum = self + .instruction_accounts_lamport_sum( + caller_instruction_context.instruction_accounts.iter(), + )?; + if original_caller_instruction_accounts_lamport_sum + != current_caller_instruction_accounts_lamport_sum + { + return Err(InstructionError::UnbalancedInstruction); } - }; + } + let instruction_context = InstructionContext::new( + self.instruction_stack.len(), + callee_instruction_accounts_lamport_sum, + 
program_accounts.to_vec(), + instruction_accounts.to_vec(), + instruction_data.to_vec(), + ); + let index_in_trace = self.instruction_trace.len(); + self.instruction_trace.push(instruction_context); if self.instruction_stack.len() >= self.instruction_context_capacity { return Err(InstructionError::CallDepth); } @@ -249,26 +233,27 @@ impl TransactionContext { return Err(InstructionError::CallDepth); } // Verify (before we pop) that the total sum of all lamports in this instruction did not change - let detected_an_unbalanced_instruction = if self - .is_early_verification_of_account_modifications_enabled() - { - self.get_current_instruction_context() - .and_then(|instruction_context| { - // Verify all executable accounts have no outstanding refs - for account_index in instruction_context.program_accounts.iter() { - self.get_account_at_index(*account_index)? - .try_borrow_mut() - .map_err(|_| InstructionError::AccountBorrowOutstanding)?; - } - self.instruction_accounts_lamport_sum(&instruction_context.instruction_accounts) + let detected_an_unbalanced_instruction = + if self.is_early_verification_of_account_modifications_enabled() { + self.get_current_instruction_context() + .and_then(|instruction_context| { + // Verify all executable accounts have no outstanding refs + for account_index in instruction_context.program_accounts.iter() { + self.get_account_at_index(*account_index)? + .try_borrow_mut() + .map_err(|_| InstructionError::AccountBorrowOutstanding)?; + } + self.instruction_accounts_lamport_sum( + instruction_context.instruction_accounts.iter(), + ) .map(|instruction_accounts_lamport_sum| { instruction_context.instruction_accounts_lamport_sum != instruction_accounts_lamport_sum }) - }) - } else { - Ok(false) - }; + }) + } else { + Ok(false) + }; // Always pop, even if we `detected_an_unbalanced_instruction` self.instruction_stack.pop(); if detected_an_unbalanced_instruction? { @@ -293,23 +278,19 @@ impl TransactionContext { Ok(()) } - /// Returns instruction trace - pub fn get_instruction_trace(&self) -> &InstructionTrace { - &self.instruction_trace - } - /// Calculates the sum of all lamports within an instruction - fn instruction_accounts_lamport_sum( - &self, - instruction_accounts: &[InstructionAccount], - ) -> Result { + fn instruction_accounts_lamport_sum<'a, I>( + &'a self, + instruction_accounts: I, + ) -> Result + where + I: Iterator, + { if !self.is_early_verification_of_account_modifications_enabled() { return Ok(0); } let mut instruction_accounts_lamport_sum: u128 = 0; - for (instruction_account_index, instruction_account) in - instruction_accounts.iter().enumerate() - { + for (instruction_account_index, instruction_account) in instruction_accounts.enumerate() { if instruction_account_index != instruction_account.index_in_callee { continue; // Skip duplicate account } @@ -340,9 +321,6 @@ pub struct TransactionReturnData { pub data: Vec, } -/// List of (stack height, instruction) for each top-level instruction -pub type InstructionTrace = Vec>; - /// Loaded instruction shared between runtime and programs. /// /// This context is valid for the entire duration of a (possibly cross program) instruction being processed. 
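The push()/pop() changes above hinge on the lamport-sum invariant: push() snapshots the lamport sum over the caller's instruction accounts, and pop() recomputes it and rejects the frame if the sum moved. A minimal, self-contained sketch of that invariant follows; the names `Frame`, `push`, and `pop` are illustrative only, not the solana-sdk API, and the real implementation additionally skips duplicate accounts and checks executable-account borrows.

// Sketch of the lamport-sum invariant enforced by push()/pop(); illustrative
// names, not the runtime's types.
fn lamport_sum(accounts: &[u64], indexes: &[usize]) -> u128 {
    // Sum u64 lamports into a u128 so the bookkeeping itself cannot overflow.
    indexes.iter().map(|&i| accounts[i] as u128).sum()
}

struct Frame {
    indexes: Vec<usize>,
    sum_at_push: u128,
}

fn push(stack: &mut Vec<Frame>, accounts: &[u64], indexes: Vec<usize>) {
    let sum_at_push = lamport_sum(accounts, &indexes);
    stack.push(Frame { indexes, sum_at_push });
}

fn pop(stack: &mut Vec<Frame>, accounts: &[u64]) -> Result<(), &'static str> {
    let frame = stack.pop().ok_or("call depth")?;
    if lamport_sum(accounts, &frame.indexes) != frame.sum_at_push {
        return Err("unbalanced instruction");
    }
    Ok(())
}

fn main() {
    let mut accounts = vec![100u64, 50];
    let mut stack = Vec::new();

    push(&mut stack, &accounts, vec![0, 1]);
    accounts[0] -= 10;
    accounts[1] += 10; // a transfer keeps the sum unchanged
    assert_eq!(pop(&mut stack, &accounts), Ok(()));

    push(&mut stack, &accounts, vec![0, 1]);
    accounts[0] += 1; // lamports created from nowhere must be caught
    assert_eq!(pop(&mut stack, &accounts), Err("unbalanced instruction"));
}

The u128 accumulator mirrors instruction_accounts_lamport_sum(), which likewise sums u64 lamports into a u128 so the balance check itself can never overflow.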
@@ -357,19 +335,19 @@ pub struct InstructionContext { impl InstructionContext { /// New - pub fn new( + fn new( nesting_level: usize, instruction_accounts_lamport_sum: u128, - program_accounts: &[usize], - instruction_accounts: &[InstructionAccount], - instruction_data: &[u8], + program_accounts: Vec, + instruction_accounts: Vec, + instruction_data: Vec, ) -> Self { InstructionContext { nesting_level, instruction_accounts_lamport_sum, - program_accounts: program_accounts.to_vec(), - instruction_accounts: instruction_accounts.to_vec(), - instruction_data: instruction_data.to_vec(), + program_accounts, + instruction_accounts, + instruction_data, } } @@ -912,7 +890,6 @@ impl<'a> BorrowedAccount<'a> { /// Everything that needs to be recorded from a TransactionContext after execution pub struct ExecutionRecord { pub accounts: Vec, - pub instruction_trace: InstructionTrace, pub return_data: TransactionReturnData, pub changed_account_count: u64, pub total_size_of_all_accounts: u64, @@ -955,7 +932,6 @@ impl From for ExecutionRecord { .map(|account| account.into_inner()), ) .collect(), - instruction_trace: context.instruction_trace, return_data: context.return_data, changed_account_count, total_size_of_all_accounts, From 4b4af89a23d6d45883f384c11b3c43bb4aa03d32 Mon Sep 17 00:00:00 2001 From: Brennan Watt Date: Sat, 20 Aug 2022 06:52:32 -0700 Subject: [PATCH 60/67] Parallel insertion of dirty store keys during clean (#27058) parallelize dirty store key insertion --- runtime/src/accounts_db.rs | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index adef621aac3afd..2bb0f51142eaa1 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2424,6 +2424,7 @@ impl AccountsDb { fn construct_candidate_clean_keys( &self, max_clean_root: Option, + is_startup: bool, last_full_snapshot_slot: Option, timings: &mut CleanKeyTimings, ) -> Vec { @@ -2441,11 +2442,33 @@ impl AccountsDb { }); let dirty_stores_len = dirty_stores.len(); let pubkeys = DashSet::new(); - timings.oldest_dirty_slot = max_slot_inclusive.saturating_add(1); - for (slot, store) in dirty_stores { - timings.oldest_dirty_slot = std::cmp::min(timings.oldest_dirty_slot, slot); - store.accounts.account_iter().for_each(|account| { - pubkeys.insert(account.meta.pubkey); + let mut dirty_store_routine = || { + let chunk_size = 1.max(dirty_stores_len.saturating_div(rayon::current_num_threads())); + let oldest_dirty_slots: Vec = dirty_stores + .par_chunks(chunk_size) + .map(|dirty_store_chunk| { + let mut oldest_dirty_slot = max_slot_inclusive.saturating_add(1); + dirty_store_chunk.iter().for_each(|(slot, store)| { + oldest_dirty_slot = oldest_dirty_slot.min(*slot); + store.accounts.account_iter().for_each(|account| { + pubkeys.insert(account.meta.pubkey); + }); + }); + oldest_dirty_slot + }) + .collect(); + timings.oldest_dirty_slot = *oldest_dirty_slots + .iter() + .min() + .unwrap_or(&max_slot_inclusive.saturating_add(1)); + }; + + if is_startup { + // Free to consume all the cores during startup + dirty_store_routine(); + } else { + self.thread_pool_clean.install(|| { + dirty_store_routine(); }); } trace!( @@ -2533,6 +2556,7 @@ impl AccountsDb { let mut key_timings = CleanKeyTimings::default(); let mut pubkeys = self.construct_candidate_clean_keys( max_clean_root, + is_startup, last_full_snapshot_slot, &mut key_timings, ); From 2f0d3558452222e022432d23a770f8085dcfad50 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Sat, 20 Aug 2022 
09:37:02 -0500 Subject: [PATCH 61/67] Refactor epoch reward 2 (#27257) * refactor: extract store_stake_accounts fn * refactor: extract store_vote_account fn * clippy: slice * clippy: slice * fix merge error Co-authored-by: haoran --- runtime/src/bank.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ba192c7af4d937..cfd5187b84e891 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3272,6 +3272,7 @@ impl Bank { m.stop(); metrics.store_vote_accounts_us.fetch_add(m.as_us(), Relaxed); vote_rewards +<<<<<<< HEAD } fn update_reward_history( @@ -3298,6 +3299,8 @@ impl Bank { metrics .store_stake_accounts_us .fetch_add(m.as_us(), Relaxed); +======= +>>>>>>> c17f15a34f (Refactor epoch reward 2 (#27257)) } fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { From e399b11eb468687e1a45be2e3e456db16c396eef Mon Sep 17 00:00:00 2001 From: Michael Vines Date: Wed, 17 Aug 2022 08:40:23 -0700 Subject: [PATCH 62/67] Standardize thread names Tenets: 1. Limit thread names to 15 characters 2. Prefix all Solana-controlled threads with "sol" 3. Use Camel case. It's more character dense than Snake or Kebab case --- banks-server/src/banks_server.rs | 2 +- banks-server/src/rpc_banks_service.rs | 2 +- client/src/rpc_client.rs | 2 +- client/src/transaction_executor.rs | 2 +- core/src/accounts_hash_verifier.rs | 2 +- core/src/ancestor_hashes_service.rs | 4 +- core/src/banking_stage.rs | 2 +- core/src/broadcast_stage.rs | 8 +- core/src/cache_block_meta_service.rs | 2 +- core/src/cluster_info_vote_listener.rs | 6 +- core/src/cluster_slots_service.rs | 2 +- core/src/commitment_service.rs | 2 +- core/src/completed_data_sets_service.rs | 2 +- core/src/cost_update_service.rs | 2 +- core/src/drop_bank_service.rs | 2 +- core/src/fetch_stage.rs | 4 +- core/src/find_packet_sender_stake_stage.rs | 2 +- core/src/ledger_cleanup_service.rs | 6 +- core/src/ledger_metric_report_service.rs | 2 +- core/src/poh_timing_report_service.rs | 2 +- core/src/qos_service.rs | 2 +- core/src/repair_service.rs | 2 +- core/src/replay_stage.rs | 4 +- core/src/retransmit_stage.rs | 4 +- core/src/rewards_recorder_service.rs | 2 +- core/src/serve_repair.rs | 2 +- core/src/serve_repair_service.rs | 2 +- core/src/shred_fetch_stage.rs | 2 +- core/src/sigverify_shreds.rs | 2 +- core/src/sigverify_stage.rs | 2 +- core/src/snapshot_packager_service.rs | 2 +- core/src/staked_nodes_updater_service.rs | 2 +- core/src/stats_reporter_service.rs | 2 +- core/src/system_monitor_service.rs | 2 +- core/src/tpu.rs | 4 +- core/src/validator.rs | 35 +++------ core/src/voting_service.rs | 2 +- core/src/warm_quic_cache_service.rs | 2 +- core/src/window_service.rs | 6 +- entry/src/entry.rs | 74 +++++++++--------- .../src/slot_status_observer.rs | 2 +- gossip/src/cluster_info.rs | 12 +-- gossip/src/gossip_service.rs | 2 +- gossip/tests/crds_gossip.rs | 2 +- ledger/src/bigtable_upload.rs | 56 +++++++------- ledger/src/bigtable_upload_service.rs | 2 +- ledger/src/blockstore.rs | 4 +- ledger/src/blockstore_processor.rs | 2 +- ledger/src/shredder.rs | 2 +- ledger/src/sigverify_shreds.rs | 2 +- metrics/src/metrics.rs | 6 +- net-utils/src/lib.rs | 75 ++++++++++--------- perf/src/sigverify.rs | 2 +- poh/src/poh_service.rs | 4 +- .../optimistically_confirmed_bank_tracker.rs | 2 +- rpc/src/rpc_completed_slots_service.rs | 2 +- rpc/src/rpc_pubsub_service.rs | 4 +- rpc/src/rpc_service.rs | 6 +- rpc/src/rpc_subscriptions.rs | 4 +- rpc/src/transaction_status_service.rs | 2 +- 
runtime/src/accounts_background_service.rs | 2 +- runtime/src/accounts_db.rs | 6 +- runtime/src/accounts_index_storage.rs | 2 +- runtime/src/bank_client.rs | 2 +- runtime/src/serde_snapshot.rs | 2 +- runtime/src/shared_buffer_reader.rs | 2 +- .../src/verify_accounts_hash_in_background.rs | 2 +- .../src/send_transaction_service.rs | 4 +- streamer/src/quic.rs | 13 ++-- streamer/src/streamer.rs | 6 +- validator/src/admin_rpc_service.rs | 39 +++++----- validator/src/lib.rs | 23 +++--- 72 files changed, 264 insertions(+), 245 deletions(-) diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index c73844d2571560..1aff5220bec93b 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -105,7 +105,7 @@ impl BanksServer { } let server_bank_forks = bank_forks.clone(); Builder::new() - .name("solana-bank-forks-client".to_string()) + .name("solBankForksCli".to_string()) .spawn(move || Self::run(server_bank_forks, transaction_receiver)) .unwrap(); Self::new( diff --git a/banks-server/src/rpc_banks_service.rs b/banks-server/src/rpc_banks_service.rs index 822798dd1ffd62..8a2f48156291c8 100644 --- a/banks-server/src/rpc_banks_service.rs +++ b/banks-server/src/rpc_banks_service.rs @@ -88,7 +88,7 @@ impl RpcBanksService { let connection_cache = connection_cache.clone(); let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-rpc-banks".to_string()) + .name("solRpcBanksSvc".to_string()) .spawn(move || { Self::run( listen_addr, diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index b89b906e57ade4..c387fe6e749b25 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -178,7 +178,7 @@ impl RpcClient { )), runtime: Some( tokio::runtime::Builder::new_current_thread() - .thread_name("rpc-client") + .thread_name("solRpcClient") .enable_io() .enable_time() .build() diff --git a/client/src/transaction_executor.rs b/client/src/transaction_executor.rs index 56f7a8002275e4..89a70d7ee596e8 100644 --- a/client/src/transaction_executor.rs +++ b/client/src/transaction_executor.rs @@ -91,7 +91,7 @@ impl TransactionExecutor { let exit = exit.clone(); let cleared = cleared.clone(); Builder::new() - .name("sig_clear".to_string()) + .name("solSigClear".to_string()) .spawn(move || { let client = RpcClient::new_socket_with_commitment( entrypoint_addr, diff --git a/core/src/accounts_hash_verifier.rs b/core/src/accounts_hash_verifier.rs index 118529dd983640..6dbdbaa9237087 100644 --- a/core/src/accounts_hash_verifier.rs +++ b/core/src/accounts_hash_verifier.rs @@ -50,7 +50,7 @@ impl AccountsHashVerifier { let exit = exit.clone(); let cluster_info = cluster_info.clone(); let t_accounts_hash_verifier = Builder::new() - .name("solana-hash-accounts".to_string()) + .name("solAcctHashVer".to_string()) .spawn(move || { let mut hashes = vec![]; loop { diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index 4813ed11685069..3f81d38e2a31b3 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -215,7 +215,7 @@ impl AncestorHashesService { ancestor_socket: Arc, ) -> JoinHandle<()> { Builder::new() - .name("solana-ancestor-hashes-responses-service".to_string()) + .name("solAncHashesSvc".to_string()) .spawn(move || { let mut last_stats_report = Instant::now(); let mut stats = AncestorHashesResponsesStats::default(); @@ -538,7 +538,7 @@ impl AncestorHashesService { // to MAX_ANCESTOR_HASHES_SLOT_REQUESTS_PER_SECOND/second let mut request_throttle = vec![]; 
Builder::new() - .name("solana-manage-ancestor-requests".to_string()) + .name("solManAncReqs".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { return; diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index 2ec79c951675d7..68cb68fcd37d55 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -466,7 +466,7 @@ impl BankingStage { let connection_cache = connection_cache.clone(); let bank_forks = bank_forks.clone(); Builder::new() - .name(format!("solana-banking-stage-tx-{}", i)) + .name(format!("solBanknStgTx{:02}", i)) .spawn(move || { Self::process_loop( &verified_receiver, diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index ba4c33fa38cc46..68729600d1983e 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -255,7 +255,7 @@ impl BroadcastStage { let blockstore = blockstore.clone(); let cluster_info = cluster_info.clone(); Builder::new() - .name("solana-broadcaster".to_string()) + .name("solBroadcast".to_string()) .spawn(move || { let _finalizer = Finalizer::new(exit); Self::run( @@ -277,7 +277,7 @@ impl BroadcastStage { let cluster_info = cluster_info.clone(); let bank_forks = bank_forks.clone(); let t = Builder::new() - .name("solana-broadcaster-transmit".to_string()) + .name("solBroadcastTx".to_string()) .spawn(move || loop { let res = bs_transmit.transmit(&socket_receiver, &cluster_info, &sock, &bank_forks); @@ -295,7 +295,7 @@ impl BroadcastStage { let mut bs_record = broadcast_stage_run.clone(); let btree = blockstore.clone(); let t = Builder::new() - .name("solana-broadcaster-record".to_string()) + .name("solBroadcastRec".to_string()) .spawn(move || loop { let res = bs_record.record(&blockstore_receiver, &btree); let res = Self::handle_error(res, "solana-broadcaster-record"); @@ -308,7 +308,7 @@ impl BroadcastStage { } let retransmit_thread = Builder::new() - .name("solana-broadcaster-retransmit".to_string()) + .name("solBroadcastRtx".to_string()) .spawn(move || loop { if let Some(res) = Self::handle_error( Self::check_retransmit_signals( diff --git a/core/src/cache_block_meta_service.rs b/core/src/cache_block_meta_service.rs index 98069f253aa66d..a8da9ac0964459 100644 --- a/core/src/cache_block_meta_service.rs +++ b/core/src/cache_block_meta_service.rs @@ -31,7 +31,7 @@ impl CacheBlockMetaService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-cache-block-time".to_string()) + .name("solCacheBlkTime".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index ed94724824ff2d..bbe7c2b4a67e79 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -252,7 +252,7 @@ impl ClusterInfoVoteListener { let exit = exit.clone(); let bank_forks = bank_forks.clone(); Builder::new() - .name("solana-cluster_info_vote_listener".to_string()) + .name("solCiVoteLstnr".to_string()) .spawn(move || { let _ = Self::recv_loop( exit, @@ -266,7 +266,7 @@ impl ClusterInfoVoteListener { }; let exit_ = exit.clone(); let bank_send_thread = Builder::new() - .name("solana-cluster_info_bank_send".to_string()) + .name("solCiBankSend".to_string()) .spawn(move || { let _ = Self::bank_send_loop( exit_, @@ -278,7 +278,7 @@ impl ClusterInfoVoteListener { .unwrap(); let send_thread = Builder::new() - .name("solana-cluster_info_process_votes".to_string()) + .name("solCiProcVotes".to_string()) .spawn(move || { let _ = 
Self::process_votes_loop( exit, diff --git a/core/src/cluster_slots_service.rs b/core/src/cluster_slots_service.rs index 119f6081cff0e4..f867981f6afb9b 100644 --- a/core/src/cluster_slots_service.rs +++ b/core/src/cluster_slots_service.rs @@ -48,7 +48,7 @@ impl ClusterSlotsService { Self::initialize_lowest_slot(&blockstore, &cluster_info); Self::initialize_epoch_slots(&bank_forks, &cluster_info); let t_cluster_slots_service = Builder::new() - .name("solana-cluster-slots-service".to_string()) + .name("solClusterSlots".to_string()) .spawn(move || { Self::run( blockstore, diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index 0922345a096f2d..92bab89107545c 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -69,7 +69,7 @@ impl AggregateCommitmentService { sender, Self { t_commitment: Builder::new() - .name("solana-aggregate-stake-lockouts".to_string()) + .name("solAggCommitSvc".to_string()) .spawn(move || loop { if exit_.load(Ordering::Relaxed) { break; diff --git a/core/src/completed_data_sets_service.rs b/core/src/completed_data_sets_service.rs index 08b561b8aca12f..ff11dfa1fb454a 100644 --- a/core/src/completed_data_sets_service.rs +++ b/core/src/completed_data_sets_service.rs @@ -31,7 +31,7 @@ impl CompletedDataSetsService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("completed-data-set-service".to_string()) + .name("solComplDataSet".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index cf1a55365f444e..8f5038c0c91633 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -75,7 +75,7 @@ impl CostUpdateService { cost_update_receiver: CostUpdateReceiver, ) -> Self { let thread_hdl = Builder::new() - .name("solana-cost-update-service".to_string()) + .name("solCostUpdtSvc".to_string()) .spawn(move || { Self::service_loop(blockstore, cost_model, cost_update_receiver); }) diff --git a/core/src/drop_bank_service.rs b/core/src/drop_bank_service.rs index aac1a02ee0344d..0321643d6aab68 100644 --- a/core/src/drop_bank_service.rs +++ b/core/src/drop_bank_service.rs @@ -15,7 +15,7 @@ pub struct DropBankService { impl DropBankService { pub fn new(bank_receiver: Receiver>>) -> Self { let thread_hdl = Builder::new() - .name("sol-drop-b-service".to_string()) + .name("solDropBankSrvc".to_string()) .spawn(move || { for banks in bank_receiver.iter() { let len = banks.len(); diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index c041739d7c8d4e..93d8ed28ac1bd1 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -208,7 +208,7 @@ impl FetchStage { let poh_recorder = poh_recorder.clone(); let fwd_thread_hdl = Builder::new() - .name("solana-fetch-stage-fwd-rcvr".to_string()) + .name("solFetchStgFwRx".to_string()) .spawn(move || loop { if let Err(e) = Self::handle_forwarded_packets(&forward_receiver, &sender, &poh_recorder) @@ -226,7 +226,7 @@ impl FetchStage { let exit = exit.clone(); let metrics_thread_hdl = Builder::new() - .name("solana-fetch-stage-metrics".to_string()) + .name("solFetchStgMetr".to_string()) .spawn(move || loop { sleep(Duration::from_secs(1)); diff --git a/core/src/find_packet_sender_stake_stage.rs b/core/src/find_packet_sender_stake_stage.rs index d62d6afe7c8dac..53f1d033669a34 100644 --- a/core/src/find_packet_sender_stake_stage.rs +++ b/core/src/find_packet_sender_stake_stage.rs @@ -84,7 +84,7 @@ impl FindPacketSenderStakeStage { ) 
-> Self { let mut stats = FindPacketSenderStakeStats::default(); let thread_hdl = Builder::new() - .name("find-packet-sender-stake".to_string()) + .name("solPktStake".to_string()) .spawn(move || loop { match streamer::recv_packet_batches(&packet_receiver) { Ok((mut batches, num_packets, recv_duration)) => { diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 5c006c82613373..160b8721f45b6e 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -76,7 +76,7 @@ impl LedgerCleanupService { let blockstore_compact = blockstore.clone(); let t_cleanup = Builder::new() - .name("sol-led-cleanup".to_string()) + .name("solLedgerClean".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; @@ -98,7 +98,7 @@ impl LedgerCleanupService { .unwrap(); let t_compact = Builder::new() - .name("sol-led-compact".to_string()) + .name("solLedgerComp".to_string()) .spawn(move || loop { if exit_compact.load(Ordering::Relaxed) { break; @@ -238,7 +238,7 @@ impl LedgerCleanupService { let purge_complete1 = purge_complete.clone(); let last_compact_slot1 = last_compact_slot.clone(); let _t_purge = Builder::new() - .name("solana-ledger-purge".to_string()) + .name("solLedgerPurge".to_string()) .spawn(move || { let mut slot_update_time = Measure::start("slot_update"); *blockstore.lowest_cleanup_slot.write().unwrap() = lowest_cleanup_slot; diff --git a/core/src/ledger_metric_report_service.rs b/core/src/ledger_metric_report_service.rs index 8d0b96d28c3500..1f8636bff6a546 100644 --- a/core/src/ledger_metric_report_service.rs +++ b/core/src/ledger_metric_report_service.rs @@ -26,7 +26,7 @@ impl LedgerMetricReportService { pub fn new(blockstore: Arc, exit: &Arc) -> Self { let exit_signal = exit.clone(); let t_cf_metric = Builder::new() - .name("metric_report_rocksdb_cf_metrics".to_string()) + .name("solRocksCfMtrcs".to_string()) .spawn(move || loop { if exit_signal.load(Ordering::Relaxed) { break; diff --git a/core/src/poh_timing_report_service.rs b/core/src/poh_timing_report_service.rs index 175b3cdc83e50d..bc84176525ef6e 100644 --- a/core/src/poh_timing_report_service.rs +++ b/core/src/poh_timing_report_service.rs @@ -28,7 +28,7 @@ impl PohTimingReportService { let exit_signal = exit.clone(); let mut poh_timing_reporter = PohTimingReporter::default(); let t_poh_timing = Builder::new() - .name("poh_timing_report".to_string()) + .name("solPohTimingRpt".to_string()) .spawn(move || loop { if exit_signal.load(Ordering::Relaxed) { break; diff --git a/core/src/qos_service.rs b/core/src/qos_service.rs index fb493f674343bb..9b54e2a302c46d 100644 --- a/core/src/qos_service.rs +++ b/core/src/qos_service.rs @@ -72,7 +72,7 @@ impl QosService { let metrics_clone = Arc::clone(&metrics); let reporting_thread = Some( Builder::new() - .name("solana-qos-service-metrics-repoting".to_string()) + .name("solQosSvcMetr".to_string()) .spawn(move || { Self::reporting_loop(running_flag_clone, metrics_clone, report_receiver); }) diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index 018824c7935e9e..2b8bda8705842c 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -212,7 +212,7 @@ impl RepairService { let exit = exit.clone(); let repair_info = repair_info.clone(); Builder::new() - .name("solana-repair-service".to_string()) + .name("solRepairSvc".to_string()) .spawn(move || { Self::run( &blockstore, diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 8b611e96a0d19c..0c94120c86d6ad 100644 --- 
a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -97,7 +97,7 @@ const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4; lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) - .thread_name(|ix| format!("replay_{}", ix)) + .thread_name(|ix| format!("solReplay{:02}", ix)) .build() .unwrap(); } @@ -436,7 +436,7 @@ impl ReplayStage { #[allow(clippy::cognitive_complexity)] let t_replay = Builder::new() - .name("solana-replay-stage".to_string()) + .name("solReplayStage".to_string()) .spawn(move || { let verify_recyclers = VerifyRecyclers::default(); let _exit = Finalizer::new(exit.clone()); diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index c6d3855f72b640..bf4e9c2e76819c 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -364,11 +364,11 @@ pub fn retransmitter( let num_threads = get_thread_count().min(8).max(sockets.len()); let thread_pool = ThreadPoolBuilder::new() .num_threads(num_threads) - .thread_name(|i| format!("retransmit-{}", i)) + .thread_name(|i| format!("solRetransmit{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-retransmitter".to_string()) + .name("solRetransmittr".to_string()) .spawn(move || loop { match retransmit( &thread_pool, diff --git a/core/src/rewards_recorder_service.rs b/core/src/rewards_recorder_service.rs index 8988441d22222f..10dd8ea9cd0b0c 100644 --- a/core/src/rewards_recorder_service.rs +++ b/core/src/rewards_recorder_service.rs @@ -30,7 +30,7 @@ impl RewardsRecorderService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-rewards-writer".to_string()) + .name("solRewardsWritr".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 47443bcd9acc2d..0b7d63a7ddc2f7 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -567,7 +567,7 @@ impl ServeRepair { let recycler = PacketBatchRecycler::default(); Builder::new() - .name("solana-repair-listen".to_string()) + .name("solRepairListen".to_string()) .spawn(move || { let mut last_print = Instant::now(); let mut stats = ServeRepairStats::default(); diff --git a/core/src/serve_repair_service.rs b/core/src/serve_repair_service.rs index 72dc7a49e66553..144de5c2a98d48 100644 --- a/core/src/serve_repair_service.rs +++ b/core/src/serve_repair_service.rs @@ -46,7 +46,7 @@ impl ServeRepairService { ); let (response_sender, response_receiver) = unbounded(); let t_responder = streamer::responder( - "serve-repairs", + "Repair", serve_repair_socket, response_receiver, socket_addr_space, diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index d9427e30f64da2..25c9b00cdf7c76 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -135,7 +135,7 @@ impl ShredFetchStage { }) .collect(); let modifier_hdl = Builder::new() - .name("solana-tvu-fetch-stage-packet-modifier".to_string()) + .name("solTvuFetchPMod".to_string()) .spawn(move || { let repair_context = repair_context .as_ref() diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index f9a50ab8b2a954..521fadedd0db3f 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -37,7 +37,7 @@ pub(crate) fn spawn_shred_sigverify( let recycler_cache = RecyclerCache::warmed(); let mut stats = ShredSigVerifyStats::new(Instant::now()); Builder::new() - .name("shred-verifier".to_string()) + 
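// Annotation, not part of the original patches: rayon pools name their
// workers through a per-index closure, and these hunks budget the index into
// the 15-byte limit ("solRetransmit" is 13 bytes, {:02} adds two digits). A
// minimal sketch assuming the rayon crate; the pool size is illustrative:
use rayon::{ThreadPool, ThreadPoolBuilder};

fn build_named_pool() -> ThreadPool {
    ThreadPoolBuilder::new()
        .num_threads(4)
        .thread_name(|i| format!("solRetransmit{:02}", i)) // solRetransmit00..solRetransmit03
        .build()
        .expect("thread pool should build")
}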
.name("solShredVerifr".to_string()) .spawn(move || loop { match run_shred_sigverify( &self_pubkey, diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 75c863e9f16c9a..30174dc986888b 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -412,7 +412,7 @@ impl SigVerifyStage { const MAX_DEDUPER_AGE: Duration = Duration::from_secs(2); const MAX_DEDUPER_ITEMS: u32 = 1_000_000; Builder::new() - .name("solana-verifier".to_string()) + .name("solSigVerifier".to_string()) .spawn(move || { let mut deduper = Deduper::new(MAX_DEDUPER_ITEMS, MAX_DEDUPER_AGE); loop { diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs index 7077362e4b8725..3234dcbeea2c15 100644 --- a/core/src/snapshot_packager_service.rs +++ b/core/src/snapshot_packager_service.rs @@ -49,7 +49,7 @@ impl SnapshotPackagerService { ); let t_snapshot_packager = Builder::new() - .name("snapshot-packager".to_string()) + .name("solSnapshotPkgr".to_string()) .spawn(move || { renice_this_thread(snapshot_config.packager_thread_niceness_adj).unwrap(); let mut snapshot_gossip_manager = if enable_gossip_push { diff --git a/core/src/staked_nodes_updater_service.rs b/core/src/staked_nodes_updater_service.rs index 97b9914ea1042f..73b3c6ec8c2323 100644 --- a/core/src/staked_nodes_updater_service.rs +++ b/core/src/staked_nodes_updater_service.rs @@ -30,7 +30,7 @@ impl StakedNodesUpdaterService { shared_staked_nodes_overrides: Arc>>, ) -> Self { let thread_hdl = Builder::new() - .name("sol-sn-updater".to_string()) + .name("solStakedNodeUd".to_string()) .spawn(move || { let mut last_stakes = Instant::now(); while !exit.load(Ordering::Relaxed) { diff --git a/core/src/stats_reporter_service.rs b/core/src/stats_reporter_service.rs index b6f23e4162e08e..90e72aaadb3e46 100644 --- a/core/src/stats_reporter_service.rs +++ b/core/src/stats_reporter_service.rs @@ -22,7 +22,7 @@ impl StatsReporterService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-stats-reporter".to_owned()) + .name("solStatsReport".to_owned()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { return; diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 3b11d4b6a1a424..dc6146ed1abc75 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -363,7 +363,7 @@ impl SystemMonitorService { ) -> Self { info!("Starting SystemMonitorService"); let thread_hdl = Builder::new() - .name("system-monitor".to_string()) + .name("solSystemMonitr".to_string()) .spawn(move || { Self::run( exit, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 606fee5cb3cded..6885f433327a44 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -140,7 +140,7 @@ impl Tpu { packet_receiver, find_packet_sender_stake_sender, staked_nodes.clone(), - "tpu-find-packet-sender-stake", + "Tpu", ); let (vote_find_packet_sender_stake_sender, vote_find_packet_sender_stake_receiver) = @@ -150,7 +150,7 @@ impl Tpu { vote_packet_receiver, vote_find_packet_sender_stake_sender, staked_nodes.clone(), - "tpu-vote-find-packet-sender-stake", + "Vote", ); let (verified_sender, verified_receiver) = unbounded(); diff --git a/core/src/validator.rs b/core/src/validator.rs index ca8ba125fd3029..eee5873d579267 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -302,7 +302,7 @@ impl BlockstoreRootScan { let exit = exit.clone(); Some( Builder::new() - .name("blockstore-root-scan".to_string()) + .name("solBStoreRtScan".to_string()) .spawn(move 
|| blockstore.scan_and_fix_roots(&exit)) .unwrap(), ) @@ -1588,34 +1588,23 @@ impl<'a> ProcessBlockStore<'a> { let previous_start_process = *self.start_progress.read().unwrap(); *self.start_progress.write().unwrap() = ValidatorStartProgress::LoadingLedger; - /* - #[allow(clippy::too_many_arguments)] - fn process_blockstore( - blockstore: &Blockstore, - bank_forks: &Arc>, - leader_schedule_cache: &LeaderScheduleCache, - process_options: &blockstore_processor::ProcessOptions, - transaction_status_sender: Option<&TransactionStatusSender>, - cache_block_meta_sender: Option<&CacheBlockMetaSender>, - blockstore_root_scan: BlockstoreRootScan, - accounts_background_request_sender: &AbsRequestSender, - start_progress: &Arc>, - ) { - */ let exit = Arc::new(AtomicBool::new(false)); if let Some(max_slot) = highest_slot(self.blockstore) { let bank_forks = self.bank_forks.clone(); let exit = exit.clone(); let start_progress = self.start_progress.clone(); - let _ = std::thread::spawn(move || { - while !exit.load(Ordering::Relaxed) { - let slot = bank_forks.read().unwrap().working_bank().slot(); - *start_progress.write().unwrap() = - ValidatorStartProgress::ProcessingLedger { slot, max_slot }; - sleep(Duration::from_secs(2)); - } - }); + let _ = Builder::new() + .name("solRptLdgrStat".to_string()) + .spawn(move || { + while !exit.load(Ordering::Relaxed) { + let slot = bank_forks.read().unwrap().working_bank().slot(); + *start_progress.write().unwrap() = + ValidatorStartProgress::ProcessingLedger { slot, max_slot }; + sleep(Duration::from_secs(2)); + } + }) + .unwrap(); } if let Err(e) = blockstore_processor::process_blockstore_from_root( self.blockstore, diff --git a/core/src/voting_service.rs b/core/src/voting_service.rs index 29cf4699dd575e..cbd53a1c3bc23b 100644 --- a/core/src/voting_service.rs +++ b/core/src/voting_service.rs @@ -46,7 +46,7 @@ impl VotingService { bank_forks: Arc>, ) -> Self { let thread_hdl = Builder::new() - .name("sol-vote-service".to_string()) + .name("solVoteService".to_string()) .spawn(move || { for vote_op in vote_receiver.iter() { let rooted_bank = bank_forks.read().unwrap().root_bank().clone(); diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index 2632d031019ed9..08428d589878ae 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -32,7 +32,7 @@ impl WarmQuicCacheService { exit: Arc, ) -> Self { let thread_hdl = Builder::new() - .name("sol-warm-quic-service".to_string()) + .name("solWarmQuicSvc".to_string()) .spawn(move || { let slot_jitter = thread_rng().gen_range(-CACHE_JITTER_SLOT, CACHE_JITTER_SLOT); let mut maybe_last_leader = None; diff --git a/core/src/window_service.rs b/core/src/window_service.rs index da4cbcb4452b11..2f7983bd86d7eb 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -375,7 +375,7 @@ impl WindowService { inc_new_counter_error!("solana-check-duplicate-error", 1, 1); }; Builder::new() - .name("solana-check-duplicate".to_string()) + .name("solWinCheckDup".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Err(e) = run_check_duplicate( @@ -408,11 +408,11 @@ impl WindowService { }; let thread_pool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("window-insert-{}", i)) + .thread_name(|i| format!("solWinInsert{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-window-insert".to_string()) + .name("solWinInsert".to_string()) .spawn(move || { let handle_duplicate = |shred| 
{ let _ = check_duplicate_sender.send(shred); diff --git a/entry/src/entry.rs b/entry/src/entry.rs index bfdba187d00743..225e93b17f6922 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -46,7 +46,7 @@ use { lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("entry_{}", ix)) + .thread_name(|ix| format!("solEntry{:02}", ix)) .build() .unwrap(); } @@ -525,21 +525,24 @@ pub fn start_verify_transactions( let tx_offset_recycler = verify_recyclers.tx_offset_recycler; let out_recycler = verify_recyclers.out_recycler; let num_packets = entry_txs.len(); - let gpu_verify_thread = thread::spawn(move || { - let mut verify_time = Measure::start("sigverify"); - sigverify::ed25519_verify( - &mut packet_batches, - &tx_offset_recycler, - &out_recycler, - false, - num_packets, - ); - let verified = packet_batches - .iter() - .all(|batch| batch.iter().all(|p| !p.meta.discard())); - verify_time.stop(); - (verified, verify_time.as_us()) - }); + let gpu_verify_thread = thread::Builder::new() + .name("solGpuSigVerify".into()) + .spawn(move || { + let mut verify_time = Measure::start("sigverify"); + sigverify::ed25519_verify( + &mut packet_batches, + &tx_offset_recycler, + &out_recycler, + false, + num_packets, + ); + let verified = packet_batches + .iter() + .all(|batch| batch.iter().all(|p| !p.meta.discard())); + verify_time.stop(); + (verified, verify_time.as_us()) + }) + .unwrap(); Ok(EntrySigVerificationState { verification_status: EntryVerificationStatus::Pending, entries: Some(entries), @@ -770,25 +773,28 @@ impl EntrySlice for [Entry] { let hashes = Arc::new(Mutex::new(hashes_pinned)); let hashes_clone = hashes.clone(); - let gpu_verify_thread = thread::spawn(move || { - let mut hashes = hashes_clone.lock().unwrap(); - let gpu_wait = Instant::now(); - let res; - unsafe { - res = (api.poh_verify_many)( - hashes.as_mut_ptr() as *mut u8, - num_hashes_vec.as_ptr(), - length, - 1, + let gpu_verify_thread = thread::Builder::new() + .name("solGpuPohVerify".into()) + .spawn(move || { + let mut hashes = hashes_clone.lock().unwrap(); + let gpu_wait = Instant::now(); + let res; + unsafe { + res = (api.poh_verify_many)( + hashes.as_mut_ptr() as *mut u8, + num_hashes_vec.as_ptr(), + length, + 1, + ); + } + assert!(res == 0, "GPU PoH verify many failed"); + inc_new_counter_info!( + "entry_verify-gpu_thread", + timing::duration_as_us(&gpu_wait.elapsed()) as usize ); - } - assert!(res == 0, "GPU PoH verify many failed"); - inc_new_counter_info!( - "entry_verify-gpu_thread", - timing::duration_as_us(&gpu_wait.elapsed()) as usize - ); - timing::duration_as_us(&gpu_wait.elapsed()) - }); + timing::duration_as_us(&gpu_wait.elapsed()) + }) + .unwrap(); let verifications = PAR_THREAD_POOL.install(|| { self.into_par_iter() diff --git a/geyser-plugin-manager/src/slot_status_observer.rs b/geyser-plugin-manager/src/slot_status_observer.rs index bad8fa90ece13c..b2f6bf5f795b6c 100644 --- a/geyser-plugin-manager/src/slot_status_observer.rs +++ b/geyser-plugin-manager/src/slot_status_observer.rs @@ -48,7 +48,7 @@ impl SlotStatusObserver { slot_status_notifier: SlotStatusNotifier, ) -> JoinHandle<()> { Builder::new() - .name("bank_notification_receiver".to_string()) + .name("solBankNotif".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Ok(slot) = bank_notification_receiver.recv() { diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 9d692b8a08aba6..15a126eef8b2d5 100644 --- 
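// Annotation, not part of the original patches: the entry.rs hunks above
// replace bare thread::spawn calls with Builder so the GPU verify threads get
// names. Unlike thread::spawn, Builder::spawn returns io::Result because
// thread creation can fail, which is why every converted call site gains an
// .unwrap(). A minimal sketch; the closure body stands in for the real
// sigverify work:
use std::thread;

fn spawn_gpu_style_worker() -> thread::JoinHandle<u64> {
    thread::Builder::new()
        .name("solGpuSigVerify".into()) // 15 bytes
        .spawn(move || {
            // real code would run the GPU verification here and time it
            42u64 // stand-in for elapsed microseconds
        })
        .unwrap() // matches the patches: treat spawn failure as fatal
}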
a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -1682,11 +1682,11 @@ impl ClusterInfo { ) -> JoinHandle<()> { let thread_pool = ThreadPoolBuilder::new() .num_threads(std::cmp::min(get_thread_count(), 8)) - .thread_name(|i| format!("ClusterInfo::gossip-{}", i)) + .thread_name(|i| format!("solRunGossip{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-gossip".to_string()) + .name("solGossip".to_string()) .spawn(move || { let mut last_push = timestamp(); let mut last_contact_info_trace = timestamp(); @@ -2560,7 +2560,7 @@ impl ClusterInfo { ) -> JoinHandle<()> { let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("gossip-consume-{}", i)) + .thread_name(|i| format!("solGossipCons{:02}", i)) .build() .unwrap(); let run_consume = move || { @@ -2576,7 +2576,7 @@ impl ClusterInfo { } } }; - let thread_name = String::from("gossip-consume"); + let thread_name = String::from("solGossipConsum"); Builder::new().name(thread_name).spawn(run_consume).unwrap() } @@ -2592,11 +2592,11 @@ impl ClusterInfo { let recycler = PacketBatchRecycler::default(); let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) - .thread_name(|i| format!("sol-gossip-work-{}", i)) + .thread_name(|i| format!("solGossipWork{:02}", i)) .build() .unwrap(); Builder::new() - .name("solana-listen".to_string()) + .name("solGossipListen".to_string()) .spawn(move || { while !exit.load(Ordering::Relaxed) { if let Err(err) = self.run_listen( diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index 9bc911b405d49f..d487cf546ef7ca 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -80,7 +80,7 @@ impl GossipService { exit.clone(), ); let t_responder = streamer::responder( - "gossip", + "Gossip", gossip_socket, response_receiver, socket_addr_space, diff --git a/gossip/tests/crds_gossip.rs b/gossip/tests/crds_gossip.rs index 7095465c38fa54..5eff0147b52ce8 100644 --- a/gossip/tests/crds_gossip.rs +++ b/gossip/tests/crds_gossip.rs @@ -608,7 +608,7 @@ fn network_run_pull( fn build_gossip_thread_pool() -> ThreadPool { ThreadPoolBuilder::new() .num_threads(get_thread_count().min(2)) - .thread_name(|i| format!("crds_gossip_test_{}", i)) + .thread_name(|i| format!("gossipTest{:02}", i)) .build() .unwrap() } diff --git a/ledger/src/bigtable_upload.rs b/ledger/src/bigtable_upload.rs index f43b07db12592a..de717c4d9b1d84 100644 --- a/ledger/src/bigtable_upload.rs +++ b/ledger/src/bigtable_upload.rs @@ -164,35 +164,37 @@ pub async fn upload_confirmed_blocks( let sender = sender.clone(); let slot_receiver = slot_receiver.clone(); let exit = exit.clone(); + std::thread::Builder::new() + .name("solBigTGetBlk".into()) + .spawn(move || { + let start = Instant::now(); + let mut num_blocks_read = 0; + + while let Ok(slot) = slot_receiver.recv() { + if exit.load(Ordering::Relaxed) { + break; + } - std::thread::spawn(move || { - let start = Instant::now(); - let mut num_blocks_read = 0; - - while let Ok(slot) = slot_receiver.recv() { - if exit.load(Ordering::Relaxed) { - break; + let _ = match blockstore.get_rooted_block(slot, true) { + Ok(confirmed_block) => { + num_blocks_read += 1; + sender.send((slot, Some(confirmed_block))) + } + Err(err) => { + warn!( + "Failed to get load confirmed block from slot {}: {:?}", + slot, err + ); + sender.send((slot, None)) + } + }; } - - let _ = match blockstore.get_rooted_block(slot, true) { - Ok(confirmed_block) => { - num_blocks_read += 1; - sender.send((slot, 
Some(confirmed_block))) - } - Err(err) => { - warn!( - "Failed to get load confirmed block from slot {}: {:?}", - slot, err - ); - sender.send((slot, None)) - } - }; - } - BlockstoreLoadStats { - num_blocks_read, - elapsed: start.elapsed(), - } - }) + BlockstoreLoadStats { + num_blocks_read, + elapsed: start.elapsed(), + } + }) + .unwrap() }) .collect(), receiver, diff --git a/ledger/src/bigtable_upload_service.rs b/ledger/src/bigtable_upload_service.rs index 812f87cf8704a9..857190a47de9ab 100644 --- a/ledger/src/bigtable_upload_service.rs +++ b/ledger/src/bigtable_upload_service.rs @@ -50,7 +50,7 @@ impl BigTableUploadService { ) -> Self { info!("Starting BigTable upload service"); let thread = Builder::new() - .name("bigtable-upload".to_string()) + .name("solBigTUpload".to_string()) .spawn(move || { Self::run( runtime, diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 5bddc02bb90a4e..dccfc4aca6318d 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -90,12 +90,12 @@ pub use { lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("blockstore_{}", ix)) + .thread_name(|ix| format!("solBstore{:02}", ix)) .build() .unwrap(); static ref PAR_THREAD_POOL_ALL_CPUS: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(num_cpus::get()) - .thread_name(|ix| format!("blockstore_{}", ix)) + .thread_name(|ix| format!("solBstoreAll{:02}", ix)) .build() .unwrap(); } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 6b04212bd50f0b..de5a542b79a0a5 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -105,7 +105,7 @@ struct ReplayEntry { lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_max_thread_count()) - .thread_name(|ix| format!("blockstore_processor_{}", ix)) + .thread_name(|ix| format!("solBstoreProc{:02}", ix)) .build() .unwrap(); } diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs index 671cc0b7c44c47..1b1db2f9775bd4 100644 --- a/ledger/src/shredder.rs +++ b/ledger/src/shredder.rs @@ -19,7 +19,7 @@ use { lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("shredder_{}", ix)) + .thread_name(|ix| format!("solShredder{:02}", ix)) .build() .unwrap(); } diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index aecad26aa6c311..87bffcb00469ee 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -26,7 +26,7 @@ const SIGN_SHRED_GPU_MIN: usize = 256; lazy_static! 
{ static ref SIGVERIFY_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("sigverify_shreds_{}", ix)) + .thread_name(|ix| format!("solSvrfyShred{:02}", ix)) .build() .unwrap(); } diff --git a/metrics/src/metrics.rs b/metrics/src/metrics.rs index 99f1183648b147..4b9b183de98ee4 100644 --- a/metrics/src/metrics.rs +++ b/metrics/src/metrics.rs @@ -173,7 +173,11 @@ impl MetricsAgent { max_points_per_sec: usize, ) -> Self { let (sender, receiver) = unbounded::(); - thread::spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec)); + + thread::Builder::new() + .name("solMetricsAgent".into()) + .spawn(move || Self::run(&receiver, &writer, write_frequency, max_points_per_sec)) + .unwrap(); Self { sender } } diff --git a/net-utils/src/lib.rs b/net-utils/src/lib.rs index ecee6d98f6f582..f690a05f6bdc25 100644 --- a/net-utils/src/lib.rs +++ b/net-utils/src/lib.rs @@ -144,15 +144,18 @@ fn do_verify_reachable_ports( for (port, tcp_listener) in tcp_listeners { let (sender, receiver) = unbounded(); let listening_addr = tcp_listener.local_addr().unwrap(); - let thread_handle = std::thread::spawn(move || { - debug!("Waiting for incoming connection on tcp/{}", port); - match tcp_listener.incoming().next() { - Some(_) => sender - .send(()) - .unwrap_or_else(|err| warn!("send failure: {}", err)), - None => warn!("tcp incoming failed"), - } - }); + let thread_handle = std::thread::Builder::new() + .name(format!("solVrfyTcp{:05}", port)) + .spawn(move || { + debug!("Waiting for incoming connection on tcp/{}", port); + match tcp_listener.incoming().next() { + Some(_) => sender + .send(()) + .unwrap_or_else(|err| warn!("send failure: {}", err)), + None => warn!("tcp incoming failed"), + } + }) + .unwrap(); match receiver.recv_timeout(timeout) { Ok(_) => { info!("tcp/{} is reachable", port); @@ -222,33 +225,37 @@ fn do_verify_reachable_ports( let port = udp_socket.local_addr().unwrap().port(); let udp_socket = udp_socket.try_clone().expect("Unable to clone udp socket"); let reachable_ports = reachable_ports.clone(); - std::thread::spawn(move || { - let start = Instant::now(); - - let original_read_timeout = udp_socket.read_timeout().unwrap(); - udp_socket - .set_read_timeout(Some(Duration::from_millis(250))) - .unwrap(); - loop { - if reachable_ports.read().unwrap().contains(&port) - || Instant::now().duration_since(start) >= timeout - { - break; - } - - let recv_result = udp_socket.recv(&mut [0; 1]); - debug!( - "Waited for incoming datagram on udp/{}: {:?}", - port, recv_result - ); - if recv_result.is_ok() { - reachable_ports.write().unwrap().insert(port); - break; + std::thread::Builder::new() + .name(format!("solVrfyUdp{:05}", port)) + .spawn(move || { + let start = Instant::now(); + + let original_read_timeout = udp_socket.read_timeout().unwrap(); + udp_socket + .set_read_timeout(Some(Duration::from_millis(250))) + .unwrap(); + loop { + if reachable_ports.read().unwrap().contains(&port) + || Instant::now().duration_since(start) >= timeout + { + break; + } + + let recv_result = udp_socket.recv(&mut [0; 1]); + debug!( + "Waited for incoming datagram on udp/{}: {:?}", + port, recv_result + ); + + if recv_result.is_ok() { + reachable_ports.write().unwrap().insert(port); + break; + } } - } - udp_socket.set_read_timeout(original_read_timeout).unwrap(); - }) + udp_socket.set_read_timeout(original_read_timeout).unwrap(); + }) + .unwrap() }) .collect(); diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 
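// Annotation, not part of the original patches: where one call site spawns
// many similar threads, the net-utils hunks above parameterize the name while
// staying inside the 15-byte limit: "solVrfyTcp" is 10 bytes and {:05} pads
// the port to exactly five digits. A minimal sketch; the probe body is a
// placeholder:
use std::thread;

fn spawn_port_prober(port: u16) -> thread::JoinHandle<()> {
    thread::Builder::new()
        .name(format!("solVrfyTcp{:05}", port)) // e.g. "solVrfyTcp08001"
        .spawn(move || {
            // real code blocks waiting on an incoming connection for `port`
            let _ = port;
        })
        .unwrap()
}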
aee1b310dd59d9..e2856e9423f796 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -45,7 +45,7 @@ pub const VERIFY_MIN_PACKETS_PER_THREAD: usize = 128; lazy_static! { static ref PAR_THREAD_POOL: ThreadPool = rayon::ThreadPoolBuilder::new() .num_threads(get_thread_count()) - .thread_name(|ix| format!("sigverify_{}", ix)) + .thread_name(|ix| format!("solSigVerify{:02}", ix)) .build() .unwrap(); } diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 70f4d2f1ed47ac..2b71c6ab6140f4 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -106,7 +106,7 @@ impl PohService { let poh_exit_ = poh_exit.clone(); let poh_config = poh_config.clone(); let tick_producer = Builder::new() - .name("solana-poh-service-tick_producer".to_string()) + .name("solPohTickProd".to_string()) .spawn(move || { solana_sys_tuner::request_realtime_poh(); if poh_config.hashes_per_tick.is_none() { @@ -452,7 +452,7 @@ mod tests { let exit = exit.clone(); Builder::new() - .name("solana-poh-service-entry_producer".to_string()) + .name("solPohEntryProd".to_string()) .spawn(move || { let now = Instant::now(); let mut total_us = 0; diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index 3dc5645e605dff..3dbc25473c1572 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -71,7 +71,7 @@ impl OptimisticallyConfirmedBankTracker { let mut last_notified_confirmed_slot: Slot = 0; let mut highest_confirmed_slot: Slot = 0; let thread_hdl = Builder::new() - .name("solana-optimistic-bank-tracker".to_string()) + .name("solOpConfBnkTrk".to_string()) .spawn(move || loop { if exit_.load(Ordering::Relaxed) { break; diff --git a/rpc/src/rpc_completed_slots_service.rs b/rpc/src/rpc_completed_slots_service.rs index 919f66a98d9fa4..fb1c20f3199b03 100644 --- a/rpc/src/rpc_completed_slots_service.rs +++ b/rpc/src/rpc_completed_slots_service.rs @@ -24,7 +24,7 @@ impl RpcCompletedSlotsService { exit: Arc, ) -> JoinHandle<()> { Builder::new() - .name("solana-rpc-completed-slots-service".to_string()) + .name("solRpcComplSlot".to_string()) .spawn(move || loop { // received exit signal, shutdown the service if exit.load(Ordering::Relaxed) { diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index 4efc2b0aa12683..9b5139a9efb3a6 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -85,7 +85,7 @@ impl PubSubService { let (trigger, tripwire) = Tripwire::new(); let thread_hdl = Builder::new() - .name("solana-pubsub".to_string()) + .name("solRpcPubSub".to_string()) .spawn(move || { let runtime = tokio::runtime::Builder::new_multi_thread() .worker_threads(pubsub_config.worker_threads) @@ -416,6 +416,6 @@ mod tests { let (_trigger, pubsub_service) = PubSubService::new(PubSubConfig::default(), &subscriptions, pubsub_addr); let thread = pubsub_service.thread_hdl.thread(); - assert_eq!(thread.name().unwrap(), "solana-pubsub"); + assert_eq!(thread.name().unwrap(), "solRpcPubSub"); } } diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index f7ad9622fbd0b7..a74b46e7eaec91 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -385,7 +385,7 @@ impl JsonRpcService { tokio::runtime::Builder::new_multi_thread() .worker_threads(rpc_threads) .on_thread_start(move || renice_this_thread(rpc_niceness_adj).unwrap()) - .thread_name("sol-rpc-el") + .thread_name("solRpcEl") .enable_all() .build() .expect("Runtime"), @@ -483,7 +483,7 @@ impl 
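// Annotation, not part of the original patches: tokio runtimes apply one
// fixed name to every worker thread (there is no per-index closure), which is
// why "sol-rpc-el" above simply became "solRpcEl". A minimal sketch assuming
// the tokio crate with the rt-multi-thread feature; the worker count is
// illustrative:
fn build_rpc_runtime() -> std::io::Result<tokio::runtime::Runtime> {
    tokio::runtime::Builder::new_multi_thread()
        .worker_threads(4)
        .thread_name("solRpcEl")
        .enable_all()
        .build()
}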
JsonRpcService { let (close_handle_sender, close_handle_receiver) = unbounded(); let thread_hdl = Builder::new() - .name("solana-jsonrpc".to_string()) + .name("solJsonRpcSvc".to_string()) .spawn(move || { renice_this_thread(rpc_niceness_adj).unwrap(); @@ -648,7 +648,7 @@ mod tests { ) .expect("assume successful JsonRpcService start"); let thread = rpc_service.thread_hdl.thread(); - assert_eq!(thread.name().unwrap(), "solana-jsonrpc"); + assert_eq!(thread.name().unwrap(), "solJsonRpcSvc"); assert_eq!( 10_000, diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index bd9fe337460279..9ee8696d7b5cb7 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -632,11 +632,11 @@ impl RpcSubscriptions { } else { Some( Builder::new() - .name("solana-rpc-notifications".to_string()) + .name("solRpcNotifier".to_string()) .spawn(move || { let pool = rayon::ThreadPoolBuilder::new() .num_threads(notification_threads) - .thread_name(|i| format!("sol-sub-notif-{}", i)) + .thread_name(|i| format!("solRpcNotify{:02}", i)) .build() .unwrap(); pool.install(|| { diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index b9fdfb9bf90b49..4d393a01262b67 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -37,7 +37,7 @@ impl TransactionStatusService { ) -> Self { let exit = exit.clone(); let thread_hdl = Builder::new() - .name("solana-transaction-status-writer".to_string()) + .name("solTxStatusWrtr".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 8d21fed9c7c939..41ed6eb8cecc96 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -461,7 +461,7 @@ impl AccountsBackgroundService { let mut total_remove_slots_time = 0; let mut last_expiration_check_time = Instant::now(); let t_background = Builder::new() - .name("solana-bg-accounts".to_string()) + .name("solBgAccounts".to_string()) .spawn(move || { let mut stats = StatsManager::new(); let mut last_snapshot_end_time = None; diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 2bb0f51142eaa1..23c1d230ba22e3 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -1707,7 +1707,7 @@ pub fn make_min_priority_thread_pool() -> ThreadPool { // Use lower thread count to reduce priority. 
let num_threads = quarter_thread_count(); rayon::ThreadPoolBuilder::new() - .thread_name(|i| format!("solana-cleanup-accounts-{}", i)) + .thread_name(|i| format!("solAccountsLo{:02}", i)) .num_threads(num_threads) .build() .unwrap() @@ -1946,7 +1946,7 @@ impl AccountsDb { file_size: DEFAULT_FILE_SIZE, thread_pool: rayon::ThreadPoolBuilder::new() .num_threads(num_threads) - .thread_name(|i| format!("solana-db-accounts-{}", i)) + .thread_name(|i| format!("solAccounts{:02}", i)) .stack_size(ACCOUNTS_STACK_SIZE) .build() .unwrap(), @@ -2299,7 +2299,7 @@ impl AccountsDb { fn start_background_hasher(&mut self) { let (sender, receiver) = unbounded(); Builder::new() - .name("solana-db-store-hasher-accounts".to_string()) + .name("solDbStoreHashr".to_string()) .spawn(move || { Self::background_hasher(receiver); }) diff --git a/runtime/src/accounts_index_storage.rs b/runtime/src/accounts_index_storage.rs index 16fe3b5c0c7038..155adade447e1c 100644 --- a/runtime/src/accounts_index_storage.rs +++ b/runtime/src/accounts_index_storage.rs @@ -72,7 +72,7 @@ impl BgThreads { // note that using rayon here causes us to exhaust # rayon threads and many tests running in parallel deadlock Builder::new() - .name("solana-idx-flusher".to_string()) + .name(format!("solIdxFlusher{:02}", idx)) .spawn(move || { storage_.background(exit_, in_mem_, can_advance_age); }) diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index d9eb457126faab..c1cedf62065ba9 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -312,7 +312,7 @@ impl BankClient { let thread_bank = bank.clone(); let bank = bank.clone(); Builder::new() - .name("solana-bank-client".to_string()) + .name("solBankClient".to_string()) .spawn(move || Self::run(&thread_bank, transaction_receiver)) .unwrap(); Self { diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 5b42208d042e7c..9e9af746301031 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -791,7 +791,7 @@ where let accounts_db = Arc::new(accounts_db); let accounts_db_clone = accounts_db.clone(); let handle = Builder::new() - .name("notify_account_restore_from_snapshot".to_string()) + .name("solNfyAccRestor".to_string()) .spawn(move || { accounts_db_clone.notify_account_restore_from_snapshot(); }) diff --git a/runtime/src/shared_buffer_reader.rs b/runtime/src/shared_buffer_reader.rs index 819b4bcb3f2a01..41c57b0d484ce3 100644 --- a/runtime/src/shared_buffer_reader.rs +++ b/runtime/src/shared_buffer_reader.rs @@ -75,7 +75,7 @@ impl SharedBuffer { let bg_reader_data = instance.bg_reader_data.clone(); let handle = Builder::new() - .name("solana-compressed_file_reader".to_string()) + .name("solCompFileRead".to_string()) .spawn(move || { // importantly, this thread does NOT hold a refcount on the arc of 'instance' bg_reader_data.read_entire_file_in_bg(reader, total_buffer_budget, chunk_size); diff --git a/runtime/src/verify_accounts_hash_in_background.rs b/runtime/src/verify_accounts_hash_in_background.rs index 90266e36a61a94..63a7eeed97495b 100644 --- a/runtime/src/verify_accounts_hash_in_background.rs +++ b/runtime/src/verify_accounts_hash_in_background.rs @@ -117,7 +117,7 @@ pub(crate) mod tests { let verify_ = Arc::clone(verify); verify.start(|| { Builder::new() - .name("solana-bg-hash-verifier".to_string()) + .name("solBgHashVerfy".to_string()) .spawn(move || { // should have been marked not complete before thread started assert!(!verify_.check_complete()); diff --git 
a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index 9195aeb0a7b4b7..97b29ee82e542b 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -410,7 +410,7 @@ impl SendTransactionService { config ); Builder::new() - .name("send-tx-receive".to_string()) + .name("solStxReceive".to_string()) .spawn(move || loop { let recv_timeout_ms = config.batch_send_rate_ms; let stats = &stats_report.stats; @@ -510,7 +510,7 @@ impl SendTransactionService { config ); Builder::new() - .name("send-tx-retry".to_string()) + .name("solStxRetry".to_string()) .spawn(move || loop { let retry_interval_ms = config.retry_rate_ms; let stats = &stats_report.stats; diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs index 41379fa22330f3..39295fa82a5551 100644 --- a/streamer/src/quic.rs +++ b/streamer/src/quic.rs @@ -323,11 +323,14 @@ pub fn spawn_server( stats, ) }?; - let handle = thread::spawn(move || { - if let Err(e) = runtime.block_on(task) { - warn!("error from runtime.block_on: {:?}", e); - } - }); + let handle = thread::Builder::new() + .name("solQuicServer".into()) + .spawn(move || { + if let Err(e) = runtime.block_on(task) { + warn!("error from runtime.block_on: {:?}", e); + } + }) + .unwrap(); Ok(handle) } diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index 3492f60c8933a8..2610b882c7d4b8 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -168,7 +168,7 @@ pub fn receiver( let res = socket.set_read_timeout(Some(Duration::new(1, 0))); assert!(res.is_ok(), "streamer::receiver set_read_timeout error"); Builder::new() - .name("solana-receiver".to_string()) + .name("solReceiver".to_string()) .spawn(move || { let _ = recv_loop( &socket, @@ -372,7 +372,7 @@ pub fn responder( stats_reporter_sender: Option>>, ) -> JoinHandle<()> { Builder::new() - .name(format!("solana-responder-{}", name)) + .name(format!("solRspndr{}", name)) .spawn(move || { let mut errors = 0; let mut last_error = None; @@ -477,7 +477,7 @@ mod test { let t_responder = { let (s_responder, r_responder) = unbounded(); let t_responder = responder( - "streamer_send_test", + "SendTest", Arc::new(send), r_responder, SocketAddrSpace::Unspecified, diff --git a/validator/src/admin_rpc_service.rs b/validator/src/admin_rpc_service.rs index ab61c2ee675dbb..75a3971c7c9cb1 100644 --- a/validator/src/admin_rpc_service.rs +++ b/validator/src/admin_rpc_service.rs @@ -192,22 +192,25 @@ impl AdminRpc for AdminRpcImpl { fn exit(&self, meta: Self::Metadata) -> Result<()> { debug!("exit admin rpc request received"); - thread::spawn(move || { - // Delay exit signal until this RPC request completes, otherwise the caller of `exit` might - // receive a confusing error as the validator shuts down before a response is sent back. - thread::sleep(Duration::from_millis(100)); - - warn!("validator exit requested"); - meta.validator_exit.write().unwrap().exit(); - - // TODO: Debug why Exit doesn't always cause the validator to fully exit - // (rocksdb background processing or some other stuck thread perhaps?). 
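// Annotation, not part of the original patches: responder threads compose
// their name from a short suffix supplied by the caller, which is why
// "serve-repairs" and "gossip" shrank to "Repair" and "Gossip" earlier in the
// series: "solRspndr" is 9 bytes, leaving 6 for the suffix. A minimal sketch;
// the loop body is a placeholder:
use std::thread;

fn spawn_responder(name: &'static str) -> thread::JoinHandle<()> {
    thread::Builder::new()
        .name(format!("solRspndr{}", name)) // e.g. "solRspndrGossip", 15 bytes
        .spawn(move || {
            // real code drains a channel and writes packet batches to a socket
        })
        .unwrap()
}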
- // - // If the process is still alive after five seconds, exit harder - thread::sleep(Duration::from_secs(5)); - warn!("validator exit timeout"); - std::process::exit(0); - }); + thread::Builder::new() + .name("solProcessExit".into()) + .spawn(move || { + // Delay exit signal until this RPC request completes, otherwise the caller of `exit` might + // receive a confusing error as the validator shuts down before a response is sent back. + thread::sleep(Duration::from_millis(100)); + + warn!("validator exit requested"); + meta.validator_exit.write().unwrap().exit(); + + // TODO: Debug why Exit doesn't always cause the validator to fully exit + // (rocksdb background processing or some other stuck thread perhaps?). + // + // If the process is still alive after five seconds, exit harder + thread::sleep(Duration::from_secs(5)); + warn!("validator exit timeout"); + std::process::exit(0); + }) + .unwrap(); Ok(()) } @@ -375,14 +378,14 @@ pub fn run(ledger_path: &Path, metadata: AdminRpcRequestMetadata) { let admin_rpc_path = admin_rpc_path(ledger_path); let event_loop = tokio::runtime::Builder::new_multi_thread() - .thread_name("sol-adminrpc-el") + .thread_name("solAdminRpcEl") .worker_threads(3) // Three still seems like a lot, and better than the default of available core count .enable_all() .build() .unwrap(); Builder::new() - .name("solana-adminrpc".to_string()) + .name("solAdminRpc".to_string()) .spawn(move || { let mut io = MetaIoHandler::default(); io.extend_with(AdminRpcImpl.to_delegate()); diff --git a/validator/src/lib.rs b/validator/src/lib.rs index fa40f4dfb0ae4d..3289a221e5418b 100644 --- a/validator/src/lib.rs +++ b/validator/src/lib.rs @@ -66,15 +66,20 @@ pub fn redirect_stderr_to_file(logfile: Option) -> Option solana_logger::setup_with_default(filter); redirect_stderr(&logfile); - Some(std::thread::spawn(move || { - for signal in signals.forever() { - info!( - "received SIGUSR1 ({}), reopening log file: {:?}", - signal, logfile - ); - redirect_stderr(&logfile); - } - })) + Some( + std::thread::Builder::new() + .name("solSigUsr1".into()) + .spawn(move || { + for signal in signals.forever() { + info!( + "received SIGUSR1 ({}), reopening log file: {:?}", + signal, logfile + ); + redirect_stderr(&logfile); + } + }) + .unwrap(), + ) } #[cfg(not(unix))] { From c63453f2fb117be1b455326f3e447d8bf4fde8bf Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sat, 20 Aug 2022 10:14:49 -0500 Subject: [PATCH 63/67] cleanup comment on filter_zero_lamport_clean_for_incremental_snapshots (#27273) --- runtime/src/accounts_db.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 23c1d230ba22e3..6218f348253647 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -2985,21 +2985,21 @@ impl AccountsDb { } /// During clean, some zero-lamport accounts that are marked for purge should *not* actually - /// get purged. Filter out those accounts here. + /// get purged. Filter out those accounts here by removing them from 'purges_zero_lamports' /// /// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher /// than the last full snapshot slot. This is to protect against the following scenario: /// /// ```text - /// A full snapshot is taken, and it contains an account with a non-zero balance. Later, - /// that account's goes to zero. Evntually cleaning runs, and before, this account would be - /// cleaned up. 
Finally, an incremental snapshot is taken. + /// A full snapshot is taken, including account 'alpha' with a non-zero balance. In a later slot, + /// alpha's lamports go to zero. Eventually, cleaning runs. Without this change, + /// alpha would be cleaned up and removed completely. Finally, an incremental snapshot is taken. /// - /// Later, the incremental (and full) snapshot is used to rebuild the bank and accounts - database (e.x. if the node restarts). The full snapshot _does_ contain the account (from - above) and its balance is non-zero, however, since the account was cleaned up in a later - slot, the incremental snapshot does not contain any info about this account, thus, the - accounts database will contain the old info from this account, which has its old non-zero + /// Later, the incremental and full snapshots are used to rebuild the bank and accounts + database (e.g. if the node restarts). The full snapshot _does_ contain alpha + and its balance is non-zero. However, since alpha was cleaned up in a slot after the full + snapshot slot (due to having zero lamports), the incremental snapshot would not contain alpha. + Thus, the accounts database will contain the old, incorrect info for alpha with a non-zero /// balance. Very bad! /// ``` /// From 3a80118b11c88b2de271943fbc8aaed6d9a75610 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Sat, 20 Aug 2022 10:15:10 -0500 Subject: [PATCH 64/67] remove inaccurate log (#27255) --- runtime/src/accounts_db.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index 6218f348253647..3c0235733001bf 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -3730,10 +3730,6 @@ impl AccountsDb { } if is_ancient(accounts) { - if current_ancient.is_some() { - info!("ancient_append_vec: shrinking full ancient: {}", slot); - } - // this slot is ancient and can become the 'current' ancient for other slots to be squashed into *current_ancient = Some((slot, Arc::clone(storage))); return false; // we're done with this slot - this slot IS the ancient append vec From bdf37352adf298818344111a26dcf60dd40e5055 Mon Sep 17 00:00:00 2001 From: behzad nouri Date: Sat, 20 Aug 2022 19:10:22 +0000 Subject: [PATCH 65/67] patches metrics for invalid cached vote/stake accounts (#27266) patches invalid cached vote/stake accounts metrics The invalid cached vote accounts metric is overcounting actual mismatches, and the invalid cached stake accounts metric is undercounting. --- runtime/src/bank.rs | 70 ++++++++++++++++--------------- runtime/src/vote_account.rs | 12 +++---- 2 files changed, 36 insertions(+), 46 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index cfd5187b84e891..ff989f44135ef8 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -2736,13 +2736,11 @@ impl Bank { "distributed inflation: {} (rounded from: {})", validator_rewards_paid, validator_rewards ); - // TODO: staked_nodes forces an eager stakes calculation. remove it!
- let (num_stake_accounts, num_vote_accounts, num_staked_nodes) = { + let (num_stake_accounts, num_vote_accounts) = { let stakes = self.stakes_cache.stakes(); ( stakes.stake_delegations().len(), stakes.vote_accounts().len(), - stakes.staked_nodes().len(), ) }; self.capitalization @@ -2769,7 +2767,6 @@ impl Bank { ("post_capitalization", self.capitalization(), i64), ("num_stake_accounts", num_stake_accounts as i64, i64), ("num_vote_accounts", num_vote_accounts as i64, i64), - ("num_staked_nodes", num_staked_nodes as i64, i64) ); } @@ -2807,9 +2804,26 @@ impl Bank { None => { invalid_stake_keys .insert(*stake_pubkey, InvalidCacheEntryReason::Missing); + invalid_cached_stake_accounts.fetch_add(1, Relaxed); return; } }; + if cached_stake_account.account() != &stake_account { + invalid_cached_stake_accounts.fetch_add(1, Relaxed); + let cached_stake_account = cached_stake_account.account(); + if cached_stake_account.lamports() == stake_account.lamports() + && cached_stake_account.data() == stake_account.data() + && cached_stake_account.owner() == stake_account.owner() + && cached_stake_account.executable() == stake_account.executable() + { + invalid_cached_stake_accounts_rent_epoch.fetch_add(1, Relaxed); + } else { + debug!( + "cached stake account mismatch: {}: {:?}, {:?}", + stake_pubkey, stake_account, cached_stake_account + ); + } + } let stake_account = match StakeAccount::<()>::try_from(stake_account) { Ok(stake_account) => stake_account, Err(stake_account::Error::InvalidOwner { .. }) => { @@ -2832,33 +2846,6 @@ impl Bank { return; } }; - if cached_stake_account != &stake_account { - invalid_cached_stake_accounts.fetch_add(1, Relaxed); - let mut cached_account = cached_stake_account.account().clone(); - // We could have collected rent on the loaded account already in this new epoch (we could be at partition_index 12, for example). - // So, we may need to adjust the rent_epoch of the cached account. So, update rent_epoch and compare just the accounts. 
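// Annotation, not part of the original patches: the reworked metric logic
// above counts every cache mismatch, then compares each field except
// rent_epoch so that mismatches caused only by rent collection can be counted
// separately from real divergence. A standalone restatement of that
// predicate, assuming solana_sdk's ReadableAccount trait:
use solana_sdk::account::{AccountSharedData, ReadableAccount};

fn differs_only_in_rent_epoch(cached: &AccountSharedData, loaded: &AccountSharedData) -> bool {
    // the caller already knows the accounts differ somewhere; if everything
    // below matches, the difference must be in rent_epoch
    cached.lamports() == loaded.lamports()
        && cached.data() == loaded.data()
        && cached.owner() == loaded.owner()
        && cached.executable() == loaded.executable()
}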
- ExpectedRentCollection::maybe_update_rent_epoch_on_load( - &mut cached_account, - &SlotInfoInEpoch::new_small(self.slot()), - &SlotInfoInEpoch::new_small(self.slot()), - self.epoch_schedule(), - self.rent_collector(), - stake_pubkey, - &self.rewrites_skipped_this_slot, - ); - if &cached_account != stake_account.account() { - info!( - "cached stake account mismatch: {}: {:?}, {:?}", - stake_pubkey, - cached_account, - stake_account.account() - ); - } else { - // track how many of 'invalid_cached_stake_accounts' were due to rent_epoch changes - // subtract these to find real invalid cached accounts - invalid_cached_stake_accounts_rent_epoch.fetch_add(1, Relaxed); - } - } let stake_delegation = (*stake_pubkey, stake_account); let mut vote_delegations = if let Some(vote_delegations) = vote_with_stake_delegations_map.get_mut(vote_pubkey) @@ -2868,16 +2855,12 @@ impl Bank { let cached_vote_account = cached_vote_accounts.get(vote_pubkey); let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { Some(vote_account) => { - match cached_vote_account { - Some(cached_vote_account) - if cached_vote_account == &vote_account => {} - _ => { - invalid_cached_vote_accounts.fetch_add(1, Relaxed); - } - }; if vote_account.owner() != &solana_vote_program::id() { invalid_vote_keys .insert(*vote_pubkey, InvalidCacheEntryReason::WrongOwner); + if cached_vote_account.is_some() { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } return; } vote_account @@ -2899,9 +2882,18 @@ impl Bank { } else { invalid_vote_keys .insert(*vote_pubkey, InvalidCacheEntryReason::BadState); + if cached_vote_account.is_some() { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } return; }; - + match cached_vote_account { + Some(cached_vote_account) + if cached_vote_account.account() == &vote_account => {} + _ => { + invalid_cached_vote_accounts.fetch_add(1, Relaxed); + } + }; vote_with_stake_delegations_map .entry(*vote_pubkey) .or_insert_with(|| VoteWithStakeDelegations { diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 1feefb6ffaee46..c37dd7b2487c4b 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -3,7 +3,7 @@ use { once_cell::sync::OnceCell, serde::ser::{Serialize, Serializer}, solana_sdk::{ - account::{accounts_equal, AccountSharedData, ReadableAccount}, + account::{AccountSharedData, ReadableAccount}, instruction::InstructionError, pubkey::Pubkey, }, @@ -53,6 +53,10 @@ pub struct VoteAccounts { } impl VoteAccount { + pub(crate) fn account(&self) -> &AccountSharedData { + &self.0.account + } + pub(crate) fn lamports(&self) -> u64 { self.0.account.lamports() } @@ -255,12 +259,6 @@ impl PartialEq for VoteAccountInner { } } -impl PartialEq for VoteAccount { - fn eq(&self, other: &AccountSharedData) -> bool { - accounts_equal(&self.0.account, other) - } -} - impl Default for VoteAccounts { fn default() -> Self { Self { From bac529779bbf9083c446bd70d6f9fa4f5ebc24f8 Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Sun, 21 Aug 2022 16:20:51 -0500 Subject: [PATCH 66/67] Refactor epoch reward 3 (#27259) * refactor: extract store_stake_accounts fn * refactor: extract store_vote_account fn * refactor: extract reward history update fn * clippy: slice * clippy: slice Co-authored-by: haoran --- runtime/src/bank.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index ff989f44135ef8..14e1d42d896abc 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3295,6 +3295,21 @@ impl Bank { >>>>>>> 
c17f15a34f (Refactor epoch reward 2 (#27257)) } + fn update_reward_history( + &self, + stake_rewards: Vec, + mut vote_rewards: Vec<(Pubkey, RewardInfo)>, + ) { + let additional_reserve = stake_rewards.len() + vote_rewards.len(); + let mut rewards = self.rewards.write().unwrap(); + rewards.reserve(additional_reserve); + rewards.append(&mut vote_rewards); + stake_rewards + .into_iter() + .filter(|x| x.get_stake_reward() > 0) + .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); + } + fn update_recent_blockhashes_locked(&self, locked_blockhash_queue: &BlockhashQueue) { #[allow(deprecated)] self.update_sysvar_account(&sysvar::recent_blockhashes::id(), |account| { From ea4a219144f4e2085bbd2f3e56dfa63c1e5596a7 Mon Sep 17 00:00:00 2001 From: haoran Date: Sun, 21 Aug 2022 16:37:43 -0500 Subject: [PATCH 67/67] fix merges --- runtime/src/bank.rs | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 14e1d42d896abc..66f495aeb9d429 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -3264,35 +3264,6 @@ impl Bank { m.stop(); metrics.store_vote_accounts_us.fetch_add(m.as_us(), Relaxed); vote_rewards -<<<<<<< HEAD - } - - fn update_reward_history( - &self, - stake_rewards: Vec, - mut vote_rewards: Vec<(Pubkey, RewardInfo)>, - ) { - let additional_reserve = stake_rewards.len() + vote_rewards.len(); - let mut rewards = self.rewards.write().unwrap(); - rewards.reserve(additional_reserve); - rewards.append(&mut vote_rewards); - stake_rewards - .into_iter() - .filter(|x| x.get_stake_reward() > 0) - .for_each(|x| rewards.push((x.stake_pubkey, x.stake_reward_info))); - } - - fn store_stake_accounts(&self, stake_rewards: &[StakeReward], metrics: &mut RewardsMetrics) { - // store stake account even if stakers_reward is 0 - // because credits observed has changed - let mut m = Measure::start("store_stake_account"); - self.store_accounts((self.slot(), stake_rewards)); - m.stop(); - metrics - .store_stake_accounts_us - .fetch_add(m.as_us(), Relaxed); -======= ->>>>>>> c17f15a34f (Refactor epoch reward 2 (#27257)) } fn update_reward_history(
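// Annotation, not part of the original patches: the series ends mid-hunk
// while patch 67 deletes code duplicated by a bad merge. The surviving
// update_reward_history (patch 66) reserves capacity once, appends the vote
// rewards, and keeps only stake rewards with a positive payout. A standalone
// sketch of that merge pattern, with simple tuples standing in for
// (Pubkey, RewardInfo) and StakeReward:
fn merge_rewards(
    rewards: &mut Vec<(u64, i64)>,  // stand-in for the rewards log
    mut vote_rewards: Vec<(u64, i64)>,
    stake_rewards: Vec<(u64, i64)>, // .1 stands in for get_stake_reward()
) {
    rewards.reserve(vote_rewards.len() + stake_rewards.len());
    rewards.append(&mut vote_rewards);
    rewards.extend(stake_rewards.into_iter().filter(|x| x.1 > 0));
}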